From 975f66f2eebe9dadba04f275774d4ab83f74cf25 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 13 Apr 2024 14:04:41 +0200 Subject: Adding upstream version 7.7.0+dfsg. Signed-off-by: Daniel Baumann --- .../aws/.github/ISSUE_TEMPLATE/bug_report.yml | 210 + .../aws/.github/ISSUE_TEMPLATE/feature_request.yml | 100 + .../netapp/aws/.github/workflows/coverage.yml | 45 + .../netapp/aws/.github/workflows/main.yml | 45 + ansible_collections/netapp/aws/CHANGELOG.rst | 90 + ansible_collections/netapp/aws/COPYING | 674 ++ ansible_collections/netapp/aws/FILES.json | 376 ++ ansible_collections/netapp/aws/MANIFEST.json | 35 + ansible_collections/netapp/aws/README.md | 86 + .../netapp/aws/changelogs/changelog.yaml | 81 + .../netapp/aws/changelogs/config.yaml | 32 + .../netapp/aws/changelogs/fragments/20.2.0.yaml | 2 + .../netapp/aws/changelogs/fragments/20.6.0.yaml | 2 + .../netapp/aws/changelogs/fragments/20.8.0.yaml | 3 + .../netapp/aws/changelogs/fragments/20.9.0.yaml | 2 + .../netapp/aws/changelogs/fragments/2019.10.0.yaml | 2 + .../aws/changelogs/fragments/DEVOPS-3569.yaml | 4 + .../aws/changelogs/fragments/DEVOPS-3644.yaml | 5 + .../aws/changelogs/fragments/DEVOPS-4416.yaml | 2 + ansible_collections/netapp/aws/meta/runtime.yml | 8 + .../netapp/aws/plugins/doc_fragments/netapp.py | 55 + .../netapp/aws/plugins/module_utils/netapp.py | 241 + .../aws/plugins/module_utils/netapp_module.py | 142 + .../modules/aws_netapp_cvs_active_directory.py | 276 + .../plugins/modules/aws_netapp_cvs_filesystems.py | 362 + .../aws/plugins/modules/aws_netapp_cvs_pool.py | 267 + .../plugins/modules/aws_netapp_cvs_snapshots.py | 245 + ansible_collections/netapp/aws/requirements.txt | 1 + .../netapp/aws/tests/unit/compat/__init__.py | 0 .../netapp/aws/tests/unit/compat/builtins.py | 33 + .../netapp/aws/tests/unit/compat/mock.py | 122 + .../netapp/aws/tests/unit/compat/unittest.py | 44 + .../tests/unit/plugins/module_utils/test_netapp.py | 195 + .../test_aws_netapp_cvs_active_directory.py | 
117 + .../modules/test_aws_netapp_cvs_filesystems.py | 155 + .../plugins/modules/test_aws_netapp_cvs_pool.py | 258 + .../modules/test_aws_netapp_cvs_snapshots.py | 147 + .../netapp/aws/tests/unit/requirements.txt | 1 + .../azure/.github/ISSUE_TEMPLATE/bug_report.yml | 210 + .../.github/ISSUE_TEMPLATE/feature_request.yml | 100 + .../netapp/azure/.github/workflows/coverage.yml | 45 + .../netapp/azure/.github/workflows/main.yml | 64 + ansible_collections/netapp/azure/CHANGELOG.rst | 171 + ansible_collections/netapp/azure/COPYING | 674 ++ ansible_collections/netapp/azure/FILES.json | 705 ++ ansible_collections/netapp/azure/HACK.md | 13 + ansible_collections/netapp/azure/MANIFEST.json | 37 + ansible_collections/netapp/azure/README.md | 157 + .../netapp/azure/changelogs/.plugin-cache.yaml | 35 + .../netapp/azure/changelogs/changelog.yaml | 169 + .../netapp/azure/changelogs/config.yaml | 32 + .../netapp/azure/changelogs/fragments/20.2.0.yaml | 2 + .../netapp/azure/changelogs/fragments/20.4.0.yaml | 3 + .../netapp/azure/changelogs/fragments/20.5.0.yaml | 6 + .../netapp/azure/changelogs/fragments/20.6.0.yaml | 3 + .../netapp/azure/changelogs/fragments/20.7.0.yaml | 2 + .../netapp/azure/changelogs/fragments/20.8.0.yaml | 3 + .../azure/changelogs/fragments/DEVOPS-3505.yaml | 4 + .../azure/changelogs/fragments/DEVOPS-3526.yaml | 3 + .../azure/changelogs/fragments/DEVOPS-3663.yaml | 2 + .../azure/changelogs/fragments/DEVOPS-3704.yaml | 5 + .../azure/changelogs/fragments/DEVOPS-3849.yaml | 3 + .../azure/changelogs/fragments/DEVOPS-3935.yaml | 2 + .../azure/changelogs/fragments/DEVOPS-3949.yaml | 6 + .../azure/changelogs/fragments/DEVOPS-4001.yaml | 2 + .../azure/changelogs/fragments/DEVOPS-4070.yaml | 3 + .../azure/changelogs/fragments/DEVOPS-4135.yaml | 2 + .../azure/changelogs/fragments/DEVOPS-4246.yaml | 4 + .../azure/changelogs/fragments/DEVOPS-4416.yaml | 2 + ansible_collections/netapp/azure/meta/runtime.yml | 8 + .../netapp/azure/plugins/doc_fragments/azure.py | 129 + 
.../azure/plugins/doc_fragments/azure_tags.py | 31 + .../netapp/azure/plugins/doc_fragments/netapp.py | 43 + .../plugins/module_utils/azure_rm_netapp_common.py | 156 + .../azure/plugins/module_utils/netapp_module.py | 271 + .../plugins/modules/azure_rm_netapp_account.py | 404 ++ .../modules/azure_rm_netapp_capacity_pool.py | 259 + .../plugins/modules/azure_rm_netapp_snapshot.py | 226 + .../plugins/modules/azure_rm_netapp_volume.py | 399 ++ ansible_collections/netapp/azure/requirements.txt | 3 + .../targets/azure_rm_netapp_account/aliases | 3 + .../targets/azure_rm_netapp_account/meta/main.yml | 2 + .../targets/azure_rm_netapp_account/tasks/main.yml | 41 + .../targets/azure_rm_netapp_capacity_pool/aliases | 3 + .../azure_rm_netapp_capacity_pool/meta/main.yml | 2 + .../azure_rm_netapp_capacity_pool/tasks/main.yml | 47 + .../targets/azure_rm_netapp_snapshot/aliases | 3 + .../targets/azure_rm_netapp_snapshot/meta/main.yml | 2 + .../azure_rm_netapp_snapshot/tasks/main.yml | 51 + .../targets/azure_rm_netapp_volume/aliases | 3 + .../targets/azure_rm_netapp_volume/meta/main.yml | 2 + .../targets/azure_rm_netapp_volume/tasks/main.yml | 57 + .../requirements/integration.cloud.azure.txt | 1 + .../runner/requirements/requirements-azure.txt | 9 + .../tests/runner/requirements/unit.cloud.azure.txt | 1 + .../azure/tests/runner/requirements/units.txt | 1 + .../netapp/azure/tests/unit/compat/__init__.py | 0 .../netapp/azure/tests/unit/compat/builtins.py | 33 + .../netapp/azure/tests/unit/compat/mock.py | 122 + .../netapp/azure/tests/unit/compat/unittest.py | 44 + .../plugins/module_utils/test_netapp_module.py | 149 + .../modules/test_azure_rm_netapp_account.py | 173 + .../modules/test_azure_rm_netapp_capacity_pool.py | 197 + .../modules/test_azure_rm_netapp_snapshot.py | 165 + .../plugins/modules/test_azure_rm_netapp_volume.py | 501 ++ .../modules/test_azure_rm_netapp_volume_import.py | 74 + .../netapp/azure/tests/unit/requirements.txt | 3 + 
.../.github/ISSUE_TEMPLATE/bug_report.yml | 210 + .../.github/ISSUE_TEMPLATE/feature_request.yml | 100 + .../cloudmanager/.github/workflows/coverage.yml | 45 + .../netapp/cloudmanager/.github/workflows/main.yml | 47 + .../netapp/cloudmanager/CHANGELOG.rst | 325 + ansible_collections/netapp/cloudmanager/COPYING | 674 ++ ansible_collections/netapp/cloudmanager/FILES.json | 1006 +++ .../netapp/cloudmanager/MANIFEST.json | 37 + ansible_collections/netapp/cloudmanager/README.md | 262 + .../cloudmanager/changelogs/.plugin-cache.yaml | 81 + .../netapp/cloudmanager/changelogs/changelog.yaml | 374 ++ .../netapp/cloudmanager/changelogs/config.yaml | 32 + .../changelogs/fragments/DEVOPS-3803.yaml | 2 + .../changelogs/fragments/DEVOPS-3844.yaml | 4 + .../changelogs/fragments/DEVOPS-3909.yaml | 4 + .../changelogs/fragments/DEVOPS-3910.yaml | 2 + .../changelogs/fragments/DEVOPS-3911.yaml | 2 + .../changelogs/fragments/DEVOPS-3912.yaml | 2 + .../changelogs/fragments/DEVOPS-3913.yaml | 2 + .../changelogs/fragments/DEVOPS-3922.yaml | 2 + .../changelogs/fragments/DEVOPS-3946.yaml | 2 + .../changelogs/fragments/DEVOPS-3947.yaml | 2 + .../changelogs/fragments/DEVOPS-3948.yaml | 2 + .../changelogs/fragments/DEVOPS-3965.yaml | 2 + .../changelogs/fragments/DEVOPS-3967.yaml | 2 + .../changelogs/fragments/DEVOPS-3975.yaml | 2 + .../changelogs/fragments/DEVOPS-3984.yaml | 4 + .../changelogs/fragments/DEVOPS-3985.yaml | 2 + .../changelogs/fragments/DEVOPS-3995.yaml | 2 + .../changelogs/fragments/DEVOPS-4021.yaml | 2 + .../changelogs/fragments/DEVOPS-4065.yaml | 5 + .../changelogs/fragments/DEVOPS-4105.yaml | 13 + .../changelogs/fragments/DEVOPS-4118.yaml | 2 + .../changelogs/fragments/DEVOPS-4136.yaml | 2 + .../changelogs/fragments/DEVOPS-4164.yaml | 4 + .../changelogs/fragments/DEVOPS-4200.yaml | 2 + .../changelogs/fragments/DEVOPS-4201.yaml | 2 + .../changelogs/fragments/DEVOPS-4205.yaml | 2 + .../changelogs/fragments/DEVOPS-4223.yaml | 4 + .../changelogs/fragments/DEVOPS-4264.yaml | 2 + 
.../changelogs/fragments/DEVOPS-4267.yaml | 2 + .../changelogs/fragments/DEVOPS-4271.yaml | 4 + .../changelogs/fragments/DEVOPS-4281.yaml | 2 + .../changelogs/fragments/DEVOPS-4292.yaml | 8 + .../changelogs/fragments/DEVOPS-4298.yaml | 2 + .../changelogs/fragments/DEVOPS-4303.yaml | 2 + .../changelogs/fragments/DEVOPS-4321.yaml | 4 + .../changelogs/fragments/DEVOPS-4327.yaml | 2 + .../changelogs/fragments/DEVOPS-4328.yaml | 2 + .../changelogs/fragments/DEVOPS-4358.yaml | 3 + .../changelogs/fragments/DEVOPS-4386.yaml | 2 + .../changelogs/fragments/DEVOPS-4416.yaml | 2 + .../changelogs/fragments/DEVOPS-4458.yaml | 2 + .../changelogs/fragments/DEVOPS-4492.yaml | 5 + .../changelogs/fragments/DEVOPS-4500.yaml | 2 + .../changelogs/fragments/DEVOPS-4513.yaml | 2 + .../changelogs/fragments/DEVOPS-4516.yaml | 2 + .../changelogs/fragments/DEVOPS-4542.yaml | 2 + .../changelogs/fragments/DEVOPS-4563.yaml | 2 + .../changelogs/fragments/DEVOPS-4567.yaml | 2 + .../changelogs/fragments/DEVOPS-4647.yaml | 2 + .../changelogs/fragments/DEVOPS-4703.yaml | 2 + .../changelogs/fragments/DEVOPS-4758.yaml | 2 + .../changelogs/fragments/DEVOPS-4820.yaml | 2 + .../changelogs/fragments/DEVOPS-5002.yaml | 2 + .../changelogs/fragments/DEVOPS-5151.yaml | 2 + .../changelogs/fragments/DEVOPS-5252.yaml | 2 + .../changelogs/fragments/DEVOPS-5307.yaml | 2 + .../changelogs/fragments/DEVOPS-5342.yaml | 2 + .../changelogs/fragments/DEVOPS-5366.yaml | 2 + .../changelogs/fragments/DEVOPS-5437.yaml | 2 + .../changelogs/fragments/DEVOPS-5452.yaml | 2 + .../changelogs/fragments/DEVOPS-5472.yaml | 2 + .../changelogs/fragments/DEVOPS-5527.yaml | 2 + .../changelogs/fragments/DEVOPS-5540.yaml | 2 + .../changelogs/fragments/DEVOPS-5562.yaml | 3 + .../cloudmanager/execution_environments/README.md | 34 + .../from_galaxy/execution-environment.yml | 10 + .../from_galaxy/requirements.yml | 13 + .../from_github/execution-environment.yml | 10 + .../from_github/requirements.yml | 18 + 
.../execution_environments/requirements.txt | 1 + .../netapp/cloudmanager/kubectl.sha256 | 1 + .../cloudmanager/meta/execution-environment.yml | 3 + .../netapp/cloudmanager/meta/runtime.yml | 17 + .../netapp/cloudmanager/plugins/README.md | 31 + .../cloudmanager/plugins/doc_fragments/netapp.py | 48 + .../cloudmanager/plugins/module_utils/netapp.py | 332 + .../plugins/module_utils/netapp_module.py | 1381 ++++ .../plugins/modules/na_cloudmanager_aggregate.py | 332 + .../plugins/modules/na_cloudmanager_aws_fsx.py | 458 ++ .../plugins/modules/na_cloudmanager_cifs_server.py | 265 + .../modules/na_cloudmanager_connector_aws.py | 655 ++ .../modules/na_cloudmanager_connector_azure.py | 591 ++ .../modules/na_cloudmanager_connector_gcp.py | 644 ++ .../plugins/modules/na_cloudmanager_cvo_aws.py | 855 +++ .../plugins/modules/na_cloudmanager_cvo_azure.py | 746 +++ .../plugins/modules/na_cloudmanager_cvo_gcp.py | 858 +++ .../plugins/modules/na_cloudmanager_info.py | 235 + .../plugins/modules/na_cloudmanager_nss_account.py | 192 + .../plugins/modules/na_cloudmanager_snapmirror.py | 471 ++ .../plugins/modules/na_cloudmanager_volume.py | 660 ++ .../netapp/cloudmanager/requirements.txt | 10 + .../cloudmanager/tests/unit/compat/__init__.py | 0 .../cloudmanager/tests/unit/compat/builtins.py | 33 + .../netapp/cloudmanager/tests/unit/compat/mock.py | 122 + .../cloudmanager/tests/unit/compat/unittest.py | 44 + .../tests/unit/plugins/module_utils/test_netapp.py | 506 ++ .../plugins/module_utils/test_netapp_module.py | 578 ++ .../module_utils/test_netapp_module_open.py | 77 + .../modules/test_na_cloudmanager_aggregate.py | 297 + .../modules/test_na_cloudmanager_aws_fsx.py | 165 + .../modules/test_na_cloudmanager_cifs_server.py | 252 + .../modules/test_na_cloudmanager_connector_aws.py | 730 ++ .../test_na_cloudmanager_connector_azure.py | 178 + .../modules/test_na_cloudmanager_connector_gcp.py | 407 ++ .../modules/test_na_cloudmanager_cvo_aws.py | 426 ++ 
.../modules/test_na_cloudmanager_cvo_azure.py | 439 ++ .../modules/test_na_cloudmanager_cvo_gcp.py | 543 ++ .../plugins/modules/test_na_cloudmanager_info.py | 591 ++ .../modules/test_na_cloudmanager_nss_account.py | 144 + .../modules/test_na_cloudmanager_snapmirror.py | 176 + .../plugins/modules/test_na_cloudmanager_volume.py | 216 + .../cloudmanager/tests/unit/requirements-azure.txt | 1 + .../cloudmanager/tests/unit/requirements.txt | 10 + .../.github/ISSUE_TEMPLATE/bug_report.yml | 210 + .../.github/ISSUE_TEMPLATE/feature_request.yml | 100 + .../elementsw/.github/workflows/coverage.yml | 45 + .../netapp/elementsw/.github/workflows/main.yml | 47 + ansible_collections/netapp/elementsw/CHANGELOG.rst | 192 + ansible_collections/netapp/elementsw/FILES.json | 649 ++ ansible_collections/netapp/elementsw/MANIFEST.json | 34 + ansible_collections/netapp/elementsw/README.md | 133 + .../netapp/elementsw/changelogs/changelog.yaml | 221 + .../netapp/elementsw/changelogs/config.yaml | 32 + .../elementsw/changelogs/fragments/20.2.0.yaml | 3 + .../elementsw/changelogs/fragments/20.6.0.yaml | 2 + .../elementsw/changelogs/fragments/20.8.0.yaml | 21 + .../elementsw/changelogs/fragments/20.9.0.yaml | 7 + .../elementsw/changelogs/fragments/2019.10.0.yaml | 2 + .../changelogs/fragments/DEVOPS-3117.yaml | 2 + .../changelogs/fragments/DEVOPS-3174.yaml | 2 + .../changelogs/fragments/DEVOPS-3188.yaml | 2 + .../changelogs/fragments/DEVOPS-3196.yaml | 2 + .../changelogs/fragments/DEVOPS-3235.yaml | 2 + .../elementsw/changelogs/fragments/DEVOPS-3310.yml | 2 + .../changelogs/fragments/DEVOPS-3324.yaml | 2 + .../changelogs/fragments/DEVOPS-3731.yaml | 4 + .../changelogs/fragments/DEVOPS-3733.yaml | 4 + .../changelogs/fragments/DEVOPS-3734.yaml | 2 + .../changelogs/fragments/DEVOPS-3800.yaml | 2 + .../changelogs/fragments/DEVOPS-4416.yaml | 2 + .../netapp/elementsw/meta/runtime.yml | 28 + .../elementsw/plugins/doc_fragments/netapp.py | 51 + .../elementsw/plugins/module_utils/netapp.py | 107 + 
.../module_utils/netapp_elementsw_module.py | 206 + .../plugins/module_utils/netapp_module.py | 225 + .../plugins/modules/na_elementsw_access_group.py | 397 ++ .../modules/na_elementsw_access_group_volumes.py | 247 + .../plugins/modules/na_elementsw_account.py | 340 + .../plugins/modules/na_elementsw_admin_users.py | 233 + .../plugins/modules/na_elementsw_backup.py | 243 + .../modules/na_elementsw_check_connections.py | 154 + .../plugins/modules/na_elementsw_cluster.py | 372 ++ .../plugins/modules/na_elementsw_cluster_config.py | 331 + .../plugins/modules/na_elementsw_cluster_pair.py | 206 + .../plugins/modules/na_elementsw_cluster_snmp.py | 365 + .../plugins/modules/na_elementsw_drive.py | 368 + .../elementsw/plugins/modules/na_elementsw_info.py | 272 + .../plugins/modules/na_elementsw_initiators.py | 343 + .../elementsw/plugins/modules/na_elementsw_ldap.py | 254 + .../modules/na_elementsw_network_interfaces.py | 423 ++ .../elementsw/plugins/modules/na_elementsw_node.py | 357 + .../plugins/modules/na_elementsw_qos_policy.py | 270 + .../plugins/modules/na_elementsw_snapshot.py | 369 + .../modules/na_elementsw_snapshot_restore.py | 203 + .../modules/na_elementsw_snapshot_schedule.py | 586 ++ .../elementsw/plugins/modules/na_elementsw_vlan.py | 274 + .../plugins/modules/na_elementsw_volume.py | 413 ++ .../plugins/modules/na_elementsw_volume_clone.py | 276 + .../plugins/modules/na_elementsw_volume_pair.py | 293 + .../netapp/elementsw/requirements.txt | 1 + .../netapp/elementsw/tests/unit/compat/__init__.py | 0 .../netapp/elementsw/tests/unit/compat/builtins.py | 33 + .../netapp/elementsw/tests/unit/compat/mock.py | 122 + .../netapp/elementsw/tests/unit/compat/unittest.py | 44 + .../modules/test_na_elementsw_access_group.py | 175 + .../test_na_elementsw_access_group_volumes.py | 245 + .../plugins/modules/test_na_elementsw_account.py | 137 + .../plugins/modules/test_na_elementsw_cluster.py | 228 + .../modules/test_na_elementsw_cluster_config.py | 157 + 
.../modules/test_na_elementsw_cluster_snmp.py | 176 + .../unit/plugins/modules/test_na_elementsw_info.py | 344 + .../modules/test_na_elementsw_initiators.py | 201 + .../test_na_elementsw_network_interfaces.py | 293 + .../plugins/modules/test_na_elementsw_nodes.py | 324 + .../modules/test_na_elementsw_qos_policy.py | 300 + .../plugins/modules/test_na_elementsw_template.py | 138 + .../unit/plugins/modules/test_na_elementsw_vlan.py | 343 + .../plugins/modules/test_na_elementsw_volume.py | 364 + .../plugins/modules_utils/test_netapp_module.py | 149 + .../netapp/elementsw/tests/unit/requirements.txt | 1 + .../ontap/.github/ISSUE_TEMPLATE/bug_report.yml | 222 + .../.github/ISSUE_TEMPLATE/feature_request.yml | 100 + .../ontap/.github/workflows/codeql-analysis.yml | 72 + .../netapp/ontap/.github/workflows/coverage.yml | 45 + .../netapp/ontap/.github/workflows/main.yml | 51 + ansible_collections/netapp/ontap/CHANGELOG.rst | 2048 ++++++ ansible_collections/netapp/ontap/COPYING | 674 ++ ansible_collections/netapp/ontap/FILES.json | 7019 ++++++++++++++++++++ ansible_collections/netapp/ontap/MANIFEST.json | 32 + ansible_collections/netapp/ontap/README.md | 1811 +++++ .../netapp/ontap/changelogs/.DS_Store | Bin 0 -> 6148 bytes .../netapp/ontap/changelogs/.plugin-cache.yaml | 683 ++ .../netapp/ontap/changelogs/changelog.yaml | 3035 +++++++++ .../netapp/ontap/changelogs/config.yaml | 32 + .../changelogs/fragments/0-copy_ignore_txt.yml | 4 + .../netapp/ontap/changelogs/fragments/19.10.0.yaml | 40 + .../netapp/ontap/changelogs/fragments/19.11.0.yaml | 16 + .../netapp/ontap/changelogs/fragments/20.1.0.yaml | 20 + .../netapp/ontap/changelogs/fragments/20.2.0.yaml | 17 + .../netapp/ontap/changelogs/fragments/20.3.0.yaml | 8 + .../netapp/ontap/changelogs/fragments/20.4.0.yaml | 30 + .../netapp/ontap/changelogs/fragments/20.4.1.yaml | 10 + .../netapp/ontap/changelogs/fragments/20.5.0.yaml | 53 + .../netapp/ontap/changelogs/fragments/20.6.0.yaml | 37 + 
.../netapp/ontap/changelogs/fragments/20.6.1.yaml | 9 + .../netapp/ontap/changelogs/fragments/20.7.0.yaml | 24 + .../netapp/ontap/changelogs/fragments/20.8.0.yaml | 33 + .../netapp/ontap/changelogs/fragments/20.9.0.yaml | 17 + .../ontap/changelogs/fragments/DEVOPS-1661.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-1665.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-1926.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2353.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2422.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2426.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2459.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2459b.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2491.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2668.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2928.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-2964.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2965.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-2972.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-3113.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3137.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3139.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3148.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3149.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-3167.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3175.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3178.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3181.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3194.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3230.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-3241.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-3242.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3251.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3262.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-3304.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-3310.yml | 5 + .../ontap/changelogs/fragments/DEVOPS-3312.yaml | 2 + 
.../ontap/changelogs/fragments/DEVOPS-3329.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3346.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3354.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3358.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3366.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3367.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3368.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3369.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-3370.yaml | 11 + .../ontap/changelogs/fragments/DEVOPS-3371.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3385.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3386.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3390.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3392.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3399.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3400.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3401.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3439.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3442.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3443.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3454.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3479.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-3480.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3483.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3490.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3494.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3497.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3501.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3510.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3515.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3534.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-3535.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3536.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3540.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3542.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3543.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-3571.yaml | 2 + 
.../ontap/changelogs/fragments/DEVOPS-3579.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3580.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3595.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3615.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3623.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3625.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3626.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3628.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3632.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3633.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3649.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3654.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3655.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3662.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3667.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3668.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3671.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3677.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3685.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3716.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3718.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3754.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3757.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3767.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-3772.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3801.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3807.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3811.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3812.yml | 9 + .../ontap/changelogs/fragments/DEVOPS-3830.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3850.yaml | 13 + .../ontap/changelogs/fragments/DEVOPS-3870.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3883.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3900.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3926.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3939.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3950.yaml | 2 + 
.../ontap/changelogs/fragments/DEVOPS-3952.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-3969.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3971.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-3973.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-3983.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-3994.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4005.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4010.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4022.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4026.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4031.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4039.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4048.yaml | 9 + .../ontap/changelogs/fragments/DEVOPS-4049.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4060.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4079.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4113.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4114.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4116.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4119.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4121.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4122.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4123.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4140.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4150.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4157.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4159.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4161.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4175.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4177.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4179.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4190.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4191.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4196.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4197.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4206.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4218.yaml | 4 + 
.../ontap/changelogs/fragments/DEVOPS-4227.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4228.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4231.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4235.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4243.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4255.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4256.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4270.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4288.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-4289.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4300.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4312.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4319.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4320.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4325.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4329.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4331.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4332.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4333.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4334.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4335.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4336.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4337.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-4338.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4339.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4340.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4341.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4342.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4343.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4344.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4345.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4347.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4348.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4349.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4350.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4367.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4391.yaml | 2 + 
.../ontap/changelogs/fragments/DEVOPS-4392.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4393.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4394.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4399.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4401.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4404.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4415.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4417.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4435.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4439.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4449.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4457.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4459.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4460.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4465.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4479.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4487.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4501.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4508.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4526.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4527.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4540.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4554.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4565.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4566.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4568.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4573.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4577.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4588.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4604.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4605.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4606.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4609.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4612.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4621.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4623.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4644.yaml | 7 + 
.../ontap/changelogs/fragments/DEVOPS-4645.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4648.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4676.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4679.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4691.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4711.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4716.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4719.yml | 3 + .../ontap/changelogs/fragments/DEVOPS-4729.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4731.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-4735.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4736.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4737.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4743.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4745.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4747.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4762.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4763.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4764.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4767.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4769.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4770.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4771.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4773.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4774.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4775.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4776.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-4779.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4780.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4781.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4784.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-4785.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4786.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4788.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4789.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4790.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4794.yaml | 4 + 
.../ontap/changelogs/fragments/DEVOPS-4798.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4799.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4800.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4801.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-4802.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4803.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4804.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4807.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-4808.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4809.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4813.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4818.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4830.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4832.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-4834.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4857.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4862.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4863.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4864.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4872.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-4879.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4882.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4898.yaml | 86 + .../ontap/changelogs/fragments/DEVOPS-4975.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4981.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-4984.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-4985.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-4998.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5015.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-5016.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5017.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5019.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5026.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5034.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5047.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5062.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5063.yaml | 2 + 
.../ontap/changelogs/fragments/DEVOPS-5065.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5068.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5079.yml | 2 + .../ontap/changelogs/fragments/DEVOPS-5082.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5084.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5085.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5090.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5092.yaml | 8 + .../ontap/changelogs/fragments/DEVOPS-5109.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5121.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5127.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5136.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5137.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5138.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5152.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5161.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5168.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5174.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5179.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5188.yaml | 7 + .../ontap/changelogs/fragments/DEVOPS-5189.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5190.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5195.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5215.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5216.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5220.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5223.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5228.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5229.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-5241.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-5243.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5246.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5251.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5263.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5268.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5270.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5271.yaml | 3 + 
.../ontap/changelogs/fragments/DEVOPS-5275.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5285.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5287.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5297.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5299.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5304.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5310.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5312.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5338.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5344.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5354.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5367.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5380.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5409.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5412.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5413.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5414.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5415.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5426.yaml | 31 + .../ontap/changelogs/fragments/DEVOPS-5427.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5430.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5431.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5453.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5457.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5479.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5481.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5484.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5485.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5487.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5503.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5504.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5505.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5506.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5507.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5531.yaml | 7 + .../ontap/changelogs/fragments/DEVOPS-5532.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5536.yaml | 2 + 
.../ontap/changelogs/fragments/DEVOPS-5537.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5540.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5548.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5589.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5591.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5592.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5594.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5595.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5596.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5604.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5606.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5611.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5626.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5628.yaml | 8 + .../ontap/changelogs/fragments/DEVOPS-5629.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5659.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5662.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5665.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5666.yaml | 8 + .../ontap/changelogs/fragments/DEVOPS-5671.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5677.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5678.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5696.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5711.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5713.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5725.yaml | 7 + .../ontap/changelogs/fragments/DEVOPS-5733.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5734.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-5735.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5737.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5738.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5757.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5760.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5761.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-5774.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5784.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5788.yaml | 2 + 
.../ontap/changelogs/fragments/DEVOPS-5790.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5807.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5808.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5809.yml | 4 + .../ontap/changelogs/fragments/DEVOPS-5812.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5816.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5819.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5820.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5844.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5845.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5859.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-5892.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5894.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5899.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5910.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5913.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5917.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5919.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5926.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-5938.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5948.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5952.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-5960.yaml | 4 + .../ontap/changelogs/fragments/DEVOPS-5972.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-5983.yaml | 6 + .../ontap/changelogs/fragments/DEVOPS-5986.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-6001.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-6005.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-6014.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-6015.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-6191.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-6192.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-6193.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-6195.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-6209.yaml | 2 + .../ontap/changelogs/fragments/DEVOPS-6233.yaml | 5 + .../ontap/changelogs/fragments/DEVOPS-6235.yaml | 6 + 
.../ontap/changelogs/fragments/DEVOPS-6262.yaml | 3 + .../ontap/changelogs/fragments/DEVOPS-6266.yaml | 3 + .../ontap/changelogs/fragments/github-110.yaml | 2 + .../ontap/changelogs/fragments/github-56.yaml | 2 + .../ontap/changelogs/fragments/no-story-1.yaml | 3 + .../netapp/ontap/execution_environments/README.md | 34 + .../from_galaxy/execution-environment.yml | 10 + .../from_galaxy/requirements.yml | 13 + .../from_github/execution-environment.yml | 10 + .../from_github/requirements.yml | 18 + .../ontap/execution_environments/requirements.txt | 1 + .../netapp/ontap/meta/execution-environment.yml | 3 + ansible_collections/netapp/ontap/meta/runtime.yml | 153 + .../netapp/ontap/playbooks/examples/README.md | 37 + .../examples/filter/test_na_filter_iso8601.yaml | 77 + .../ontap/playbooks/examples/json_query/README.md | 30 + .../json_query/na_ontap_pb_get_online_volumes.yml | 76 + .../na_ontap_pb_get_online_volumes_loop.yml | 85 + .../na_ontap_pb_install_SSL_certificate.yml | 209 + .../na_ontap_pb_install_SSL_certificate_REST.yml | 202 + .../examples/na_ontap_pb_upgrade_firmware.yml | 46 + ...a_ontap_pb_upgrade_firmware_with_extra_vars.yml | 47 + ...na_ontap_pb_upgrade_firmware_with_vars_file.yml | 45 + .../ontap/playbooks/examples/ontap_vars_file.yml | 27 + .../playbooks/examples/rest_apis/clusters.yaml | 19 + .../examples/rest_apis/list_aggregates.yaml | 42 + .../ontap/playbooks/examples/rest_apis/volumes.yml | 160 + .../examples/support/debug_connectivity.yaml | 42 + .../netapp/ontap/plugins/doc_fragments/netapp.py | 268 + .../filter/iso8601_duration_from_seconds.yml | 33 + .../plugins/filter/iso8601_duration_to_seconds.yml | 30 + .../ontap/plugins/filter/na_filter_iso8601.py | 53 + .../netapp/ontap/plugins/module_utils/netapp.py | 1134 ++++ .../module_utils/netapp_elementsw_module.py | 41 + .../ontap/plugins/module_utils/netapp_ipaddress.py | 134 + .../ontap/plugins/module_utils/netapp_module.py | 619 ++ .../ontap/plugins/module_utils/rest_application.py | 180 + 
.../ontap/plugins/module_utils/rest_generic.py | 101 + .../plugins/module_utils/rest_owning_resource.py | 26 + .../plugins/module_utils/rest_response_helpers.py | 137 + .../netapp/ontap/plugins/module_utils/rest_user.py | 49 + .../ontap/plugins/module_utils/rest_volume.py | 61 + .../ontap/plugins/module_utils/rest_vserver.py | 61 + .../netapp/ontap/plugins/module_utils/zapis_svm.py | 133 + .../plugins/modules/na_ontap_active_directory.py | 328 + ...na_ontap_active_directory_domain_controllers.py | 221 + .../ontap/plugins/modules/na_ontap_aggregate.py | 1121 ++++ .../ontap/plugins/modules/na_ontap_autosupport.py | 449 ++ .../plugins/modules/na_ontap_autosupport_invoke.py | 188 + .../plugins/modules/na_ontap_bgp_peer_group.py | 356 + .../plugins/modules/na_ontap_broadcast_domain.py | 690 ++ .../modules/na_ontap_broadcast_domain_ports.py | 224 + .../ontap/plugins/modules/na_ontap_cg_snapshot.py | 229 + .../netapp/ontap/plugins/modules/na_ontap_cifs.py | 563 ++ .../ontap/plugins/modules/na_ontap_cifs_acl.py | 351 + .../plugins/modules/na_ontap_cifs_local_group.py | 235 + .../modules/na_ontap_cifs_local_group_member.py | 292 + .../plugins/modules/na_ontap_cifs_local_user.py | 244 + .../modules/na_ontap_cifs_local_user_modify.py | 235 + .../na_ontap_cifs_local_user_set_password.py | 162 + .../ontap/plugins/modules/na_ontap_cifs_server.py | 619 ++ .../ontap/plugins/modules/na_ontap_cluster.py | 776 +++ .../ontap/plugins/modules/na_ontap_cluster_ha.py | 151 + .../ontap/plugins/modules/na_ontap_cluster_peer.py | 427 ++ .../ontap/plugins/modules/na_ontap_command.py | 290 + .../netapp/ontap/plugins/modules/na_ontap_debug.py | 258 + .../ontap/plugins/modules/na_ontap_disk_options.py | 175 + .../netapp/ontap/plugins/modules/na_ontap_disks.py | 386 ++ .../netapp/ontap/plugins/modules/na_ontap_dns.py | 368 + .../plugins/modules/na_ontap_domain_tunnel.py | 168 + .../plugins/modules/na_ontap_efficiency_policy.py | 414 ++ .../plugins/modules/na_ontap_ems_destination.py | 199 + 
.../ontap/plugins/modules/na_ontap_ems_filter.py | 250 + .../plugins/modules/na_ontap_export_policy.py | 274 + .../plugins/modules/na_ontap_export_policy_rule.py | 745 +++ .../netapp/ontap/plugins/modules/na_ontap_fcp.py | 275 + .../netapp/ontap/plugins/modules/na_ontap_fdsd.py | 173 + .../netapp/ontap/plugins/modules/na_ontap_fdsp.py | 171 + .../netapp/ontap/plugins/modules/na_ontap_fdspt.py | 257 + .../netapp/ontap/plugins/modules/na_ontap_fdss.py | 123 + .../modules/na_ontap_file_directory_policy.py | 363 + .../modules/na_ontap_file_security_permissions.py | 760 +++ .../na_ontap_file_security_permissions_acl.py | 495 ++ .../plugins/modules/na_ontap_firewall_policy.py | 325 + .../plugins/modules/na_ontap_firmware_upgrade.py | 873 +++ .../ontap/plugins/modules/na_ontap_flexcache.py | 672 ++ .../plugins/modules/na_ontap_fpolicy_event.py | 444 ++ .../plugins/modules/na_ontap_fpolicy_ext_engine.py | 520 ++ .../plugins/modules/na_ontap_fpolicy_policy.py | 378 ++ .../plugins/modules/na_ontap_fpolicy_scope.py | 516 ++ .../plugins/modules/na_ontap_fpolicy_status.py | 283 + .../ontap/plugins/modules/na_ontap_igroup.py | 697 ++ .../plugins/modules/na_ontap_igroup_initiator.py | 221 + .../netapp/ontap/plugins/modules/na_ontap_info.py | 1825 +++++ .../ontap/plugins/modules/na_ontap_interface.py | 1457 ++++ .../ontap/plugins/modules/na_ontap_ipspace.py | 286 + .../netapp/ontap/plugins/modules/na_ontap_iscsi.py | 329 + .../plugins/modules/na_ontap_iscsi_security.py | 350 + .../ontap/plugins/modules/na_ontap_job_schedule.py | 477 ++ .../plugins/modules/na_ontap_kerberos_interface.py | 225 + .../plugins/modules/na_ontap_kerberos_realm.py | 438 ++ .../netapp/ontap/plugins/modules/na_ontap_ldap.py | 221 + .../ontap/plugins/modules/na_ontap_ldap_client.py | 550 ++ .../ontap/plugins/modules/na_ontap_license.py | 708 ++ .../ontap/plugins/modules/na_ontap_local_hosts.py | 197 + .../ontap/plugins/modules/na_ontap_log_forward.py | 312 + .../plugins/modules/na_ontap_login_messages.py | 
307 + .../netapp/ontap/plugins/modules/na_ontap_lun.py | 1270 ++++ .../ontap/plugins/modules/na_ontap_lun_copy.py | 221 + .../ontap/plugins/modules/na_ontap_lun_map.py | 356 + .../modules/na_ontap_lun_map_reporting_nodes.py | 274 + .../ontap/plugins/modules/na_ontap_mcc_mediator.py | 185 + .../ontap/plugins/modules/na_ontap_metrocluster.py | 171 + .../modules/na_ontap_metrocluster_dr_group.py | 223 + .../netapp/ontap/plugins/modules/na_ontap_motd.py | 210 + .../plugins/modules/na_ontap_name_mappings.py | 286 + .../modules/na_ontap_name_service_switch.py | 250 + .../netapp/ontap/plugins/modules/na_ontap_ndmp.py | 392 ++ .../ontap/plugins/modules/na_ontap_net_ifgrp.py | 546 ++ .../ontap/plugins/modules/na_ontap_net_port.py | 309 + .../ontap/plugins/modules/na_ontap_net_routes.py | 354 + .../ontap/plugins/modules/na_ontap_net_subnet.py | 426 ++ .../ontap/plugins/modules/na_ontap_net_vlan.py | 367 + .../netapp/ontap/plugins/modules/na_ontap_nfs.py | 700 ++ .../netapp/ontap/plugins/modules/na_ontap_node.py | 265 + .../ontap/plugins/modules/na_ontap_ntfs_dacl.py | 355 + .../ontap/plugins/modules/na_ontap_ntfs_sd.py | 288 + .../netapp/ontap/plugins/modules/na_ontap_ntp.py | 271 + .../ontap/plugins/modules/na_ontap_ntp_key.py | 159 + .../netapp/ontap/plugins/modules/na_ontap_nvme.py | 250 + .../plugins/modules/na_ontap_nvme_namespace.py | 256 + .../plugins/modules/na_ontap_nvme_subsystem.py | 463 ++ .../ontap/plugins/modules/na_ontap_object_store.py | 360 + .../ontap/plugins/modules/na_ontap_partitions.py | 415 ++ .../netapp/ontap/plugins/modules/na_ontap_ports.py | 583 ++ .../ontap/plugins/modules/na_ontap_portset.py | 423 ++ .../ontap/plugins/modules/na_ontap_publickey.py | 302 + .../modules/na_ontap_qos_adaptive_policy_group.py | 323 + .../plugins/modules/na_ontap_qos_policy_group.py | 579 ++ .../netapp/ontap/plugins/modules/na_ontap_qtree.py | 462 ++ .../ontap/plugins/modules/na_ontap_quota_policy.py | 257 + .../ontap/plugins/modules/na_ontap_quotas.py | 890 +++ 
.../ontap/plugins/modules/na_ontap_rest_cli.py | 156 + .../ontap/plugins/modules/na_ontap_rest_info.py | 1138 ++++ .../ontap/plugins/modules/na_ontap_restit.py | 393 ++ .../ontap/plugins/modules/na_ontap_s3_buckets.py | 586 ++ .../ontap/plugins/modules/na_ontap_s3_groups.py | 234 + .../ontap/plugins/modules/na_ontap_s3_policies.py | 246 + .../ontap/plugins/modules/na_ontap_s3_services.py | 219 + .../ontap/plugins/modules/na_ontap_s3_users.py | 193 + .../modules/na_ontap_security_certificates.py | 468 ++ .../plugins/modules/na_ontap_security_config.py | 285 + .../na_ontap_security_ipsec_ca_certificate.py | 184 + .../modules/na_ontap_security_ipsec_config.py | 127 + .../modules/na_ontap_security_ipsec_policy.py | 458 ++ .../modules/na_ontap_security_key_manager.py | 640 ++ .../ontap/plugins/modules/na_ontap_security_ssh.py | 197 + .../plugins/modules/na_ontap_service_policy.py | 339 + .../modules/na_ontap_service_processor_network.py | 391 ++ .../plugins/modules/na_ontap_snaplock_clock.py | 177 + .../ontap/plugins/modules/na_ontap_snapmirror.py | 1749 +++++ .../plugins/modules/na_ontap_snapmirror_policy.py | 1038 +++ .../ontap/plugins/modules/na_ontap_snapshot.py | 437 ++ .../plugins/modules/na_ontap_snapshot_policy.py | 742 +++ .../netapp/ontap/plugins/modules/na_ontap_snmp.py | 235 + .../plugins/modules/na_ontap_snmp_traphosts.py | 126 + .../plugins/modules/na_ontap_software_update.py | 722 ++ .../ontap/plugins/modules/na_ontap_ssh_command.py | 254 + .../modules/na_ontap_storage_auto_giveback.py | 248 + .../plugins/modules/na_ontap_storage_failover.py | 208 + .../netapp/ontap/plugins/modules/na_ontap_svm.py | 939 +++ .../ontap/plugins/modules/na_ontap_svm_options.py | 163 + .../ontap/plugins/modules/na_ontap_ucadapter.py | 303 + .../ontap/plugins/modules/na_ontap_unix_group.py | 459 ++ .../ontap/plugins/modules/na_ontap_unix_user.py | 330 + .../netapp/ontap/plugins/modules/na_ontap_user.py | 854 +++ .../ontap/plugins/modules/na_ontap_user_role.py | 522 ++ 
.../ontap/plugins/modules/na_ontap_volume.py | 2902 ++++++++ .../plugins/modules/na_ontap_volume_autosize.py | 353 + .../ontap/plugins/modules/na_ontap_volume_clone.py | 355 + .../plugins/modules/na_ontap_volume_efficiency.py | 715 ++ .../plugins/modules/na_ontap_volume_snaplock.py | 227 + .../netapp/ontap/plugins/modules/na_ontap_vscan.py | 168 + .../modules/na_ontap_vscan_on_access_policy.py | 524 ++ .../modules/na_ontap_vscan_on_demand_task.py | 407 ++ .../plugins/modules/na_ontap_vscan_scanner_pool.py | 297 + .../plugins/modules/na_ontap_vserver_audit.py | 373 ++ .../modules/na_ontap_vserver_cifs_security.py | 310 + .../ontap/plugins/modules/na_ontap_vserver_peer.py | 446 ++ .../modules/na_ontap_vserver_peer_permissions.py | 201 + .../plugins/modules/na_ontap_wait_for_condition.py | 402 ++ .../ontap/plugins/modules/na_ontap_wwpn_alias.py | 194 + .../netapp/ontap/plugins/modules/na_ontap_zapit.py | 315 + ansible_collections/netapp/ontap/requirements.txt | 9 + .../ontap/roles/na_ontap_cluster_config/LICENSE | 674 ++ .../ontap/roles/na_ontap_cluster_config/README.md | 131 + .../na_ontap_cluster_config/defaults/main.yml | 25 + .../na_ontap_cluster_config/handlers/main.yml | 2 + .../roles/na_ontap_cluster_config/meta/main.yml | 9 + .../roles/na_ontap_cluster_config/tasks/main.yml | 210 + .../roles/na_ontap_cluster_config/tests/inventory | 2 + .../roles/na_ontap_cluster_config/tests/test.yml | 5 + .../roles/na_ontap_cluster_config/vars/main.yml | 2 + .../netapp/ontap/roles/na_ontap_nas_create/LICENSE | 674 ++ .../ontap/roles/na_ontap_nas_create/README.md | 65 + .../roles/na_ontap_nas_create/defaults/main.yml | 7 + .../roles/na_ontap_nas_create/handlers/main.yml | 2 + .../ontap/roles/na_ontap_nas_create/meta/main.yml | 9 + .../ontap/roles/na_ontap_nas_create/tasks/main.yml | 63 + .../roles/na_ontap_nas_create/tests/inventory | 2 + .../ontap/roles/na_ontap_nas_create/tests/test.yml | 5 + .../ontap/roles/na_ontap_nas_create/vars/main.yml | 2 + 
.../netapp/ontap/roles/na_ontap_san_create/LICENSE | 674 ++ .../ontap/roles/na_ontap_san_create/README.md | 67 + .../roles/na_ontap_san_create/defaults/main.yml | 8 + .../roles/na_ontap_san_create/handlers/main.yml | 2 + .../ontap/roles/na_ontap_san_create/meta/main.yml | 9 + .../ontap/roles/na_ontap_san_create/tasks/main.yml | 65 + .../roles/na_ontap_san_create/tests/inventory | 2 + .../ontap/roles/na_ontap_san_create/tests/test.yml | 5 + .../ontap/roles/na_ontap_san_create/vars/main.yml | 2 + .../roles/na_ontap_snapmirror_create/README.md | 70 + .../na_ontap_snapmirror_create/defaults/main.yml | 13 + .../na_ontap_snapmirror_create/handlers/main.yml | 2 + .../roles/na_ontap_snapmirror_create/meta/main.yml | 9 + .../na_ontap_snapmirror_create/tasks/main.yml | 55 + .../na_ontap_snapmirror_create/tests/inventory | 2 + .../na_ontap_snapmirror_create/tests/test.yml | 5 + .../roles/na_ontap_snapmirror_create/vars/main.yml | 2 + .../ontap/roles/na_ontap_vserver_create/LICENSE | 674 ++ .../ontap/roles/na_ontap_vserver_create/README.md | 113 + .../na_ontap_vserver_create/defaults/main.yml | 14 + .../na_ontap_vserver_create/handlers/main.yml | 2 + .../roles/na_ontap_vserver_create/meta/main.yml | 8 + .../roles/na_ontap_vserver_create/tasks/main.yml | 198 + .../roles/na_ontap_vserver_create/tests/inventory | 2 + .../roles/na_ontap_vserver_create/tests/test.yml | 5 + .../roles/na_ontap_vserver_create/vars/main.yml | 2 + .../roles/na_ontap_vserver_delete/.travis.yml | 29 + .../ontap/roles/na_ontap_vserver_delete/README.md | 88 + .../na_ontap_vserver_delete/defaults/main.yml | 11 + .../na_ontap_vserver_delete/handlers/main.yml | 2 + .../roles/na_ontap_vserver_delete/meta/main.yml | 9 + .../tasks/assert_prereqs_and_vserver_exists.yml | 66 + .../tasks/delete_volumes.yml | 23 + .../tasks/find_and_delete_volumes.yml | 31 + .../tasks/find_and_delete_volumes_retries.yml | 8 + .../tasks/get_cifs_server.yml | 31 + .../na_ontap_vserver_delete/tasks/get_igroups.yml | 31 + 
.../tasks/get_interfaces.yml | 31 + .../na_ontap_vserver_delete/tasks/get_volumes.yml | 33 + .../roles/na_ontap_vserver_delete/tasks/main.yml | 166 + .../roles/na_ontap_vserver_delete/tests/inventory | 2 + .../roles/na_ontap_vserver_delete/tests/test.yml | 24 + .../roles/na_ontap_vserver_delete/vars/main.yml | 2 + .../netapp/ontap/tests/sanity/ignore-2.10.txt | 4 + .../netapp/ontap/tests/sanity/ignore-2.11.txt | 4 + .../netapp/ontap/tests/sanity/ignore-2.12.txt | 4 + .../netapp/ontap/tests/sanity/ignore-2.13.txt | 4 + .../netapp/ontap/tests/sanity/ignore-2.14.txt | 4 + .../netapp/ontap/tests/sanity/ignore-2.15.txt | 4 + .../netapp/ontap/tests/sanity/ignore-2.16.txt | 4 + .../netapp/ontap/tests/sanity/ignore-2.9.txt | 1 + .../netapp/ontap/tests/unit/compat/__init__.py | 0 .../netapp/ontap/tests/unit/compat/builtins.py | 34 + .../netapp/ontap/tests/unit/compat/mock.py | 122 + .../netapp/ontap/tests/unit/compat/unittest.py | 44 + .../unit/framework/mock_rest_and_zapi_requests.py | 288 + .../ontap/tests/unit/framework/rest_factory.py | 107 + .../framework/test_mock_rest_and_zapi_requests.py | 189 + ...st_mock_rest_and_zapi_requests_no_netapp_lib.py | 94 + .../tests/unit/framework/test_rest_factory.py | 44 + .../tests/unit/framework/test_zapi_factory.py | 108 + .../ontap/tests/unit/framework/ut_utilities.py | 31 + .../ontap/tests/unit/framework/zapi_factory.py | 148 + .../unit/plugins/filter/test_na_filter_iso8601.py | 66 + .../unit/plugins/module_utils/ansible_mocks.py | 181 + .../tests/unit/plugins/module_utils/test_netapp.py | 182 + .../module_utils/test_netapp_invoke_elem.py | 154 + .../plugins/module_utils/test_netapp_ipaddress.py | 95 + .../plugins/module_utils/test_netapp_module.py | 885 +++ .../unit/plugins/module_utils/test_netapp_rest.py | 586 ++ .../module_utils/test_netapp_send_request.py | 271 + .../unit/plugins/module_utils/test_netapp_sf.py | 85 + .../unit/plugins/module_utils/test_netapp_zapi.py | 374 ++ .../plugins/module_utils/test_response_helper.py | 
156 + .../plugins/module_utils/test_rest_application.py | 346 + .../unit/plugins/module_utils/test_rest_generic.py | 492 ++ .../module_utils/test_rest_owning_resource.py | 98 + .../unit/plugins/module_utils/test_rest_volume.py | 233 + .../unit/plugins/module_utils/test_rest_vserver.py | 120 + .../unit/plugins/modules/.pytest_cache/.gitignore | 2 + .../plugins/modules/.pytest_cache/CACHEDIR.TAG | 4 + .../unit/plugins/modules/.pytest_cache/README.md | 8 + .../modules/.pytest_cache/v/cache/lastfailed | 3 + .../plugins/modules/.pytest_cache/v/cache/nodeids | 6 + .../plugins/modules/.pytest_cache/v/cache/stepwise | 1 + .../modules/test_na_ontap_active_directory.py | 311 + ...na_ontap_active_directory_domain_controllers.py | 177 + .../plugins/modules/test_na_ontap_aggregate.py | 627 ++ .../modules/test_na_ontap_aggregate_rest.py | 616 ++ .../plugins/modules/test_na_ontap_autosupport.py | 264 + .../modules/test_na_ontap_autosupport_invoke.py | 103 + .../modules/test_na_ontap_bgp_peer_group.py | 211 + .../modules/test_na_ontap_broadcast_domain.py | 808 +++ .../plugins/modules/test_na_ontap_cg_snapshot.py | 81 + .../unit/plugins/modules/test_na_ontap_cifs.py | 464 ++ .../unit/plugins/modules/test_na_ontap_cifs_acl.py | 412 ++ .../modules/test_na_ontap_cifs_local_group.py | 218 + .../test_na_ontap_cifs_local_group_member.py | 338 + .../modules/test_na_ontap_cifs_local_user.py | 204 + .../test_na_ontap_cifs_local_user_modify.py | 223 + .../test_na_ontap_cifs_local_user_set_password.py | 66 + ...t_na_ontap_cifs_local_user_set_password_rest.py | 101 + .../plugins/modules/test_na_ontap_cifs_server.py | 770 +++ .../unit/plugins/modules/test_na_ontap_cluster.py | 688 ++ .../plugins/modules/test_na_ontap_cluster_ha.py | 140 + .../plugins/modules/test_na_ontap_cluster_peer.py | 305 + .../unit/plugins/modules/test_na_ontap_command.py | 246 + .../unit/plugins/modules/test_na_ontap_debug.py | 344 + .../plugins/modules/test_na_ontap_disk_options.py | 151 + 
.../unit/plugins/modules/test_na_ontap_disks.py | 822 +++ .../unit/plugins/modules/test_na_ontap_dns.py | 388 ++ .../plugins/modules/test_na_ontap_domain_tunnel.py | 145 + .../modules/test_na_ontap_efficiency_policy.py | 422 ++ .../modules/test_na_ontap_ems_destination.py | 226 + .../plugins/modules/test_na_ontap_ems_filter.py | 308 + .../plugins/modules/test_na_ontap_export_policy.py | 277 + .../modules/test_na_ontap_export_policy_rule.py | 404 ++ .../test_na_ontap_export_policy_rule_rest.py | 387 ++ .../unit/plugins/modules/test_na_ontap_fcp_rest.py | 231 + .../unit/plugins/modules/test_na_ontap_fdsd.py | 136 + .../unit/plugins/modules/test_na_ontap_fdsp.py | 134 + .../unit/plugins/modules/test_na_ontap_fdss.py | 102 + .../modules/test_na_ontap_file_directory_policy.py | 136 + .../test_na_ontap_file_security_permissions.py | 647 ++ .../test_na_ontap_file_security_permissions_acl.py | 331 + .../modules/test_na_ontap_firewall_policy.py | 263 + .../modules/test_na_ontap_firmware_upgrade.py | 891 +++ .../plugins/modules/test_na_ontap_flexcache.py | 838 +++ .../plugins/modules/test_na_ontap_fpolicy_event.py | 338 + .../modules/test_na_ontap_fpolicy_ext_engine.py | 395 ++ .../modules/test_na_ontap_fpolicy_policy.py | 339 + .../plugins/modules/test_na_ontap_fpolicy_scope.py | 351 + .../modules/test_na_ontap_fpolicy_status.py | 286 + .../unit/plugins/modules/test_na_ontap_igroup.py | 415 ++ .../modules/test_na_ontap_igroup_initiator.py | 256 + .../unit/plugins/modules/test_na_ontap_info.py | 738 ++ .../plugins/modules/test_na_ontap_interface.py | 1778 +++++ .../unit/plugins/modules/test_na_ontap_ipspace.py | 189 + .../unit/plugins/modules/test_na_ontap_iscsi.py | 339 + .../modules/test_na_ontap_iscsi_security.py | 195 + .../plugins/modules/test_na_ontap_job_schedule.py | 451 ++ .../modules/test_na_ontap_kerberos_interface.py | 107 + .../modules/test_na_ontap_kerberos_realm.py | 213 + .../plugins/modules/test_na_ontap_ldap_client.py | 481 ++ 
.../unit/plugins/modules/test_na_ontap_license.py | 432 ++ .../plugins/modules/test_na_ontap_license_nlf.py | 461 ++ .../plugins/modules/test_na_ontap_local_hosts.py | 178 + .../plugins/modules/test_na_ontap_log_forward.py | 343 + .../modules/test_na_ontap_login_messages.py | 332 + .../unit/plugins/modules/test_na_ontap_lun.py | 308 + .../plugins/modules/test_na_ontap_lun_app_rest.py | 584 ++ .../unit/plugins/modules/test_na_ontap_lun_copy.py | 113 + .../unit/plugins/modules/test_na_ontap_lun_map.py | 159 + .../test_na_ontap_lun_map_reporting_nodes.py | 170 + .../plugins/modules/test_na_ontap_lun_map_rest.py | 200 + .../unit/plugins/modules/test_na_ontap_lun_rest.py | 558 ++ .../plugins/modules/test_na_ontap_mcc_mediator.py | 124 + .../plugins/modules/test_na_ontap_metrocluster.py | 117 + .../modules/test_na_ontap_metrocluster_dr_group.py | 164 + .../unit/plugins/modules/test_na_ontap_motd.py | 164 + .../plugins/modules/test_na_ontap_name_mappings.py | 282 + .../modules/test_na_ontap_name_service_switch.py | 181 + .../unit/plugins/modules/test_na_ontap_ndmp.py | 196 + .../plugins/modules/test_na_ontap_net_ifgrp.py | 737 ++ .../unit/plugins/modules/test_na_ontap_net_port.py | 331 + .../plugins/modules/test_na_ontap_net_routes.py | 359 + .../plugins/modules/test_na_ontap_net_subnet.py | 275 + .../unit/plugins/modules/test_na_ontap_net_vlan.py | 252 + .../unit/plugins/modules/test_na_ontap_nfs.py | 338 + .../unit/plugins/modules/test_na_ontap_nfs_rest.py | 324 + .../unit/plugins/modules/test_na_ontap_node.py | 222 + .../plugins/modules/test_na_ontap_ntfs_dacl.py | 232 + .../unit/plugins/modules/test_na_ontap_ntfs_sd.py | 189 + .../unit/plugins/modules/test_na_ontap_ntp.py | 143 + .../unit/plugins/modules/test_na_ontap_ntp_key.py | 141 + .../unit/plugins/modules/test_na_ontap_nvme.py | 185 + .../modules/test_na_ontap_nvme_namespace.py | 168 + .../modules/test_na_ontap_nvme_namespace_rest.py | 121 + .../plugins/modules/test_na_ontap_nvme_rest.py | 131 + 
.../modules/test_na_ontap_nvme_subsystem.py | 225 + .../modules/test_na_ontap_nvme_subsystem_rest.py | 256 + .../plugins/modules/test_na_ontap_object_store.py | 538 ++ .../plugins/modules/test_na_ontap_partitions.py | 515 ++ .../unit/plugins/modules/test_na_ontap_ports.py | 864 +++ .../unit/plugins/modules/test_na_ontap_portset.py | 390 ++ .../plugins/modules/test_na_ontap_publickey.py | 471 ++ .../test_na_ontap_qos_adaptive_policy_group.py | 313 + .../modules/test_na_ontap_qos_policy_group.py | 578 ++ .../unit/plugins/modules/test_na_ontap_qtree.py | 404 ++ .../plugins/modules/test_na_ontap_quota_policy.py | 174 + .../unit/plugins/modules/test_na_ontap_quotas.py | 853 +++ .../unit/plugins/modules/test_na_ontap_rest_cli.py | 128 + .../plugins/modules/test_na_ontap_rest_info.py | 1195 ++++ .../unit/plugins/modules/test_na_ontap_restit.py | 346 + .../plugins/modules/test_na_ontap_s3_buckets.py | 739 +++ .../plugins/modules/test_na_ontap_s3_groups.py | 319 + .../plugins/modules/test_na_ontap_s3_policies.py | 220 + .../plugins/modules/test_na_ontap_s3_services.py | 176 + .../unit/plugins/modules/test_na_ontap_s3_users.py | 194 + .../modules/test_na_ontap_security_certificates.py | 509 ++ .../modules/test_na_ontap_security_config.py | 254 + .../test_na_ontap_security_ipsec_ca_certificate.py | 140 + .../modules/test_na_ontap_security_ipsec_config.py | 87 + .../modules/test_na_ontap_security_ipsec_policy.py | 268 + .../modules/test_na_ontap_security_key_manager.py | 804 +++ .../plugins/modules/test_na_ontap_security_ssh.py | 164 + .../modules/test_na_ontap_service_policy.py | 402 ++ .../test_na_ontap_service_processor_network.py | 296 + .../modules/test_na_ontap_snaplock_clock.py | 228 + .../plugins/modules/test_na_ontap_snapmirror.py | 1894 ++++++ .../modules/test_na_ontap_snapmirror_policy.py | 1269 ++++ .../unit/plugins/modules/test_na_ontap_snapshot.py | 363 + .../modules/test_na_ontap_snapshot_policy.py | 658 ++ .../modules/test_na_ontap_snapshot_policy_rest.py | 481 
++ .../unit/plugins/modules/test_na_ontap_snmp.py | 158 + .../modules/test_na_ontap_snmp_traphosts.py | 153 + .../modules/test_na_ontap_software_update.py | 1124 ++++ .../modules/test_na_ontap_storage_auto_giveback.py | 320 + .../modules/test_na_ontap_storage_failover.py | 350 + .../unit/plugins/modules/test_na_ontap_svm.py | 1251 ++++ .../unit/plugins/modules/test_na_ontap_template.py | 86 + .../plugins/modules/test_na_ontap_ucadapter.py | 173 + .../plugins/modules/test_na_ontap_unix_group.py | 545 ++ .../plugins/modules/test_na_ontap_unix_user.py | 465 ++ .../unit/plugins/modules/test_na_ontap_user.py | 744 +++ .../plugins/modules/test_na_ontap_user_dicts.py | 589 ++ .../plugins/modules/test_na_ontap_user_role.py | 139 + .../modules/test_na_ontap_user_role_rest.py | 647 ++ .../unit/plugins/modules/test_na_ontap_volume.py | 2011 ++++++ .../modules/test_na_ontap_volume_autosize.py | 367 + .../plugins/modules/test_na_ontap_volume_clone.py | 210 + .../modules/test_na_ontap_volume_clone_rest.py | 244 + .../modules/test_na_ontap_volume_efficiency.py | 346 + .../plugins/modules/test_na_ontap_volume_rest.py | 1440 ++++ .../modules/test_na_ontap_volume_snaplock.py | 131 + .../unit/plugins/modules/test_na_ontap_vscan.py | 200 + .../test_na_ontap_vscan_on_access_policy.py | 348 + .../modules/test_na_ontap_vscan_on_demand_task.py | 135 + .../test_na_ontap_vscan_on_demand_task_rest.py | 184 + .../modules/test_na_ontap_vscan_scanner_pool.py | 154 + .../plugins/modules/test_na_ontap_vserver_audit.py | 354 + .../modules/test_na_ontap_vserver_cifs_security.py | 111 + .../plugins/modules/test_na_ontap_vserver_peer.py | 440 ++ .../test_na_ontap_vserver_peer_permissions.py | 226 + .../modules/test_na_ontap_wait_for_condition.py | 485 ++ .../plugins/modules/test_na_ontap_wwpn_alias.py | 192 + .../unit/plugins/modules/test_na_ontap_zapit.py | 255 + .../tests/unit/plugins/modules/test_ontap_fdspt.py | 164 + .../netapp/ontap/tests/unit/requirements.txt | 7 + 
.../storagegrid/.github/workflows/coverage.yml | 45 + .../netapp/storagegrid/.github/workflows/main.yml | 48 + .../netapp/storagegrid/CHANGELOG.rst | 172 + ansible_collections/netapp/storagegrid/COPYING | 674 ++ ansible_collections/netapp/storagegrid/FILES.json | 572 ++ .../netapp/storagegrid/MANIFEST.json | 32 + ansible_collections/netapp/storagegrid/README.md | 199 + .../netapp/storagegrid/changelogs/changelog.yaml | 171 + .../netapp/storagegrid/changelogs/config.yaml | 32 + .../storagegrid/changelogs/fragments/20.10.0.yaml | 14 + .../storagegrid/changelogs/fragments/20.6.1.yaml | 4 + .../storagegrid/changelogs/fragments/20.7.0.yaml | 2 + .../storagegrid/changelogs/fragments/21.11.0.yaml | 2 + .../storagegrid/changelogs/fragments/21.11.1.yaml | 2 + .../storagegrid/changelogs/fragments/21.6.0.yaml | 4 + .../storagegrid/changelogs/fragments/21.9.0.yaml | 6 + .../changelogs/fragments/DEVOPS-4416.yaml | 2 + .../changelogs/fragments/github-10.yaml | 2 + .../changelogs/fragments/github-66.yaml | 2 + .../storagegrid/changelogs/fragments/github-8.yaml | 2 + .../netapp/storagegrid/meta/runtime.yml | 23 + .../storagegrid/plugins/doc_fragments/netapp.py | 41 + .../storagegrid/plugins/module_utils/netapp.py | 211 + .../plugins/module_utils/netapp_module.py | 237 + .../plugins/modules/na_sg_grid_account.py | 458 ++ .../plugins/modules/na_sg_grid_certificate.py | 226 + .../modules/na_sg_grid_client_certificate.py | 265 + .../storagegrid/plugins/modules/na_sg_grid_dns.py | 163 + .../plugins/modules/na_sg_grid_gateway.py | 532 ++ .../plugins/modules/na_sg_grid_group.py | 341 + .../plugins/modules/na_sg_grid_ha_group.py | 334 + .../modules/na_sg_grid_identity_federation.py | 335 + .../storagegrid/plugins/modules/na_sg_grid_info.py | 405 ++ .../storagegrid/plugins/modules/na_sg_grid_ntp.py | 173 + .../plugins/modules/na_sg_grid_regions.py | 163 + .../plugins/modules/na_sg_grid_traffic_classes.py | 375 ++ .../storagegrid/plugins/modules/na_sg_grid_user.py | 316 + 
.../plugins/modules/na_sg_org_container.py | 352 + .../storagegrid/plugins/modules/na_sg_org_group.py | 301 + .../modules/na_sg_org_identity_federation.py | 335 + .../storagegrid/plugins/modules/na_sg_org_info.py | 279 + .../storagegrid/plugins/modules/na_sg_org_user.py | 335 + .../plugins/modules/na_sg_org_user_s3_key.py | 210 + .../netapp/storagegrid/requirements.txt | 1 + .../storagegrid/tests/unit/compat/__init__.py | 0 .../storagegrid/tests/unit/compat/builtins.py | 34 + .../netapp/storagegrid/tests/unit/compat/mock.py | 125 + .../storagegrid/tests/unit/compat/unittest.py | 44 + .../plugins/modules/test_na_sg_grid_account.py | 380 ++ .../plugins/modules/test_na_sg_grid_certificate.py | 342 + .../modules/test_na_sg_grid_client_certificate.py | 347 + .../unit/plugins/modules/test_na_sg_grid_dns.py | 241 + .../plugins/modules/test_na_sg_grid_gateway.py | 693 ++ .../unit/plugins/modules/test_na_sg_grid_group.py | 317 + .../plugins/modules/test_na_sg_grid_ha_group.py | 408 ++ .../modules/test_na_sg_grid_identity_federation.py | 354 + .../unit/plugins/modules/test_na_sg_grid_info.py | 362 + .../unit/plugins/modules/test_na_sg_grid_ntp.py | 257 + .../plugins/modules/test_na_sg_grid_regions.py | 206 + .../modules/test_na_sg_grid_traffic_classes.py | 355 + .../unit/plugins/modules/test_na_sg_grid_user.py | 476 ++ .../plugins/modules/test_na_sg_org_container.py | 348 + .../unit/plugins/modules/test_na_sg_org_group.py | 403 ++ .../modules/test_na_sg_org_identity_federation.py | 354 + .../unit/plugins/modules/test_na_sg_org_info.py | 263 + .../unit/plugins/modules/test_na_sg_org_user.py | 476 ++ .../plugins/modules/test_na_sg_org_user_s3_key.py | 238 + .../netapp/storagegrid/tests/unit/requirements.txt | 1 + .../um_info/.github/ISSUE_TEMPLATE/bug_report.yml | 210 + .../.github/ISSUE_TEMPLATE/feature_request.yml | 100 + .../netapp/um_info/.github/workflows/coverage.yml | 45 + .../netapp/um_info/.github/workflows/main.yml | 47 + 
ansible_collections/netapp/um_info/CHANGELOG.rst | 78 + ansible_collections/netapp/um_info/COPYING | 674 ++ ansible_collections/netapp/um_info/FILES.json | 418 ++ ansible_collections/netapp/um_info/MANIFEST.json | 34 + ansible_collections/netapp/um_info/README.md | 83 + .../netapp/um_info/changelogs/changelog.yaml | 72 + .../netapp/um_info/changelogs/config.yaml | 32 + .../um_info/changelogs/fragments/20.7.0.yaml | 3 + .../um_info/changelogs/fragments/DEVOPS-2952.yaml | 4 + .../um_info/changelogs/fragments/DEVOPS-3920.yaml | 2 + .../um_info/changelogs/fragments/DEVOPS-3962.yaml | 6 + .../um_info/changelogs/fragments/DEVOPS-4059.yaml | 2 + .../um_info/changelogs/fragments/DEVOPS-4087.yaml | 4 + .../um_info/changelogs/fragments/DEVOPS-4416.yaml | 2 + .../netapp/um_info/meta/runtime.yml | 9 + .../netapp/um_info/plugins/doc_fragments/netapp.py | 74 + .../netapp/um_info/plugins/module_utils/netapp.py | 246 + .../um_info/plugins/module_utils/netapp_module.py | 51 + .../plugins/modules/na_um_aggregates_info.py | 163 + .../um_info/plugins/modules/na_um_clusters_info.py | 152 + .../plugins/modules/na_um_list_aggregates.py | 163 + .../um_info/plugins/modules/na_um_list_clusters.py | 152 + .../um_info/plugins/modules/na_um_list_nodes.py | 145 + .../um_info/plugins/modules/na_um_list_svms.py | 174 + .../um_info/plugins/modules/na_um_list_volumes.py | 133 + .../um_info/plugins/modules/na_um_nodes_info.py | 145 + .../um_info/plugins/modules/na_um_svms_info.py | 174 + .../um_info/plugins/modules/na_um_volumes_info.py | 133 + .../netapp/um_info/requirements.txt | 1 + .../netapp/um_info/tests/unit/compat/__init__.py | 0 .../netapp/um_info/tests/unit/compat/builtins.py | 33 + .../netapp/um_info/tests/unit/compat/mock.py | 122 + .../netapp/um_info/tests/unit/compat/unittest.py | 44 + .../tests/unit/plugins/module_utils/test_netapp.py | 236 + .../plugins/modules/test_na_um_aggregates_info.py | 159 + .../plugins/modules/test_na_um_clusters_info.py | 159 + 
.../unit/plugins/modules/test_na_um_nodes_info.py | 158 + .../unit/plugins/modules/test_na_um_svms_info.py | 158 + .../plugins/modules/test_na_um_volumes_info.py | 158 + .../netapp/um_info/tests/unit/requirements.txt | 1 + 1349 files changed, 221329 insertions(+) create mode 100644 ansible_collections/netapp/aws/.github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 ansible_collections/netapp/aws/.github/ISSUE_TEMPLATE/feature_request.yml create mode 100644 ansible_collections/netapp/aws/.github/workflows/coverage.yml create mode 100644 ansible_collections/netapp/aws/.github/workflows/main.yml create mode 100644 ansible_collections/netapp/aws/CHANGELOG.rst create mode 100644 ansible_collections/netapp/aws/COPYING create mode 100644 ansible_collections/netapp/aws/FILES.json create mode 100644 ansible_collections/netapp/aws/MANIFEST.json create mode 100644 ansible_collections/netapp/aws/README.md create mode 100644 ansible_collections/netapp/aws/changelogs/changelog.yaml create mode 100644 ansible_collections/netapp/aws/changelogs/config.yaml create mode 100644 ansible_collections/netapp/aws/changelogs/fragments/20.2.0.yaml create mode 100644 ansible_collections/netapp/aws/changelogs/fragments/20.6.0.yaml create mode 100644 ansible_collections/netapp/aws/changelogs/fragments/20.8.0.yaml create mode 100644 ansible_collections/netapp/aws/changelogs/fragments/20.9.0.yaml create mode 100644 ansible_collections/netapp/aws/changelogs/fragments/2019.10.0.yaml create mode 100644 ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-3569.yaml create mode 100644 ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-3644.yaml create mode 100644 ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-4416.yaml create mode 100644 ansible_collections/netapp/aws/meta/runtime.yml create mode 100644 ansible_collections/netapp/aws/plugins/doc_fragments/netapp.py create mode 100644 ansible_collections/netapp/aws/plugins/module_utils/netapp.py create mode 100644 
ansible_collections/netapp/aws/plugins/module_utils/netapp_module.py create mode 100644 ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_active_directory.py create mode 100644 ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_filesystems.py create mode 100644 ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_pool.py create mode 100644 ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_snapshots.py create mode 100644 ansible_collections/netapp/aws/requirements.txt create mode 100644 ansible_collections/netapp/aws/tests/unit/compat/__init__.py create mode 100644 ansible_collections/netapp/aws/tests/unit/compat/builtins.py create mode 100644 ansible_collections/netapp/aws/tests/unit/compat/mock.py create mode 100644 ansible_collections/netapp/aws/tests/unit/compat/unittest.py create mode 100644 ansible_collections/netapp/aws/tests/unit/plugins/module_utils/test_netapp.py create mode 100644 ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_active_directory.py create mode 100644 ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_filesystems.py create mode 100644 ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_pool.py create mode 100644 ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_snapshots.py create mode 100644 ansible_collections/netapp/aws/tests/unit/requirements.txt create mode 100644 ansible_collections/netapp/azure/.github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 ansible_collections/netapp/azure/.github/ISSUE_TEMPLATE/feature_request.yml create mode 100644 ansible_collections/netapp/azure/.github/workflows/coverage.yml create mode 100644 ansible_collections/netapp/azure/.github/workflows/main.yml create mode 100644 ansible_collections/netapp/azure/CHANGELOG.rst create mode 100644 ansible_collections/netapp/azure/COPYING create mode 100644 ansible_collections/netapp/azure/FILES.json create mode 
100644 ansible_collections/netapp/azure/HACK.md create mode 100644 ansible_collections/netapp/azure/MANIFEST.json create mode 100644 ansible_collections/netapp/azure/README.md create mode 100644 ansible_collections/netapp/azure/changelogs/.plugin-cache.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/changelog.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/config.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/20.2.0.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/20.4.0.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/20.5.0.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/20.6.0.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/20.7.0.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/20.8.0.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3505.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3526.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3663.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3704.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3849.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3935.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3949.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4001.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4070.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4135.yaml create mode 100644 ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4246.yaml create mode 100644 
ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4416.yaml create mode 100644 ansible_collections/netapp/azure/meta/runtime.yml create mode 100644 ansible_collections/netapp/azure/plugins/doc_fragments/azure.py create mode 100644 ansible_collections/netapp/azure/plugins/doc_fragments/azure_tags.py create mode 100644 ansible_collections/netapp/azure/plugins/doc_fragments/netapp.py create mode 100644 ansible_collections/netapp/azure/plugins/module_utils/azure_rm_netapp_common.py create mode 100644 ansible_collections/netapp/azure/plugins/module_utils/netapp_module.py create mode 100644 ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_account.py create mode 100644 ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_capacity_pool.py create mode 100644 ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_snapshot.py create mode 100644 ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_volume.py create mode 100644 ansible_collections/netapp/azure/requirements.txt create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/aliases create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/meta/main.yml create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/tasks/main.yml create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/aliases create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/meta/main.yml create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/tasks/main.yml create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/aliases create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/meta/main.yml create mode 100644 
ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/tasks/main.yml create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/aliases create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/meta/main.yml create mode 100644 ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/tasks/main.yml create mode 100644 ansible_collections/netapp/azure/tests/runner/requirements/integration.cloud.azure.txt create mode 100644 ansible_collections/netapp/azure/tests/runner/requirements/requirements-azure.txt create mode 100644 ansible_collections/netapp/azure/tests/runner/requirements/unit.cloud.azure.txt create mode 100644 ansible_collections/netapp/azure/tests/runner/requirements/units.txt create mode 100644 ansible_collections/netapp/azure/tests/unit/compat/__init__.py create mode 100644 ansible_collections/netapp/azure/tests/unit/compat/builtins.py create mode 100644 ansible_collections/netapp/azure/tests/unit/compat/mock.py create mode 100644 ansible_collections/netapp/azure/tests/unit/compat/unittest.py create mode 100644 ansible_collections/netapp/azure/tests/unit/plugins/module_utils/test_netapp_module.py create mode 100644 ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_account.py create mode 100644 ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_capacity_pool.py create mode 100644 ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_snapshot.py create mode 100644 ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_volume.py create mode 100644 ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_volume_import.py create mode 100644 ansible_collections/netapp/azure/tests/unit/requirements.txt create mode 100644 
ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/feature_request.yml create mode 100644 ansible_collections/netapp/cloudmanager/.github/workflows/coverage.yml create mode 100644 ansible_collections/netapp/cloudmanager/.github/workflows/main.yml create mode 100644 ansible_collections/netapp/cloudmanager/CHANGELOG.rst create mode 100644 ansible_collections/netapp/cloudmanager/COPYING create mode 100644 ansible_collections/netapp/cloudmanager/FILES.json create mode 100644 ansible_collections/netapp/cloudmanager/MANIFEST.json create mode 100644 ansible_collections/netapp/cloudmanager/README.md create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/.plugin-cache.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/changelog.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/config.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3803.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3844.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3909.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3910.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3911.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3912.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3913.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3922.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3946.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3947.yaml create mode 100644 
ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3948.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3965.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3967.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3975.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3984.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3985.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3995.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4021.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4065.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4105.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4118.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4136.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4164.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4200.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4201.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4205.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4223.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4264.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4267.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4271.yaml create mode 100644 
ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4281.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4292.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4298.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4303.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4321.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4327.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4328.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4358.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4386.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4416.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4458.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4492.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4500.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4513.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4516.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4542.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4563.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4567.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4647.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4703.yaml create mode 100644 
ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4758.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4820.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5002.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5151.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5252.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5307.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5342.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5366.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5437.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5452.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5472.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5527.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5540.yaml create mode 100644 ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5562.yaml create mode 100644 ansible_collections/netapp/cloudmanager/execution_environments/README.md create mode 100644 ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/execution-environment.yml create mode 100644 ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/requirements.yml create mode 100644 ansible_collections/netapp/cloudmanager/execution_environments/from_github/execution-environment.yml create mode 100644 ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml create mode 100644 ansible_collections/netapp/cloudmanager/execution_environments/requirements.txt create 
mode 100644 ansible_collections/netapp/cloudmanager/kubectl.sha256 create mode 100644 ansible_collections/netapp/cloudmanager/meta/execution-environment.yml create mode 100644 ansible_collections/netapp/cloudmanager/meta/runtime.yml create mode 100644 ansible_collections/netapp/cloudmanager/plugins/README.md create mode 100644 ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_aws.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_azure.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py create mode 100644 ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py create mode 100644 
ansible_collections/netapp/cloudmanager/requirements.txt create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/compat/__init__.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/compat/builtins.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/compat/mock.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/compat/unittest.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module_open.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_info.py create mode 100644 
ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_volume.py create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/requirements-azure.txt create mode 100644 ansible_collections/netapp/cloudmanager/tests/unit/requirements.txt create mode 100644 ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/feature_request.yml create mode 100644 ansible_collections/netapp/elementsw/.github/workflows/coverage.yml create mode 100644 ansible_collections/netapp/elementsw/.github/workflows/main.yml create mode 100644 ansible_collections/netapp/elementsw/CHANGELOG.rst create mode 100644 ansible_collections/netapp/elementsw/FILES.json create mode 100644 ansible_collections/netapp/elementsw/MANIFEST.json create mode 100644 ansible_collections/netapp/elementsw/README.md create mode 100644 ansible_collections/netapp/elementsw/changelogs/changelog.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/config.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml create mode 100644 
ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3731.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3733.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3734.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3800.yaml create mode 100644 ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-4416.yaml create mode 100644 ansible_collections/netapp/elementsw/meta/runtime.yml create mode 100644 ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py create mode 100644 ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py create mode 100644 ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py create mode 100644 ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_check_connections.py create mode 100644 
ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_config.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py create mode 100644 ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py create mode 100644 ansible_collections/netapp/elementsw/requirements.txt create mode 100644 ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py create mode 100644 
ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/compat/mock.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py create mode 100644 ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py create mode 100644 
ansible_collections/netapp/elementsw/tests/unit/requirements.txt create mode 100644 ansible_collections/netapp/ontap/.github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 ansible_collections/netapp/ontap/.github/ISSUE_TEMPLATE/feature_request.yml create mode 100644 ansible_collections/netapp/ontap/.github/workflows/codeql-analysis.yml create mode 100644 ansible_collections/netapp/ontap/.github/workflows/coverage.yml create mode 100644 ansible_collections/netapp/ontap/.github/workflows/main.yml create mode 100644 ansible_collections/netapp/ontap/CHANGELOG.rst create mode 100644 ansible_collections/netapp/ontap/COPYING create mode 100644 ansible_collections/netapp/ontap/FILES.json create mode 100644 ansible_collections/netapp/ontap/MANIFEST.json create mode 100644 ansible_collections/netapp/ontap/README.md create mode 100644 ansible_collections/netapp/ontap/changelogs/.DS_Store create mode 100644 ansible_collections/netapp/ontap/changelogs/.plugin-cache.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/changelog.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/config.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/0-copy_ignore_txt.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/19.10.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/19.11.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.1.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.2.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.3.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.4.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.4.1.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.5.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.6.0.yaml create 
mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.6.1.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.7.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.8.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/20.9.0.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1661.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1665.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1926.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2353.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2422.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2426.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2459.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2459b.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2491.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2668.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2928.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2964.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2965.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2972.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3113.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3137.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3139.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3148.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3149.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3167.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3175.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3178.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3181.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3194.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3230.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3241.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3242.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3251.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3262.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3304.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3310.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3312.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3329.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3346.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3354.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3358.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3366.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3367.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3368.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3369.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3370.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3371.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3385.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3386.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3390.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3392.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3399.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3400.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3401.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3439.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3442.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3443.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3454.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3479.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3480.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3483.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3490.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3494.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3497.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3501.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3510.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3515.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3534.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3535.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3536.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3540.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3542.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3543.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3571.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3579.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3580.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3595.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3615.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3623.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3625.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3626.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3628.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3632.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3633.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3649.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3654.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3655.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3662.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3667.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3668.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3671.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3677.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3685.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3716.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3718.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3754.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3757.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3767.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3772.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3801.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3807.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3811.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3812.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3830.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3850.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3870.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3883.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3900.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3926.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3939.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3950.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3952.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3969.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3971.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3973.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3983.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3994.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4005.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4010.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4022.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4026.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4031.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4039.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4048.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4049.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4060.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4079.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4113.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4114.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4116.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4119.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4121.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4122.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4123.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4140.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4150.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4157.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4159.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4161.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4175.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4177.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4179.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4190.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4191.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4196.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4197.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4206.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4218.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4227.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4228.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4231.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4235.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4243.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4255.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4256.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4270.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4288.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4289.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4300.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4312.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4319.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4320.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4325.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4329.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4331.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4332.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4333.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4334.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4335.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4336.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4337.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4338.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4339.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4340.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4341.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4342.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4343.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4344.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4345.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4347.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4348.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4349.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4350.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4367.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4391.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4392.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4393.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4394.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4399.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4401.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4404.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4415.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4417.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4435.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4439.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4449.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4457.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4459.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4460.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4465.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4479.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4487.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4501.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4508.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4526.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4527.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4540.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4554.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4565.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4566.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4568.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4573.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4577.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4588.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4604.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4605.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4606.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4609.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4612.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4621.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4623.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4644.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4645.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4648.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4676.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4679.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4691.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4711.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4716.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4719.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4729.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4731.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4735.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4736.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4737.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4743.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4745.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4747.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4762.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4763.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4764.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4767.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4769.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4770.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4771.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4773.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4774.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4775.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4776.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4779.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4780.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4781.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4784.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4785.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4786.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4788.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4789.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4790.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4794.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4798.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4799.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4800.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4801.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4802.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4803.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4804.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4807.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4808.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4809.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4813.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4818.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4830.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4832.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4834.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4857.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4862.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4863.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4864.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4872.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4879.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4882.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4898.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4975.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4981.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4984.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4985.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4998.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5015.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5016.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5017.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5019.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5026.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5034.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5047.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5062.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5063.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5065.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5068.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5079.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5082.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5084.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5085.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5090.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5092.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5109.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5121.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5127.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5136.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5137.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5138.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5152.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5161.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5168.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5174.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5179.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5188.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5189.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5190.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5195.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5215.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5216.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5220.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5223.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5228.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5229.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5241.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5243.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5246.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5251.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5263.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5268.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5270.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5271.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5275.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5285.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5287.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5297.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5299.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5304.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5310.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5312.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5338.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5344.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5354.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5367.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5380.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5409.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5412.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5413.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5414.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5415.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5426.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5427.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5430.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5431.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5453.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5457.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5479.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5481.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5484.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5485.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5487.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5503.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5504.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5505.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5506.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5507.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5531.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5532.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5536.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5537.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5540.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5548.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5589.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5591.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5592.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5594.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5595.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5596.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5604.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5606.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5611.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5626.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5628.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5629.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5659.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5662.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5665.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5666.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5671.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5677.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5678.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5696.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5711.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5713.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5725.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5733.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5734.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5735.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5737.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5738.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5757.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5760.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5761.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5774.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5784.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5788.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5790.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5807.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5808.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5809.yml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5812.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5816.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5819.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5820.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5844.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5845.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5859.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5892.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5894.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5899.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5910.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5913.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5917.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5919.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5926.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5938.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5948.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5952.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5960.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5972.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5983.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5986.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6001.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6005.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6014.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6015.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6191.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6192.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6193.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6195.yaml create mode 100644 
ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6209.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6233.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6235.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6262.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6266.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/github-110.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/github-56.yaml create mode 100644 ansible_collections/netapp/ontap/changelogs/fragments/no-story-1.yaml create mode 100644 ansible_collections/netapp/ontap/execution_environments/README.md create mode 100644 ansible_collections/netapp/ontap/execution_environments/from_galaxy/execution-environment.yml create mode 100644 ansible_collections/netapp/ontap/execution_environments/from_galaxy/requirements.yml create mode 100644 ansible_collections/netapp/ontap/execution_environments/from_github/execution-environment.yml create mode 100644 ansible_collections/netapp/ontap/execution_environments/from_github/requirements.yml create mode 100644 ansible_collections/netapp/ontap/execution_environments/requirements.txt create mode 100644 ansible_collections/netapp/ontap/meta/execution-environment.yml create mode 100644 ansible_collections/netapp/ontap/meta/runtime.yml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/README.md create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/filter/test_na_filter_iso8601.yaml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/json_query/README.md create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes_loop.yml create mode 100644 
ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate.yml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/ontap_vars_file.yml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/rest_apis/clusters.yaml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/rest_apis/list_aggregates.yaml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/rest_apis/volumes.yml create mode 100644 ansible_collections/netapp/ontap/playbooks/examples/support/debug_connectivity.yaml create mode 100644 ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py create mode 100644 ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_from_seconds.yml create mode 100644 ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_to_seconds.yml create mode 100644 ansible_collections/netapp/ontap/plugins/filter/na_filter_iso8601.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/netapp.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/netapp_ipaddress.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/netapp_module.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/rest_generic.py create mode 100644 
ansible_collections/netapp/ontap/plugins/module_utils/rest_owning_resource.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/rest_user.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/rest_volume.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/rest_vserver.py create mode 100644 ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory_domain_controllers.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_bgp_peer_group.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_acl.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group_member.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_modify.py create mode 100644 
ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_set_password.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_debug.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_disk_options.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_disks.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_dns.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_domain_tunnel.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_efficiency_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_destination.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_filter.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsd.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsp.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdspt.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdss.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_directory_policy.py create 
mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions_acl.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_flexcache.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_event.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_ext_engine.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_scope.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_status.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_job_schedule.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_interface.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_realm.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py create mode 100644 
ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_local_hosts.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_log_forward.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_copy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun_map_reporting_nodes.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_mappings.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ndmp.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py create 
mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp_key.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_partitions.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_publickey.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_buckets.py create mode 100644 
ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_groups.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_policies.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_services.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_users.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_config.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_ca_certificate.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_config.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ssh.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_service_processor_network.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_snaplock_clock.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py 
create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_auto_giveback.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_failover.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_user.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_efficiency.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_audit.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py create mode 100644 
ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer_permissions.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py create mode 100644 ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py create mode 100644 ansible_collections/netapp/ontap/requirements.txt create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/LICENSE create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/README.md create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/defaults/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/handlers/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/meta/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tasks/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/inventory create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/test.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/vars/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_nas_create/LICENSE create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_nas_create/README.md create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_nas_create/defaults/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_nas_create/handlers/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_nas_create/meta/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tasks/main.yml create mode 100644 
ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/inventory create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/test.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_nas_create/vars/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_san_create/LICENSE create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_san_create/README.md create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_san_create/defaults/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_san_create/handlers/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_san_create/meta/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_san_create/tasks/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/inventory create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/test.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_san_create/vars/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/README.md create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/defaults/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/handlers/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/meta/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tasks/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/inventory create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/test.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/vars/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/LICENSE create mode 100644 
ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/README.md create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/defaults/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/handlers/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/meta/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tasks/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/inventory create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/test.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/vars/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/.travis.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/README.md create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/defaults/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/handlers/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/meta/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/assert_prereqs_and_vserver_exists.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/delete_volumes.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes_retries.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_cifs_server.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_igroups.yml create mode 100644 
ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_interfaces.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_volumes.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/main.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tests/inventory create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tests/test.yml create mode 100644 ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/vars/main.yml create mode 100644 ansible_collections/netapp/ontap/tests/sanity/ignore-2.10.txt create mode 100644 ansible_collections/netapp/ontap/tests/sanity/ignore-2.11.txt create mode 100644 ansible_collections/netapp/ontap/tests/sanity/ignore-2.12.txt create mode 100644 ansible_collections/netapp/ontap/tests/sanity/ignore-2.13.txt create mode 100644 ansible_collections/netapp/ontap/tests/sanity/ignore-2.14.txt create mode 100644 ansible_collections/netapp/ontap/tests/sanity/ignore-2.15.txt create mode 100644 ansible_collections/netapp/ontap/tests/sanity/ignore-2.16.txt create mode 100644 ansible_collections/netapp/ontap/tests/sanity/ignore-2.9.txt create mode 100644 ansible_collections/netapp/ontap/tests/unit/compat/__init__.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/compat/builtins.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/compat/mock.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/compat/unittest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/framework/mock_rest_and_zapi_requests.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/framework/rest_factory.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/framework/test_mock_rest_and_zapi_requests.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/framework/test_mock_rest_and_zapi_requests_no_netapp_lib.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/framework/test_rest_factory.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/framework/test_zapi_factory.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/framework/ut_utilities.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/framework/zapi_factory.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/filter/test_na_filter_iso8601.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/ansible_mocks.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_invoke_elem.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_ipaddress.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_module.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_send_request.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_sf.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_zapi.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_response_helper.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_application.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_generic.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_owning_resource.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_volume.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_vserver.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/.gitignore create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/CACHEDIR.TAG create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/README.md create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/lastfailed create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/nodeids create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/stepwise create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_active_directory.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_active_directory_domain_controllers.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_bgp_peer_group.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_acl.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_group.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_group_member.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_modify.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_server.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_ha.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_peer.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_command.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_debug.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_disk_options.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_disks.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_dns.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_domain_tunnel.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ems_destination.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ems_filter.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fcp_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdsd.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdsp.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdss.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_security_permissions.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_security_permissions_acl.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firewall_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firmware_upgrade.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_flexcache.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_event.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_ext_engine.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_scope.py create mode 
100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_status.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_info.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_interface.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ipspace.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi_security.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_job_schedule.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_interface.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ldap_client.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_license.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_license_nlf.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_local_hosts.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_log_forward.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_login_messages.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_app_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_copy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map_reporting_nodes.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_motd.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_mappings.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_service_switch.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ndmp.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_port.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_routes.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_subnet.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_vlan.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_node.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntp.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntp_key.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_object_store.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_partitions.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ports.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_portset.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_publickey.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qtree.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quota_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quotas.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_cli.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_info.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_restit.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_buckets.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_groups.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_policies.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_services.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_users.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_certificates.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_config.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_ca_certificate.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_config.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_policy.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_key_manager.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ssh.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_processor_network.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snaplock_clock.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_software_update.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_storage_auto_giveback.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_storage_failover.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_svm.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_template.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ucadapter.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_group.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_user.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_dicts.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_autosize.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_efficiency.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task_rest.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py create mode 100644 
ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_audit.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_cifs_security.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer_permissions.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wait_for_condition.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_zapit.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_ontap_fdspt.py create mode 100644 ansible_collections/netapp/ontap/tests/unit/requirements.txt create mode 100644 ansible_collections/netapp/storagegrid/.github/workflows/coverage.yml create mode 100644 ansible_collections/netapp/storagegrid/.github/workflows/main.yml create mode 100644 ansible_collections/netapp/storagegrid/CHANGELOG.rst create mode 100644 ansible_collections/netapp/storagegrid/COPYING create mode 100644 ansible_collections/netapp/storagegrid/FILES.json create mode 100644 ansible_collections/netapp/storagegrid/MANIFEST.json create mode 100644 ansible_collections/netapp/storagegrid/README.md create mode 100644 ansible_collections/netapp/storagegrid/changelogs/changelog.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/config.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/20.10.0.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/20.6.1.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/20.7.0.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/21.11.0.yaml 
create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/21.11.1.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/21.6.0.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/21.9.0.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/DEVOPS-4416.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/github-10.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/github-66.yaml create mode 100644 ansible_collections/netapp/storagegrid/changelogs/fragments/github-8.yaml create mode 100644 ansible_collections/netapp/storagegrid/meta/runtime.yml create mode 100644 ansible_collections/netapp/storagegrid/plugins/doc_fragments/netapp.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/module_utils/netapp.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/module_utils/netapp_module.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_account.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_certificate.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_client_certificate.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_dns.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_gateway.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_group.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_ha_group.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_identity_federation.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_info.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_ntp.py create mode 100644 
ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_regions.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_traffic_classes.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_user.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_container.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_group.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_identity_federation.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_info.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_user.py create mode 100644 ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_user_s3_key.py create mode 100644 ansible_collections/netapp/storagegrid/requirements.txt create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/compat/__init__.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/compat/builtins.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/compat/mock.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/compat/unittest.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_account.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_certificate.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_client_certificate.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_dns.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_gateway.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_group.py create mode 100644 
ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_ha_group.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_identity_federation.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_info.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_ntp.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_regions.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_traffic_classes.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_user.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_container.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_group.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_identity_federation.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_info.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_user.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_user_s3_key.py create mode 100644 ansible_collections/netapp/storagegrid/tests/unit/requirements.txt create mode 100644 ansible_collections/netapp/um_info/.github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 ansible_collections/netapp/um_info/.github/ISSUE_TEMPLATE/feature_request.yml create mode 100644 ansible_collections/netapp/um_info/.github/workflows/coverage.yml create mode 100644 ansible_collections/netapp/um_info/.github/workflows/main.yml create mode 100644 ansible_collections/netapp/um_info/CHANGELOG.rst create mode 100644 
ansible_collections/netapp/um_info/COPYING create mode 100644 ansible_collections/netapp/um_info/FILES.json create mode 100644 ansible_collections/netapp/um_info/MANIFEST.json create mode 100644 ansible_collections/netapp/um_info/README.md create mode 100644 ansible_collections/netapp/um_info/changelogs/changelog.yaml create mode 100644 ansible_collections/netapp/um_info/changelogs/config.yaml create mode 100644 ansible_collections/netapp/um_info/changelogs/fragments/20.7.0.yaml create mode 100644 ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-2952.yaml create mode 100644 ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-3920.yaml create mode 100644 ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-3962.yaml create mode 100644 ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4059.yaml create mode 100644 ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4087.yaml create mode 100644 ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4416.yaml create mode 100644 ansible_collections/netapp/um_info/meta/runtime.yml create mode 100644 ansible_collections/netapp/um_info/plugins/doc_fragments/netapp.py create mode 100644 ansible_collections/netapp/um_info/plugins/module_utils/netapp.py create mode 100644 ansible_collections/netapp/um_info/plugins/module_utils/netapp_module.py create mode 100644 ansible_collections/netapp/um_info/plugins/modules/na_um_aggregates_info.py create mode 100644 ansible_collections/netapp/um_info/plugins/modules/na_um_clusters_info.py create mode 100644 ansible_collections/netapp/um_info/plugins/modules/na_um_list_aggregates.py create mode 100644 ansible_collections/netapp/um_info/plugins/modules/na_um_list_clusters.py create mode 100644 ansible_collections/netapp/um_info/plugins/modules/na_um_list_nodes.py create mode 100644 ansible_collections/netapp/um_info/plugins/modules/na_um_list_svms.py create mode 100644 
ansible_collections/netapp/um_info/plugins/modules/na_um_list_volumes.py create mode 100644 ansible_collections/netapp/um_info/plugins/modules/na_um_nodes_info.py create mode 100644 ansible_collections/netapp/um_info/plugins/modules/na_um_svms_info.py create mode 100644 ansible_collections/netapp/um_info/plugins/modules/na_um_volumes_info.py create mode 100644 ansible_collections/netapp/um_info/requirements.txt create mode 100644 ansible_collections/netapp/um_info/tests/unit/compat/__init__.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/compat/builtins.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/compat/mock.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/compat/unittest.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/plugins/module_utils/test_netapp.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_aggregates_info.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_clusters_info.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_nodes_info.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_svms_info.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_volumes_info.py create mode 100644 ansible_collections/netapp/um_info/tests/unit/requirements.txt (limited to 'ansible_collections/netapp') diff --git a/ansible_collections/netapp/aws/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/netapp/aws/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..2767dba53 --- /dev/null +++ b/ansible_collections/netapp/aws/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,210 @@ +--- +name: 🐛 Bug report +description: Create a report to help us improve + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to report a bug in 
netapp.aws!** + + + ⚠ + Verify first that your issue is not [already reported on + GitHub][issue search] and keep in mind that we may have to keep + the current behavior because [every change breaks someone's + workflow][XKCD 1172]. + We try to be mindful about this. + + Also test if the latest release and devel branch are affected too. + + + **Tip:** If you are seeking community support, please consider + [joining our Slack community][ML||IRC]. + + + + [ML||IRC]: + https://join.slack.com/t/netapppub/shared_invite/zt-njcjx2sh-1VR2mEDvPcJAmPutOnP~mg + + [issue search]: ../search?q=is%3Aissue&type=issues + + [XKCD 1172]: https://xkcd.com/1172/ + + +- type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. + placeholder: >- + When I try to do X with netapp.aws from the devel branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the rst file, module, plugin, task or + feature below, *use your best guess if unsure*. + + + **Tip:** Cannot find it in this repository? Please be advised that + the source for some parts of the documentation is hosted outside + of this repository. If the page you are reporting describes + modules/plugins/etc that are not officially supported by the + Ansible Core Engineering team, there is a good chance that it is + coming from one of the [Ansible Collections maintained by the + community][collections org]. If this is the case, please make sure + to file an issue under the appropriate project there instead. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` below, under + the prompt line. 
Please don't wrap it with triple backticks — your + whole input will be turned into a code snippet automatically. + render: console + value: | + $ ansible --version + placeholder: | + $ ansible --version + ansible [core 2.11.0b4.post0] (detached HEAD ref: refs/) last updated 2021/04/02 00:33:35 (GMT +200) + config file = None + configured module search path = ['~/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible python module location = ~/src/github/ansible/ansible/lib/ansible + ansible collection location = ~/.ansible/collections:/usr/share/ansible/collections + executable location = bin/ansible + python version = 3.9.0 (default, Oct 26 2020, 13:08:59) [GCC 10.2.0] + jinja version = 2.11.3 + libyaml = True + validations: + required: true + +- type: textarea + attributes: + label: CVS for AWS Collection Version + description: >- + CVS for AWS Collection Version. Run `ansible-galaxy collection list` and copy the entire output + render: console + value: | + $ ansible-galaxy collection list + validations: + required: true + +- type: textarea + attributes: + label: Playbook + description: >- + The task from the playbook that is giving you the issue + render: console + validations: + required: true + +- type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: | + 1. Implement the following playbook: + + ```yaml + --- + # ping.yml + - hosts: all + gather_facts: false + tasks: + - ping: + ... + ``` + 2. Then run `ANSIBLE_DEBUG=1 ansible-playbook ping.yml -vvvvv` + 3. An error occurs. 
+ validations: + required: true + +- type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y and was shocked + that it did not. + validations: + required: true + +- type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output and don't wrap it with tripple backticks — your + whole input will be turned into a code snippet automatically. + render: console + placeholder: >- + Certificate did not match expected hostname: files.pythonhosted.org. Certificate: {'notAfter': 'Apr 28 19:20:25 2021 GMT', 'subjectAltName': ((u'DNS', 'r.ssl.fastly.net'), (u'DNS', '*.catchpoint.com'), (u'DNS', '*.cnn.io'), (u'DNS', '*.dollarshaveclub.com'), (u'DNS', '*.eater.com'), (u'DNS', '*.fastly.picmonkey.com'), (u'DNS', '*.files.saymedia-content.com'), (u'DNS', '*.ft.com'), (u'DNS', '*.meetupstatic.com'), (u'DNS', '*.nfl.com'), (u'DNS', '*.pagar.me'), (u'DNS', '*.picmonkey.com'), (u'DNS', '*.realself.com'), (u'DNS', '*.sbnation.com'), (u'DNS', '*.shakr.com'), (u'DNS', '*.streamable.com'), (u'DNS', '*.surfly.com'), (u'DNS', '*.theverge.com'), (u'DNS', '*.thrillist.com'), (u'DNS', '*.vox-cdn.com'), (u'DNS', '*.vox.com'), (u'DNS', '*.voxmedia.com'), (u'DNS', 'eater.com'), (u'DNS', 'ft.com'), (u'DNS', 'i.gse.io'), (u'DNS', 'picmonkey.com'), (u'DNS', 'realself.com'), (u'DNS', 'static.wixstatic.com'), (u'DNS', 'streamable.com'), (u'DNS', 'surfly.com'), (u'DNS', 'theverge.com'), (u'DNS', 'vox-cdn.com'), (u'DNS', 'vox.com'), (u'DNS', 'www.joyent.com')), 'subject': ((('countryName', u'US'),), (('stateOrProvinceName', u'California'),), (('localityName', u'San Francisco'),), (('organizationName', u'Fastly, Inc'),), (('commonName', u'r.ssl.fastly.net'),))} + Exception: + Traceback (most recent call last): + File 
"/usr/local/lib/python2.6/dist-packages/pip/basecommand.py", line 215, in main + status = self.run(options, args) + File "/usr/local/lib/python2.6/dist-packages/pip/commands/install.py", line 335, in run + wb.build(autobuilding=True) + File "/usr/local/lib/python2.6/dist-packages/pip/wheel.py", line 749, in build + self.requirement_set.prepare_files(self.finder) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 380, in prepare_files + ignore_dependencies=self.ignore_dependencies)) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 620, in _prepare_file + session=self.session, hashes=hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 821, in unpack_url + hashes=hashes + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 659, in unpack_http_url + hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 853, in _download_http_url + stream=True, + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 521, in get + return self.request('GET', url, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 386, in request + return super(PipSession, self).request(method, url, *args, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 508, in request + resp = self.send(prep, **send_kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 618, in send + r = adapter.send(request, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/cachecontrol/adapter.py", line 47, in send + resp = super(CacheControlAdapter, self).send(request, **kw) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/adapters.py", line 506, in send + raise SSLError(e, request=request) + SSLError: HTTPSConnectionPool(host='files.pythonhosted.org', port=443): Max retries exceeded with url: 
/packages/ef/ab/aa12712415809bf698e719b307419f953e25344e8f42d557533d7a02b276/netapp_lib-2020.7.16-py2-none-any.whl (Caused by SSLError(CertificateError("hostname 'files.pythonhosted.org' doesn't match either of 'r.ssl.fastly.net', '*.catchpoint.com', '*.cnn.io', '*.dollarshaveclub.com', '*.eater.com', '*.fastly.picmonkey.com', '*.files.saymedia-content.com', '*.ft.com', '*.meetupstatic.com', '*.nfl.com', '*.pagar.me', '*.picmonkey.com', '*.realself.com', '*.sbnation.com', '*.shakr.com', '*.streamable.com', '*.surfly.com', '*.theverge.com', '*.thrillist.com', '*.vox-cdn.com', '*.vox.com', '*.voxmedia.com', 'eater.com', 'ft.com', 'i.gse.io', 'picmonkey.com', 'realself.com', 'static.wixstatic.com', 'streamable.com', 'surfly.com', 'theverge.com', 'vox-cdn.com', 'vox.com', 'www.joyent.com'",),)) + ERROR: Command "/usr/bin/python2.6 /root/ansible/test/lib/ansible_test/_data/quiet_pip.py install --disable-pip-version-check -r /root/ansible/test/lib/ansible_test/_data/requirements/units.txt -r tests/unit/requirements.txt -c /root/ansible/test/lib/ansible_test/_data/requirements/constraints.txt" returned exit status 2. + ERROR: Command "docker exec d47eb360db4ce779c1f690db964655b76e68895c4360ff252c46fe7fe6f5c75a /usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/root/ansible_collections/netapp/ontap LC_ALL=en_US.UTF-8 /usr/bin/python3.6 /root/ansible/bin/ansible-test units --metadata tests/output/.tmp/metadata-9i2qfrcl.json --truncate 200 --redact --color yes --requirements --python default --requirements-mode only" returned exit status 1. + validations: + required: true + + +- type: markdown + attributes: + value: > + *One last thing...* + + + Thank you for your collaboration! + + +... 
diff --git a/ansible_collections/netapp/aws/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/netapp/aws/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..c0506a059 --- /dev/null +++ b/ansible_collections/netapp/aws/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,100 @@ +--- +name: ✨ Feature request +description: Suggest an idea for this project + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to suggest a feature for netapp.aws!** + + 💡 + Before you go ahead with your request, please first consider if it + would be useful for the majority of netapp.aws users. As a + general rule of thumb, any feature that is only of interest to a + small subgroup should be [implemented in a third-party Ansible + Collection][contribute to collections] or maybe even just your + project alone. Be mindful of the fact that the essential + netapp.aws features have a broad impact. + +
+ + ❗ Every change breaks someone's workflow. + + + + [![❗ Every change breaks someone's workflow. + ](https://imgs.xkcd.com/comics/workflow.png) + ](https://xkcd.com/1172/) +
+ + + ⚠ + Verify first that your idea is not [already requested on + GitHub][issue search]. + + Also test if the main branch does not already implement this. + + +- type: textarea + attributes: + label: Summary + description: > + Describe the new feature/improvement you would like briefly below. + + + What's the problem this feature will solve? + + What are you trying to do, that you are unable to achieve + with netapp.aws as it currently stands? + + + * Provide examples of real-world use cases that this would enable + and how it solves the problem you described. + + * How do you solve this now? + + * Have you tried to work around the problem using other tools? + + * Could there be a different approach to solving this issue? + + placeholder: >- + I am trying to do X with netapp.aws from the devel branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of netapp.aws because of Z. + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: >- + I asked on https://stackoverflow.com/.... and the community + advised me to do X, Y and Z. + validations: + required: true + +... 
diff --git a/ansible_collections/netapp/aws/.github/workflows/coverage.yml b/ansible_collections/netapp/aws/.github/workflows/coverage.yml new file mode 100644 index 000000000..d254e6081 --- /dev/null +++ b/ansible_collections/netapp/aws/.github/workflows/coverage.yml @@ -0,0 +1,45 @@ +name: NetApp.aws Ansible Coverage + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Coverage on AWS + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible stable-2.11 + run: pip install https://github.com/ansible/ansible/archive/stable-2.11.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/aws/ + rsync -av . ansible_collections/netapp/aws/ --exclude ansible_collections/netapp/aws/ + + - name: Run Unit Tests + run: ansible-test units --coverage --color --docker --python 3.8 + working-directory: ansible_collections/netapp/aws/ + + # ansible-test supports producing code coverage data + - name: Generate coverage report + run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ansible_collections/netapp/aws/ + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + working-directory: ansible_collections/netapp/aws/ + verbose: true \ No newline at end of file diff --git a/ansible_collections/netapp/aws/.github/workflows/main.yml b/ansible_collections/netapp/aws/.github/workflows/main.yml new file mode 100644 index 000000000..d4357270d --- /dev/null +++ b/ansible_collections/netapp/aws/.github/workflows/main.yml @@ -0,0 +1,45 @@ +name: NetApp.aws Ansible CI + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Sanity (${{ matrix.ansible }}) on AWS + runs-on: ubuntu-latest + strategy: + matrix: + 
ansible: + - stable-2.9 + - stable-2.10 + - stable-2.11 + - stable-2.12 + - devel + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/aws/ + rsync -av . ansible_collections/netapp/aws/ --exclude ansible_collections/netapp/aws/ + - name: Run sanity tests AWS + run: ansible-test sanity --docker -v --color + working-directory: ansible_collections/netapp/aws/ + + - name: Run Unit Tests + run: ansible-test units --docker -v --color + working-directory: ansible_collections/netapp/aws/ diff --git a/ansible_collections/netapp/aws/CHANGELOG.rst b/ansible_collections/netapp/aws/CHANGELOG.rst new file mode 100644 index 000000000..11be8dbab --- /dev/null +++ b/ansible_collections/netapp/aws/CHANGELOG.rst @@ -0,0 +1,90 @@ +======================================= +NetApp AWS CVS Collection Release Notes +======================================= + +.. contents:: Topics + + +v21.7.0 +======= + +Minor Changes +------------- + +- PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. + +v21.6.0 +======= + +Minor Changes +------------- + +- all modules - ability to trace API calls and responses. + +Bugfixes +-------- + +- all modules - fix traceback TypeError 'NoneType' object is not subscriptable when URL points to a web server. + +v21.2.0 +======= + +Bugfixes +-------- + +- all modules - disable logging for ``api_key`` and ``secret_key`` values. +- all modules - prevent infinite loop when asynchronous action fails. +- all modules - report error if response does not contain valid JSON. +- aws_netapp_cvs_filesystems - fix KeyError when exportPolicy is not present. 
+ +v20.9.0 +======= + +Minor Changes +------------- + +- Fix pylint or flake8 warnings reported by galaxy importer. + +v20.8.0 +======= + +Minor Changes +------------- + +- add "elements:" and update "required:" to match module requirements. +- use a three group format for version_added. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. + +v20.6.0 +======= + +Bugfixes +-------- + +- galaxy.yml - fix repository and homepage links. + +v20.2.0 +======= + +Bugfixes +-------- + +- galaxy.yml - fix path to github repository. + +v19.10.0 +======== + +Minor Changes +------------- + +- refactor existing modules as a collection + +v2.9.0 +====== + +New Modules +----------- + +- netapp.aws.aws_netapp_cvs_active_directory - NetApp AWS CloudVolumes Service Manage Active Directory. +- netapp.aws.aws_netapp_cvs_filesystems - NetApp AWS Cloud Volumes Service Manage FileSystem. +- netapp.aws.aws_netapp_cvs_pool - NetApp AWS Cloud Volumes Service Manage Pools. +- netapp.aws.aws_netapp_cvs_snapshots - NetApp AWS Cloud Volumes Service Manage Snapshots. diff --git a/ansible_collections/netapp/aws/COPYING b/ansible_collections/netapp/aws/COPYING new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/ansible_collections/netapp/aws/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
 +
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/netapp/aws/FILES.json b/ansible_collections/netapp/aws/FILES.json new file mode 100644 index 000000000..0c3ecec9c --- /dev/null +++ b/ansible_collections/netapp/aws/FILES.json @@ -0,0 +1,376 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec72420df5dfbdce4111f715c96338df3b7cb75f58e478d2449c9720e560de8c", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73a260df376d83b7076b7654a10e9f238de604470a6ba309e8c6019c0f710203", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c23b3d0d56e977cf8d27dce883c871960efb9707c89cd5d82147e36d93945bf", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d69e77a6e5b76dc8909149c8c364454e80fb42631af7d889dfb6e2ff0438c3e", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/aws_netapp_cvs_filesystems.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "600bccc2f8464217ff4672ba52c160fdcbdc27d40ae33b29f8944af10b14af18", + "format": 1 + }, + { + "name": "plugins/modules/aws_netapp_cvs_active_directory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a040e165f265ad4cfc04672506be81a07a032a9e2769f5d84b2a77c3be81fce0", + "format": 1 + }, + { + "name": "plugins/modules/aws_netapp_cvs_snapshots.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8edfd787384f01ef37a0032e60898b0253472355a32e420b439e1dbb4d385e85", + "format": 1 + }, + { + "name": "plugins/modules/aws_netapp_cvs_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7a4b6fc9d48d61cf80a052455334ffd671dd880e7ec476aff6ccae820b658610", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat/unittest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cba95d18c5b39c6f49714eacf1ac77452c2e32fa087c03cf01aacd19ae597b0f", + "format": 1 + }, + { + "name": "tests/unit/compat/builtins.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1", + "format": 1 + }, + { + "name": "tests/unit/compat/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/compat/mock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68a61b1d58a722f4ffabaa28da01c9837c93a582ea41c1bfb1c1fd54ea2d8fab", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d8932ad58a04840536e850abf3f131960e99ec55546081fb63713dbfc3bc05d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_aws_netapp_cvs_snapshots.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "553b6f1afd566bebf6a90de18f71517efc3a41953bade06cd8972fcbff9ea1fb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_aws_netapp_cvs_active_directory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b42a059c1dfd757cd6294ca9ebce74f1e3ce6690bcddcdca2cdb4e6b8ac771b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_aws_netapp_cvs_filesystems.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f74d3b3ecbaf71433fcf569a9a09d134f241c8eb5a8e2ed373eeb5638fc79b2e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_aws_netapp_cvs_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64f0c7995f02daaf91d3d6cc15f2347ecba5253a2dc13f7a7df6a880c0926fcf", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "064142cc7f33ebdf05a9f5eb31629c37c1f2cc791728b9e8bfa7d6d02753841f", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/20.9.0.yaml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "adc1cab2c7625a1e299876d9d622eb1e7529af59042268e088673f408c1d1dce", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3569.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3e0500c584d187e1b339a612f800540a39cddcebe5c3e1c8c2e134af0b2baf6", + "format": 1 + }, + { + "name": "changelogs/fragments/2019.10.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b1a5ef7df5f1e6e66ddc013149aea0480eb79f911a0563e2e6d7d9af79d5572", + "format": 1 + }, + { + "name": "changelogs/fragments/20.2.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91918f48a406834778ff7163c92c12dd1802c0620cb681ee66f8a4709444cf5e", + "format": 1 + }, + { + "name": "changelogs/fragments/20.8.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a91cbe5f46d2bae6af1eb3693470bdfaf04e5fbd6cdc76882e674389f4352f16", + "format": 1 + }, + { + "name": "changelogs/fragments/20.6.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6192b3cccdc7c1e1eb0d61a49dd20c6f234499b6dd9b52b2f974b673e99f7a47", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3644.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7eaf09f11cdf4fd2628e29124ce128dd984340ee65a233acdde77369ebf08ada", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4416.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4224db573f34caeeb956c8728eb343a47bc2729d898001a4c6a671b780dae1bf", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0bb3c0938ee0599c727ceef11d224bd771e9db9dc7a0bca162d786c2933ea89", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f1d175c82536c75b0c17c289a6aa7e9bd2faeea39485d571cea6cba5c86d9aa", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "35c36cfbf3376b4441c096654932c92810d8970c89f50a21adffa44da6609aca", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "876f2d16a1c4169c208ddec8702048f376eeacd4f697a10cfe4a948444ce3f4e", + "format": 1 + }, + { + "name": ".github/workflows/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b30611d534ec350cce00d5c14b28fd26f3961c7e05f714c79022c4af91975620", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/feature_request.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4204f720b07bf0636be6a9c39717e84e59dc1a0b36425bf0f10fc9817131d3e7", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/bug_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bcc8888ddd84ef0fc9efe03e784278a748a71ec3e4fffa62dc4a8ad02007760", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9a90849f79ddb81dc0f31e6cad9e6273e82d60b8651404fa31e40f0a6ee66b1", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/aws/MANIFEST.json b/ansible_collections/netapp/aws/MANIFEST.json new file mode 100644 index 000000000..373e1c4f1 --- /dev/null +++ b/ansible_collections/netapp/aws/MANIFEST.json @@ -0,0 +1,35 @@ +{ + "collection_info": { + "namespace": "netapp", + "name": "aws", + 
"version": "21.7.0", + "authors": [ + "NetApp Ansible Team " + ], + "readme": "README.md", + "tags": [ + "storage", + "cloud", + "netapp", + "cvs", + "amazon", + "aws" + ], + "description": "Cloud Volumes Service (CVS) for AWS", + "license": [], + "license_file": "COPYING", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/netapp.aws", + "documentation": null, + "homepage": "https://netapp.io/configuration-management-and-automation/", + "issues": "https://github.com/ansible-collections/netapp.aws/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8d3153cb010a888ed1cd2f4912db9e5c59533c0eb853bc6fa6495b7a7d8811e", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/aws/README.md b/ansible_collections/netapp/aws/README.md new file mode 100644 index 000000000..7dbbfb840 --- /dev/null +++ b/ansible_collections/netapp/aws/README.md @@ -0,0 +1,86 @@ +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/netapp/aws/index.html) +![example workflow](https://github.com/ansible-collections/netapp.aws/actions/workflows/main.yml/badge.svg) +[![codecov](https://codecov.io/gh/ansible-collections/netapp.aws/branch/main/graph/badge.svg?token=weBYkksxSi)](https://codecov.io/gh/ansible-collections/netapp.aws) + + +============================================================= + +netapp.aws + +NetApp AWS CVS Collection + +Copyright (c) 2019 NetApp, Inc. All rights reserved. +Specifications subject to change without notice. 
 +
+=============================================================
+
+# Installation
+```bash
+ansible-galaxy collection install netapp.aws
+```
+To use this collection, add the following to the top of your playbook; without this you will be using the Ansible 2.9 version of the module
+```
+collections:
+  - netapp.aws
+```
+
+# Module documentation
+https://docs.ansible.com/ansible/devel/collections/netapp/aws/
+
+# Need help
+Join our Slack Channel at [Netapp.io](http://netapp.io/slack)
+
+# Notes
+
+These Ansible modules support NetApp Cloud Volumes Service for AWS.
+
+They require a subscription to the service and your API access keys.
+
+The modules currently support Active Directory, Pool, FileSystem (Volume), and Snapshot services.
+
+# Release Notes
+
+
+## 21.7.0
+
+### Minor Changes
+- all modules - allow usage of Ansible module group defaults - for Ansible 2.12+.
+
+## 21.6.0
+
+### Minor Changes
+- all modules - ability to trace API calls and responses.
+
+### Bug Fixes
+- all modules - fix traceback TypeError 'NoneType' object is not subscriptable when URL points to a web server.
+
+## 21.2.0
+
+### Bug Fixes
+- aws_netapp_cvs_filesystems - fix KeyError when exportPolicy is not present.
+- all modules - disable logging for `api_key` and `secret_key` values.
+- all modules - report error if response does not contain valid JSON.
+- all modules - prevent infinite loop when asynchronous action fails.
+
+## 20.9.0
+
+Fix pylint or flake8 warnings reported by galaxy importer.
+
+## 20.8.0
+
+### Module documentation changes
+- use a three group format for `version_added`. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+- add `elements:` and update `required:` to match module requirements.
+
+## 20.6.0
+
+### Bug Fixes
+- galaxy.yml: fix repository and homepage links.
+
+## 20.2.0
+
+### Bug Fixes
+- galaxy.yml: fix path to github repository.
+
+## 19.11.0
+- Initial release as a collection.
diff --git a/ansible_collections/netapp/aws/changelogs/changelog.yaml b/ansible_collections/netapp/aws/changelogs/changelog.yaml new file mode 100644 index 000000000..ba907fcd5 --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/changelog.yaml @@ -0,0 +1,81 @@ +ancestor: null +releases: + 19.10.0: + changes: + minor_changes: + - refactor existing modules as a collection + fragments: + - 2019.10.0.yaml + release_date: '2019-11-14' + 2.9.0: + modules: + - description: NetApp AWS CloudVolumes Service Manage Active Directory. + name: aws_netapp_cvs_active_directory + namespace: '' + - description: NetApp AWS Cloud Volumes Service Manage FileSystem. + name: aws_netapp_cvs_filesystems + namespace: '' + - description: NetApp AWS Cloud Volumes Service Manage Pools. + name: aws_netapp_cvs_pool + namespace: '' + - description: NetApp AWS Cloud Volumes Service Manage Snapshots. + name: aws_netapp_cvs_snapshots + namespace: '' + release_date: '2019-11-13' + 20.2.0: + changes: + bugfixes: + - galaxy.yml - fix path to github repository. + fragments: + - 20.2.0.yaml + release_date: '2020-02-05' + 20.6.0: + changes: + bugfixes: + - galaxy.yml - fix repository and homepage links. + fragments: + - 20.6.0.yaml + release_date: '2020-06-03' + 20.8.0: + changes: + minor_changes: + - add "elements:" and update "required:" to match module requirements. + - use a three group format for version_added. So 2.7 becomes 2.7.0. Same thing + for 2.8 and 2.9. + fragments: + - 20.8.0.yaml + release_date: '2020-08-05' + 20.9.0: + changes: + minor_changes: + - Fix pylint or flake8 warnings reported by galaxy importer. + fragments: + - 20.9.0.yaml + release_date: '2020-09-02' + 21.2.0: + changes: + bugfixes: + - all modules - disable logging for ``api_key`` and ``secret_key`` values. + - all modules - prevent infinite loop when asynchronous action fails. + - all modules - report error if response does not contain valid JSON. 
+ - aws_netapp_cvs_filesystems - fix KeyError when exportPolicy is not present. + fragments: + - DEVOPS-3644.yaml + release_date: '2021-02-04' + 21.6.0: + changes: + bugfixes: + - all modules - fix traceback TypeError 'NoneType' object is not subscriptable + when URL points to a web server. + minor_changes: + - all modules - ability to trace API calls and responses. + fragments: + - DEVOPS-3569.yaml + release_date: '2021-07-14' + 21.7.0: + changes: + minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. + fragments: + - DEVOPS-4416.yaml + release_date: '2021-11-03' diff --git a/ansible_collections/netapp/aws/changelogs/config.yaml b/ansible_collections/netapp/aws/changelogs/config.yaml new file mode 100644 index 000000000..5f649c68c --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/config.yaml @@ -0,0 +1,32 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: true +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: NetApp AWS CVS Collection +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/netapp/aws/changelogs/fragments/20.2.0.yaml b/ansible_collections/netapp/aws/changelogs/fragments/20.2.0.yaml new file mode 100644 index 000000000..3f764c1c9 --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/fragments/20.2.0.yaml @@ -0,0 +1,2 @@ 
+bugfixes: + - galaxy.yml - fix path to github repository. diff --git a/ansible_collections/netapp/aws/changelogs/fragments/20.6.0.yaml b/ansible_collections/netapp/aws/changelogs/fragments/20.6.0.yaml new file mode 100644 index 000000000..fcd0d11ee --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/fragments/20.6.0.yaml @@ -0,0 +1,2 @@ +bugfixes: + - galaxy.yml - fix repository and homepage links. diff --git a/ansible_collections/netapp/aws/changelogs/fragments/20.8.0.yaml b/ansible_collections/netapp/aws/changelogs/fragments/20.8.0.yaml new file mode 100644 index 000000000..c92e9e41b --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/fragments/20.8.0.yaml @@ -0,0 +1,3 @@ +minor_changes: + - use a three group format for version_added. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. + - add "elements:" and update "required:" to match module requirements. diff --git a/ansible_collections/netapp/aws/changelogs/fragments/20.9.0.yaml b/ansible_collections/netapp/aws/changelogs/fragments/20.9.0.yaml new file mode 100644 index 000000000..c7328c5eb --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/fragments/20.9.0.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Fix pylint or flake8 warnings reported by galaxy importer. 
diff --git a/ansible_collections/netapp/aws/changelogs/fragments/2019.10.0.yaml b/ansible_collections/netapp/aws/changelogs/fragments/2019.10.0.yaml new file mode 100644 index 000000000..5723daa11 --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/fragments/2019.10.0.yaml @@ -0,0 +1,2 @@ +minor_changes: + - refactor existing modules as a collection diff --git a/ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-3569.yaml b/ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-3569.yaml new file mode 100644 index 000000000..19ba55d8d --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-3569.yaml @@ -0,0 +1,4 @@ +minor_changes: + - all modules - ability to trace API calls and responses. +bugfixes: + - all modules - fix traceback TypeError 'NoneType' object is not subscriptable when URL points to a web server. diff --git a/ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-3644.yaml b/ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-3644.yaml new file mode 100644 index 000000000..2c7e83f31 --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-3644.yaml @@ -0,0 +1,5 @@ +bugfixes: + - aws_netapp_cvs_filesystems - fix KeyError when exportPolicy is not present. + - all modules - disable logging for ``api_key`` and ``secret_key`` values. + - all modules - report error if response does not contain valid JSON. + - all modules - prevent infinite loop when asynchronous action fails. diff --git a/ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-4416.yaml b/ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-4416.yaml new file mode 100644 index 000000000..6b4b660a0 --- /dev/null +++ b/ansible_collections/netapp/aws/changelogs/fragments/DEVOPS-4416.yaml @@ -0,0 +1,2 @@ +minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. 
diff --git a/ansible_collections/netapp/aws/meta/runtime.yml b/ansible_collections/netapp/aws/meta/runtime.yml new file mode 100644 index 000000000..048c94c14 --- /dev/null +++ b/ansible_collections/netapp/aws/meta/runtime.yml @@ -0,0 +1,8 @@ +--- +requires_ansible: ">=2.9.10" +action_groups: + netapp_aws: + - aws_netapp_cvs_active_directory + - aws_netapp_cvs_filesystems + - aws_netapp_cvs_pool + - aws_netapp_cvs_snapshots diff --git a/ansible_collections/netapp/aws/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/aws/plugins/doc_fragments/netapp.py new file mode 100644 index 000000000..aff60719f --- /dev/null +++ b/ansible_collections/netapp/aws/plugins/doc_fragments/netapp.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, NetApp Ansible Team +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + - See respective platform section for more details +requirements: + - See respective platform section for more details +notes: + - This is documentation for NetApp's AWS CVS modules. +''' + + # Documentation fragment for AWSCVS + AWSCVS = """ +options: + api_key: + required: true + type: str + description: + - The access key to authenticate with the AWSCVS Web Services Proxy or Embedded Web Services API. + secret_key: + required: true + type: str + description: + - The secret_key to authenticate with the AWSCVS Web Services Proxy or Embedded Web Services API. + api_url: + required: true + type: str + description: + - The url to the AWSCVS Web Services Proxy or Embedded Web Services API. + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + feature_flags: + description: + - Enable or disable a new feature. 
+ - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility. + - Supported keys and values are subject to change without notice. Unknown keys are ignored. + - trace_apis can be set to true to enable tracing, data is written to /tmp/um_apis.log. + type: dict + version_added: 21.6.0 +notes: + - The modules prefixed with aws\\_cvs\\_netapp are built to Manage AWS Cloud Volumes Service . +""" diff --git a/ansible_collections/netapp/aws/plugins/module_utils/netapp.py b/ansible_collections/netapp/aws/plugins/module_utils/netapp.py new file mode 100644 index 000000000..df7b85ffe --- /dev/null +++ b/ansible_collections/netapp/aws/plugins/module_utils/netapp.py @@ -0,0 +1,241 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2019, NetApp Ansible Team +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +''' +netapp.py +Support methods and class for AWS CVS modules +''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import logging +import time +from ansible.module_utils.basic import missing_required_lib + +try: + from ansible.module_utils.ansible_release import __version__ as ansible_version +except ImportError: + ansible_version = 'unknown' + +COLLECTION_VERSION = "21.7.0" + +try: + import requests + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False + + +POW2_BYTE_MAP = dict( + # Here, 1 kb = 1024 + bytes=1, + b=1, + kb=1024, + mb=1024 ** 2, + gb=1024 ** 3, + tb=1024 ** 4, + pb=1024 ** 5, + eb=1024 ** 6, + zb=1024 ** 7, + yb=1024 ** 8 +) + +LOG = logging.getLogger(__name__) +LOG_FILE = '/tmp/aws_cvs_apis.log' + + +def aws_cvs_host_argument_spec(): + + return dict( + api_url=dict(required=True, type='str'), + validate_certs=dict(required=False, type='bool', default=True), + api_key=dict(required=True, type='str', no_log=True), + secret_key=dict(required=True, type='str', no_log=True), + feature_flags=dict(required=False, type='dict', default=dict()), + ) + + +def has_feature(module, feature_name): + feature = get_feature(module, feature_name) + if isinstance(feature, bool): + return feature + module.fail_json(msg="Error: expected bool type for feature flag: %s" % feature_name) + + +def get_feature(module, feature_name): + ''' if the user has configured the feature, use it + 
otherwise, use our default + ''' + default_flags = dict( + strict_json_check=True, # if true, fail if response.content in not empty and is not valid json + trace_apis=False, # if true, append REST requests/responses to LOG_FILE + ) + + if module.params['feature_flags'] is not None and feature_name in module.params['feature_flags']: + return module.params['feature_flags'][feature_name] + if feature_name in default_flags: + return default_flags[feature_name] + module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name) + + +class AwsCvsRestAPI(object): + ''' wraps requests methods to interface with AWS CVS REST APIs ''' + def __init__(self, module, timeout=60): + self.module = module + self.api_key = self.module.params['api_key'] + self.secret_key = self.module.params['secret_key'] + self.api_url = self.module.params['api_url'] + self.verify = self.module.params['validate_certs'] + self.timeout = timeout + self.url = 'https://' + self.api_url + '/v1/' + self.errors = list() + self.debug_logs = list() + self.check_required_library() + if has_feature(module, 'trace_apis'): + logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s') + + def check_required_library(self): + if not HAS_REQUESTS: + self.module.fail_json(msg=missing_required_lib('requests')) + + def send_request(self, method, api, params, json=None): + ''' send http request and process reponse, including error conditions ''' + if params is not None: + self.module.fail_json(msg='params is not implemented. api=%s, params=%s' % (api, repr(params))) + url = self.url + api + status_code = None + content = None + json_dict = None + json_error = None + error_details = None + headers = { + 'Content-type': "application/json", + 'api-key': self.api_key, + 'secret-key': self.secret_key, + 'Cache-Control': "no-cache", + } + + def check_contents(response): + '''json() may fail on an empty value, but it's OK if no response is expected. 
+ To avoid false positives, only report an issue when we expect to read a value. + The first get will see it. + ''' + if method == 'GET' and has_feature(self.module, 'strict_json_check'): + contents = response.content + if len(contents) > 0: + raise ValueError("Expecting json, got: %s" % contents) + + def get_json(response): + ''' extract json, and error message if present ''' + error = None + try: + json = response.json() + except ValueError: + check_contents(response) + return None, None + success_code = [200, 201, 202] + if response.status_code not in success_code: + error = json.get('message') + return json, error + + def sanitize(value, key=None): + if isinstance(value, dict): + new_dict = dict() + for key, value in value.items(): + new_dict[key] = sanitize(value, key) + return new_dict + else: + if key in ['api-key', 'secret-key', 'password']: + return '*' * 12 + else: + return value + + self.log_debug('sending', repr(sanitize(dict(method=method, url=url, verify=self.verify, params=params, + timeout=self.timeout, json=json, headers=headers)))) + try: + response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json) + content = response.content # for debug purposes + status_code = response.status_code + # If the response was successful, no Exception will be raised + response.raise_for_status() + json_dict, json_error = get_json(response) + except requests.exceptions.HTTPError as err: + __, json_error = get_json(response) + if json_error is None: + self.log_error(status_code, 'HTTP error: %s' % err) + error_details = str(err) + # If an error was reported in the json payload, it is handled below + except requests.exceptions.ConnectionError as err: + self.log_error(status_code, 'Connection error: %s' % err) + error_details = str(err) + except Exception as err: + self.log_error(status_code, 'Other error: %s' % err) + error_details = 'general exception: %s' % str(err) + if json_error is not None: + self.log_error(status_code, 'Endpoint 
error: %d: %s' % (status_code, json_error)) + error_details = json_error + self.log_debug(status_code, content) + return json_dict, error_details + + def get(self, api, params=None): + method = 'GET' + return self.send_request(method, api, params) + + def post(self, api, data, params=None): + method = 'POST' + return self.send_request(method, api, params, json=data) + + def patch(self, api, data, params=None): + method = 'PATCH' + return self.send_request(method, api, params, json=data) + + def put(self, api, data, params=None): + method = 'PUT' + return self.send_request(method, api, params, json=data) + + def delete(self, api, data, params=None): + method = 'DELETE' + return self.send_request(method, api, params, json=data) + + def get_state(self, job_id): + """ Method to get the state of the job """ + response, dummy = self.get('Jobs/%s' % job_id) + while str(response['state']) == 'ongoing': + time.sleep(15) + response, dummy = self.get('Jobs/%s' % job_id) + return str(response['state']) + + def log_error(self, status_code, message): + LOG.error("%s: %s", status_code, message) + self.errors.append(message) + self.debug_logs.append((status_code, message)) + + def log_debug(self, status_code, content): + LOG.debug("%s: %s", status_code, content) + self.debug_logs.append((status_code, content)) diff --git a/ansible_collections/netapp/aws/plugins/module_utils/netapp_module.py b/ansible_collections/netapp/aws/plugins/module_utils/netapp_module.py new file mode 100644 index 000000000..3e31ae989 --- /dev/null +++ b/ansible_collections/netapp/aws/plugins/module_utils/netapp_module.py @@ -0,0 +1,142 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. 
+# +# Copyright (c) 2018, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +''' Support class for NetApp ansible modules ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def cmp(a, b): + """ + Python 3 does not have a cmp function, this will do the cmp. + :param a: first object to check + :param b: second object to check + :return: + """ + # convert to lower case for string comparison. + if a is None: + return -1 + if isinstance(a, str) and isinstance(b, str): + a = a.lower() + b = b.lower() + # if list has string element, convert string to lower case. 
+ if isinstance(a, list) and isinstance(b, list): + a = [x.lower() if isinstance(x, str) else x for x in a] + b = [x.lower() if isinstance(x, str) else x for x in b] + a.sort() + b.sort() + return (a > b) - (a < b) + + +class NetAppModule(object): + ''' + Common class for NetApp modules + set of support functions to derive actions based + on the current state of the system, and a desired state + ''' + + def __init__(self): + self.log = list() + self.changed = False + self.parameters = {'name': 'not intialized'} + + def set_parameters(self, ansible_params): + self.parameters = dict() + for param in ansible_params: + if ansible_params[param] is not None: + self.parameters[param] = ansible_params[param] + return self.parameters + + def get_cd_action(self, current, desired): + ''' takes a desired state and a current state, and return an action: + create, delete, None + eg: + is_present = 'absent' + some_object = self.get_object(source) + if some_object is not None: + is_present = 'present' + action = cd_action(current=is_present, desired = self.desired.state()) + ''' + if 'state' in desired: + desired_state = desired['state'] + else: + desired_state = 'present' + + if current is None and desired_state == 'absent': + return None + if current is not None and desired_state == 'present': + return None + # change in state + self.changed = True + if current is not None: + return 'delete' + return 'create' + + def compare_and_update_values(self, current, desired, keys_to_compare): + updated_values = dict() + is_changed = False + for key in keys_to_compare: + if key in current: + if key in desired and desired[key] is not None: + if current[key] != desired[key]: + updated_values[key] = desired[key] + is_changed = True + else: + updated_values[key] = current[key] + else: + updated_values[key] = current[key] + + return updated_values, is_changed + + def is_rename_action(self, source, target): + ''' takes a source and target object, and returns True + if a rename is required + eg: 
+ source = self.get_object(source_name) + target = self.get_object(target_name) + action = is_rename_action(source, target) + :return: None for error, True for rename action, False otherwise + ''' + if source is None and target is None: + # error, do nothing + # cannot rename an non existent resource + # alternatively we could create B + return None + if source is not None and target is not None: + # error, do nothing + # idempotency (or) new_name_is_already_in_use + # alternatively we could delete B and rename A to B + return False + if source is None and target is not None: + # do nothing, maybe the rename was already done + return False + # source is not None and target is None: + # rename is in order + self.changed = True + return True diff --git a/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_active_directory.py b/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_active_directory.py new file mode 100644 index 000000000..b64c877b4 --- /dev/null +++ b/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_active_directory.py @@ -0,0 +1,276 @@ +#!/usr/bin/python + +# (c) 2019, NetApp Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""AWS Cloud Volumes Services - Manage ActiveDirectory""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: aws_netapp_cvs_active_directory + +short_description: NetApp AWS CloudVolumes Service Manage Active Directory. +extends_documentation_fragment: + - netapp.aws.netapp.awscvs +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create, Update, Delete ActiveDirectory on AWS Cloud Volumes Service. + +options: + state: + description: + - Whether the specified ActiveDirectory should exist or not. 
+ choices: ['present', 'absent'] + required: true + type: str + + region: + description: + - The region to which the Active Directory credentials are associated. + required: true + type: str + + domain: + description: + - Name of the Active Directory domain + type: str + + DNS: + description: + - DNS server address for the Active Directory domain + - Required when C(state=present) + - Required when C(state=present), to modify ActiveDirectory properties. + type: str + + netBIOS: + description: + - NetBIOS name of the server. + type: str + + username: + description: + - Username of the Active Directory domain administrator + type: str + + password: + description: + - Password of the Active Directory domain administrator + - Required when C(state=present), to modify ActiveDirectory properties + type: str +''' + +EXAMPLES = """ + - name: Create Active Directory + aws_netapp_cvs_active_directory.py: + state: present + region: us-east-1 + DNS: 101.102.103.123 + domain: mydomain.com + password: netapp1! + netBIOS: testing + username: user1 + api_url : My_CVS_Hostname + api_key: My_API_Key + secret_key : My_Secret_Key + + - name: Update Active Directory + aws_netapp_cvs_active_directory.py: + state: present + region: us-east-1 + DNS: 101.102.103.123 + domain: mydomain.com + password: netapp2! 
+ netBIOS: testingBIOS + username: user2 + api_url : My_CVS_Hostname + api_key: My_API_Key + secret_key : My_Secret_Key + + - name: Delete Active Directory + aws_netapp_cvs_active_directory.py: + state: absent + region: us-east-1 + domain: mydomain.com + api_url : My_CVS_Hostname + api_key: My_API_Key + secret_key : My_Secret_Key +""" + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.aws.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.aws.plugins.module_utils.netapp import AwsCvsRestAPI + + +class AwsCvsNetappActiveDir(object): + """ + Contains methods to parse arguments, + derive details of AWS_CVS objects + and send requests to AWS CVS via + the restApi + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check paramenters and ensure request module is installed + """ + self.argument_spec = netapp_utils.aws_cvs_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent'], type='str'), + region=dict(required=True, type='str'), + DNS=dict(required=False, type='str'), + domain=dict(required=False, type='str'), + password=dict(required=False, type='str', no_log=True), + netBIOS=dict(required=False, type='str'), + username=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['domain', 'password']), + ], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic AWSCVS restApi class + self.rest_api = AwsCvsRestAPI(self.module) + + def get_activedirectory_id(self): + # Check if ActiveDirectory exists + # Return UUID for ActiveDirectory is found, None otherwise + try: + list_activedirectory, error 
= self.rest_api.get('Storage/ActiveDirectory') + except Exception: + return None + if error is not None: + self.module.fail_json(msg='Error calling list_activedirectory: %s' % error) + + for activedirectory in list_activedirectory: + if activedirectory['region'] == self.parameters['region']: + return activedirectory['UUID'] + return None + + def get_activedirectory(self, activedirectory_id=None): + if activedirectory_id is None: + return None + else: + activedirectory_info, error = self.rest_api.get('Storage/ActiveDirectory/%s' % activedirectory_id) + if not error: + return activedirectory_info + return None + + def create_activedirectory(self): + # Create ActiveDirectory + api = 'Storage/ActiveDirectory' + data = {"region": self.parameters['region'], "DNS": self.parameters['DNS'], "domain": self.parameters['domain'], + "username": self.parameters['username'], "password": self.parameters['password'], "netBIOS": self.parameters['netBIOS']} + + response, error = self.rest_api.post(api, data) + + if not error: + return response + else: + self.module.fail_json(msg=response['message']) + + def delete_activedirectory(self): + activedirectory_id = self.get_activedirectory_id() + # Delete ActiveDirectory + + if activedirectory_id: + api = 'Storage/ActiveDirectory/' + activedirectory_id + data = None + response, error = self.rest_api.delete(api, data) + if not error: + return response + else: + self.module.fail_json(msg=response['message']) + + else: + self.module.fail_json(msg="Active Directory does not exist") + + def update_activedirectory(self, activedirectory_id, updated_activedirectory): + # Update ActiveDirectory + api = 'Storage/ActiveDirectory/' + activedirectory_id + data = { + "region": self.parameters['region'], + "DNS": updated_activedirectory['DNS'], + "domain": updated_activedirectory['domain'], + "username": updated_activedirectory['username'], + "password": updated_activedirectory['password'], + "netBIOS": updated_activedirectory['netBIOS'] + } + + 
response, error = self.rest_api.put(api, data) + if not error: + return response + else: + self.module.fail_json(msg=response['message']) + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + modify = False + activedirectory_id = self.get_activedirectory_id() + current = self.get_activedirectory(activedirectory_id) + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if current and self.parameters['state'] != 'absent': + keys_to_check = ['DNS', 'domain', 'username', 'netBIOS'] + updated_active_directory, modify = self.na_helper.compare_and_update_values(current, self.parameters, keys_to_check) + + if self.parameters['password']: + modify = True + updated_active_directory['password'] = self.parameters['password'] + + if modify is True: + self.na_helper.changed = True + if 'domain' in self.parameters and self.parameters['domain'] is not None: + ad_exists = self.get_activedirectory(updated_active_directory['domain']) + if ad_exists: + modify = False + self.na_helper.changed = False + + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if modify is True: + self.update_activedirectory(activedirectory_id, updated_active_directory) + elif cd_action == 'create': + self.create_activedirectory() + elif cd_action == 'delete': + self.delete_activedirectory() + + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + """ + Main function + """ + aws_netapp_cvs_active_directory = AwsCvsNetappActiveDir() + aws_netapp_cvs_active_directory.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_filesystems.py b/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_filesystems.py new file mode 100644 index 000000000..09190b39e --- /dev/null +++ b/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_filesystems.py @@ -0,0 +1,362 @@ +#!/usr/bin/python + +# (c) 2019, NetApp Inc +# GNU General Public License v3.0+ 
(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""AWS Cloud Volumes Services - Manage fileSystem""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: aws_netapp_cvs_filesystems + +short_description: NetApp AWS Cloud Volumes Service Manage FileSystem. +extends_documentation_fragment: + - netapp.aws.netapp.awscvs +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, Update, Delete fileSystem on AWS Cloud Volumes Service. + +options: + state: + description: + - Whether the specified fileSystem should exist or not. + required: true + choices: ['present', 'absent'] + type: str + + region: + description: + - The region to which the filesystem belongs to. + required: true + type: str + + creationToken: + description: + - Name of the filesystem + required: true + type: str + + quotaInBytes: + description: + - Size of the filesystem + - Required for create + type: int + + serviceLevel: + description: + - Service Level of a filesystem. 
+ choices: ['standard', 'premium', 'extreme'] + type: str + + exportPolicy: + description: + - The policy rules to export the filesystem + type: dict + suboptions: + rules: + description: + - Set of rules to export the filesystem + - Requires allowedClients, access and protocol + type: list + elements: dict + suboptions: + allowedClients: + description: + - Comma separated list of ip address blocks of the clients to access the fileSystem + - Each address block contains the starting IP address and size for the block + type: str + + cifs: + description: + - Enable or disable cifs filesystem + type: bool + + nfsv3: + description: + - Enable or disable nfsv3 fileSystem + type: bool + + nfsv4: + description: + - Enable or disable nfsv4 filesystem + type: bool + + ruleIndex: + description: + - Index number of the rule + type: int + + unixReadOnly: + description: + - Should fileSystem have read only permission or not + type: bool + + unixReadWrite: + description: + - Should fileSystem have read write permission or not + type: bool +''' + +EXAMPLES = """ +- name: Create FileSystem + aws_netapp_cvs_filesystems: + state: present + region: us-east-1 + creationToken: newVolume-1 + exportPolicy: + rules: + - allowedClients: 172.16.0.4 + cifs: False + nfsv3: True + nfsv4: True + ruleIndex: 1 + unixReadOnly: True + unixReadWrite: False + quotaInBytes: 100000000000 + api_url : cds-aws-bundles.netapp.com:8080 + api_key: My_API_Key + secret_key : My_Secret_Key + +- name: Update FileSystem + aws_netapp_cvs_filesystems: + state: present + region: us-east-1 + creationToken: newVolume-1 + exportPolicy: + rules: + - allowedClients: 172.16.0.4 + cifs: False + nfsv3: True + nfsv4: True + ruleIndex: 1 + unixReadOnly: True + unixReadWrite: False + quotaInBytes: 200000000000 + api_url : cds-aws-bundles.netapp.com:8080 + api_key: My_API_Key + secret_key : My_Secret_Key + +- name: Delete FileSystem + aws_netapp_cvs_filesystems: + state: present + region: us-east-1 + creationToken: newVolume-1 + 
quotaInBytes: 100000000000 + api_url : cds-aws-bundles.netapp.com:8080 + api_key: My_API_Key + secret_key : My_Secret_Key +""" + +RETURN = """ +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.aws.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.aws.plugins.module_utils.netapp import AwsCvsRestAPI + + +class AwsCvsNetappFileSystem(object): + """ + Contains methods to parse arguments, + derive details of AWS_CVS objects + and send requests to AWS CVS via + the restApi + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check paramenters and ensure request module is installed + """ + self.argument_spec = netapp_utils.aws_cvs_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + region=dict(required=True, type='str'), + creationToken=dict(required=True, type='str', no_log=False), + quotaInBytes=dict(required=False, type='int'), + serviceLevel=dict(required=False, choices=['standard', 'premium', 'extreme']), + exportPolicy=dict( + type='dict', + options=dict( + rules=dict( + type='list', + elements='dict', + options=dict( + allowedClients=dict(required=False, type='str'), + cifs=dict(required=False, type='bool'), + nfsv3=dict(required=False, type='bool'), + nfsv4=dict(required=False, type='bool'), + ruleIndex=dict(required=False, type='int'), + unixReadOnly=dict(required=False, type='bool'), + unixReadWrite=dict(required=False, type='bool') + ) + ) + ) + ), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['region', 'creationToken', 'quotaInBytes']), + ], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + + # Calling generic AWSCVS restApi 
class + self.rest_api = AwsCvsRestAPI(self.module) + + self.data = {} + for key in self.parameters.keys(): + self.data[key] = self.parameters[key] + + def get_filesystem_id(self): + # Check given FileSystem is exists + # Return fileSystemId is found, None otherwise + list_filesystem, error = self.rest_api.get('FileSystems') + if error: + self.module.fail_json(msg=error) + + for filesystem in list_filesystem: + if filesystem['creationToken'] == self.parameters['creationToken']: + return filesystem['fileSystemId'] + return None + + def get_filesystem(self, filesystem_id): + # Get FileSystem information by fileSystemId + # Return fileSystem Information + filesystem_info, error = self.rest_api.get('FileSystems/%s' % filesystem_id) + if error: + self.module.fail_json(msg=error) + else: + return filesystem_info + return None + + def is_job_done(self, response): + # check jobId is present and equal to 'done' + # return True on success, False otherwise + try: + job_id = response['jobs'][0]['jobId'] + except TypeError: + job_id = None + + if job_id is not None and self.rest_api.get_state(job_id) == 'done': + return True + return False + + def create_filesystem(self): + # Create fileSystem + api = 'FileSystems' + response, error = self.rest_api.post(api, self.data) + if not error: + if self.is_job_done(response): + return + error = "Error: unexpected response on FileSystems create: %s" % str(response) + self.module.fail_json(msg=error) + + def delete_filesystem(self, filesystem_id): + # Delete FileSystem + api = 'FileSystems/' + filesystem_id + self.data = None + response, error = self.rest_api.delete(api, self.data) + if not error: + if self.is_job_done(response): + return + error = "Error: unexpected response on FileSystems delete: %s" % str(response) + self.module.fail_json(msg=error) + + def update_filesystem(self, filesystem_id): + # Update FileSystem + api = 'FileSystems/' + filesystem_id + response, error = self.rest_api.put(api, self.data) + if not error: + if 
self.is_job_done(response): + return + error = "Error: unexpected response on FileSystems update: %s" % str(response) + self.module.fail_json(msg=error) + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + + filesystem = None + filesystem_id = self.get_filesystem_id() + + if filesystem_id: + # Getting the FileSystem details + filesystem = self.get_filesystem(filesystem_id) + + cd_action = self.na_helper.get_cd_action(filesystem, self.parameters) + + if cd_action is None and self.parameters['state'] == 'present': + # Check if we need to update the fileSystem + update_filesystem = False + if filesystem['quotaInBytes'] is not None and 'quotaInBytes' in self.parameters \ + and filesystem['quotaInBytes'] != self.parameters['quotaInBytes']: + update_filesystem = True + elif filesystem['creationToken'] is not None and 'creationToken' in self.parameters \ + and filesystem['creationToken'] != self.parameters['creationToken']: + update_filesystem = True + elif filesystem['serviceLevel'] is not None and 'serviceLevel' in self.parameters \ + and filesystem['serviceLevel'] != self.parameters['serviceLevel']: + update_filesystem = True + elif 'exportPolicy' in filesystem and filesystem['exportPolicy']['rules'] is not None and 'exportPolicy' in self.parameters: + for rule_org in filesystem['exportPolicy']['rules']: + for rule in self.parameters['exportPolicy']['rules']: + if rule_org['allowedClients'] != rule['allowedClients']: + update_filesystem = True + elif rule_org['unixReadOnly'] != rule['unixReadOnly']: + update_filesystem = True + elif rule_org['unixReadWrite'] != rule['unixReadWrite']: + update_filesystem = True + + if update_filesystem: + self.na_helper.changed = True + + result_message = "" + + if self.na_helper.changed: + if self.module.check_mode: + # Skip changes + result_message = "Check mode, skipping changes" + else: + if cd_action == "create": + self.create_filesystem() + result_message = "FileSystem Created" + elif cd_action == 
"delete": + self.delete_filesystem(filesystem_id) + result_message = "FileSystem Deleted" + else: # modify + self.update_filesystem(filesystem_id) + result_message = "FileSystem Updated" + self.module.exit_json(changed=self.na_helper.changed, msg=result_message) + + +def main(): + """ + Main function + """ + aws_cvs_netapp_filesystem = AwsCvsNetappFileSystem() + aws_cvs_netapp_filesystem.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_pool.py b/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_pool.py new file mode 100644 index 000000000..fa4818a3b --- /dev/null +++ b/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_pool.py @@ -0,0 +1,267 @@ +#!/usr/bin/python + +# (c) 2019, NetApp Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""AWS Cloud Volumes Services - Manage Pools""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: aws_netapp_cvs_pool + +short_description: NetApp AWS Cloud Volumes Service Manage Pools. +extends_documentation_fragment: + - netapp.aws.netapp.awscvs +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create, Update, Delete Pool on AWS Cloud Volumes Service. + +options: + state: + description: + - Whether the specified pool should exist or not. + choices: ['present', 'absent'] + required: true + type: str + region: + description: + - The region to which the Pool is associated. 
+ required: true + type: str + name: + description: + - pool name ( The human readable name of the Pool ) + - name can be used for create, update and delete operations + required: true + type: str + serviceLevel: + description: + - The service level of the Pool + - can be used with pool create, update operations + choices: ['basic', 'standard', 'extreme'] + type: str + sizeInBytes: + description: + - Size of the Pool in bytes + - can be used with pool create, update operations + - minimum value is 4000000000000 bytes + type: int + vendorID: + description: + - A vendor ID for the Pool. E.g. an ID allocated by a vendor service for the Pool. + - can be used with pool create, update operations + - must be unique + type: str + from_name: + description: + - rename the existing pool name ( The human readable name of the Pool ) + - I(from_name) is the existing name, and I(name) the new name + - can be used with update operation + type: str +''' + +EXAMPLES = """ +- name: Create a new Pool + aws_netapp_cvs_pool: + state: present + name: TestPoolBB12 + serviceLevel: extreme + sizeInBytes: 4000000000000 + vendorID: ansiblePoolTestVendorBB12 + region: us-east-1 + api_url: cds-aws-bundles.netapp.com + api_key: MyAPiKey + secret_key: MySecretKey + +- name: Delete a Pool + aws_netapp_cvs_pool: + state: absent + name: TestPoolBB7 + region: us-east-1 + api_url: cds-aws-bundles.netapp.com + api_key: MyAPiKey + secret_key: MySecretKey + +- name: Update a Pool + aws_netapp_cvs_pool: + state: present + from_name: TestPoolBB12 + name: Mynewpool7 + vendorID: ansibleVendorMynewpool15 + serviceLevel: extreme + sizeInBytes: 4000000000000 + region: us-east-1 + api_url: cds-aws-bundles.netapp.com + api_key: MyAPiKey + secret_key: MySecretKey + +""" + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.aws.plugins.module_utils.netapp_module import 
NetAppModule +from ansible_collections.netapp.aws.plugins.module_utils.netapp import AwsCvsRestAPI + + +class NetAppAWSCVS(object): + '''Class for Pool operations ''' + + def __init__(self): + """ + Parse arguments, setup state variables, + """ + self.argument_spec = netapp_utils.aws_cvs_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + region=dict(required=True, type='str'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + serviceLevel=dict(required=False, choices=['basic', 'standard', 'extreme'], type='str'), + sizeInBytes=dict(required=False, type='int'), + vendorID=dict(required=False, type='str'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = AwsCvsRestAPI(self.module) + self.sizeinbytes_min_value = 4000000000000 + + def get_aws_netapp_cvs_pool(self, name=None): + """ + Returns Pool object if exists else Return None + """ + pool_info = None + + if name is None: + name = self.parameters['name'] + + pools, error = self.rest_api.get('Pools') + + if error is None and pools is not None: + for pool in pools: + if 'name' in pool and pool['region'] == self.parameters['region']: + if pool['name'] == name: + pool_info = pool + break + + return pool_info + + def create_aws_netapp_cvs_pool(self): + """ + Create a pool + """ + api = 'Pools' + + for key in ['serviceLevel', 'sizeInBytes', 'vendorID']: + if key not in self.parameters.keys() or self.parameters[key] is None: + self.module.fail_json(changed=False, msg="Mandatory key '%s' required" % (key)) + + pool = { + "name": self.parameters['name'], + "region": self.parameters['region'], + "serviceLevel": self.parameters['serviceLevel'], + "sizeInBytes": self.parameters['sizeInBytes'], + "vendorID": self.parameters['vendorID'] + } + + dummy, 
error = self.rest_api.post(api, pool) + if error is not None: + self.module.fail_json(changed=False, msg=error) + + def update_aws_netapp_cvs_pool(self, update_pool_info, pool_id): + """ + Update a pool + """ + api = 'Pools/' + pool_id + + pool = { + "name": update_pool_info['name'], + "region": self.parameters['region'], + "serviceLevel": update_pool_info['serviceLevel'], + "sizeInBytes": update_pool_info['sizeInBytes'], + "vendorID": update_pool_info['vendorID'] + } + + dummy, error = self.rest_api.put(api, pool) + if error is not None: + self.module.fail_json(changed=False, msg=error) + + def delete_aws_netapp_cvs_pool(self, pool_id): + """ + Delete a pool + """ + api = 'Pools/' + pool_id + data = None + dummy, error = self.rest_api.delete(api, data) + + if error is not None: + self.module.fail_json(changed=False, msg=error) + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + update_required = False + cd_action = None + + if 'sizeInBytes' in self.parameters.keys() and self.parameters['sizeInBytes'] < self.sizeinbytes_min_value: + self.module.fail_json(changed=False, msg="sizeInBytes should be greater than or equal to %d" % (self.sizeinbytes_min_value)) + + current = self.get_aws_netapp_cvs_pool() + if self.parameters.get('from_name'): + existing = self.get_aws_netapp_cvs_pool(self.parameters['from_name']) + rename = self.na_helper.is_rename_action(existing, current) + if rename is None: + self.module.fail_json(changed=False, msg="unable to rename pool: '%s' does not exist" % self.parameters['from_name']) + if rename: + current = existing + else: + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if cd_action is None and self.parameters['state'] == 'present': + keys_to_check = ['name', 'vendorID', 'sizeInBytes', 'serviceLevel'] + update_pool_info, update_required = self.na_helper.compare_and_update_values(current, self.parameters, keys_to_check) + + if update_required is True: + self.na_helper.changed = True + 
cd_action = 'update' + + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'update': + self.update_aws_netapp_cvs_pool(update_pool_info, current['poolId']) + elif cd_action == 'create': + self.create_aws_netapp_cvs_pool() + elif cd_action == 'delete': + self.delete_aws_netapp_cvs_pool(current['poolId']) + + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + '''Main Function''' + aws_cvs_netapp_pool = NetAppAWSCVS() + aws_cvs_netapp_pool.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_snapshots.py b/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_snapshots.py new file mode 100644 index 000000000..fa5c5f87c --- /dev/null +++ b/ansible_collections/netapp/aws/plugins/modules/aws_netapp_cvs_snapshots.py @@ -0,0 +1,245 @@ +#!/usr/bin/python + +# (c) 2019, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""AWS Cloud Volumes Services - Manage Snapshots""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: aws_netapp_cvs_snapshots + +short_description: NetApp AWS Cloud Volumes Service Manage Snapshots. +extends_documentation_fragment: + - netapp.aws.netapp.awscvs +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, Update, Delete Snapshot on AWS Cloud Volumes Service. + +options: + state: + description: + - Whether the specified snapshot should exist or not. + required: true + type: str + choices: ['present', 'absent'] + + region: + description: + - The region to which the snapshot belongs to. 
+ required: true + type: str + + name: + description: + - Name of the snapshot + required: true + type: str + + fileSystemId: + description: + - Name or Id of the filesystem. + - Required for create operation + type: str + + from_name: + description: + - ID or Name of the snapshot to rename. + - Required to create an snapshot called 'name' by renaming 'from_name'. + type: str +''' + +EXAMPLES = """ +- name: Create Snapshot + aws_netapp_cvs_snapshots: + state: present + region: us-east-1 + name: testSnapshot + fileSystemId: testVolume + api_url : cds-aws-bundles.netapp.com + api_key: myApiKey + secret_key : mySecretKey + +- name: Update Snapshot + aws_netapp_cvs_snapshots: + state: present + region: us-east-1 + name: testSnapshot - renamed + from_name: testSnapshot + fileSystemId: testVolume + api_url : cds-aws-bundles.netapp.com + api_key: myApiKey + secret_key : mySecretKey + +- name: Delete Snapshot + aws_netapp_cvs_snapshots: + state: absent + region: us-east-1 + name: testSnapshot + api_url : cds-aws-bundles.netapp.com + api_key: myApiKey + secret_key : mySecretKey +""" + +RETURN = """ +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.aws.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.aws.plugins.module_utils.netapp import AwsCvsRestAPI + + +class AwsCvsNetappSnapshot(object): + """ + Contains methods to parse arguments, + derive details of AWS_CVS objects + and send requests to AWS CVS via + the restApi + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check paramenters and ensure request module is installed + """ + self.argument_spec = netapp_utils.aws_cvs_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + region=dict(required=True, type='str'), + name=dict(required=True, type='str'), + 
from_name=dict(required=False, type='str'), + fileSystemId=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['fileSystemId']), + ], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic AWSCVS restApi class + self.rest_api = AwsCvsRestAPI(self.module) + + # Checking for the parameters passed and create new parameters list + self.data = {} + for key in self.parameters.keys(): + self.data[key] = self.parameters[key] + + def get_snapshot_id(self, name): + # Check if snapshot exists + # Return snpashot Id If Snapshot is found, None otherwise + list_snapshots, error = self.rest_api.get('Snapshots') + + if error: + self.module.fail_json(msg=error) + + for snapshot in list_snapshots: + if snapshot['name'] == name: + return snapshot['snapshotId'] + return None + + def get_filesystem_id(self): + # Check given FileSystem is exists + # Return fileSystemId is found, None otherwise + list_filesystem, error = self.rest_api.get('FileSystems') + + if error: + self.module.fail_json(msg=error) + for filesystem in list_filesystem: + if filesystem['fileSystemId'] == self.parameters['fileSystemId']: + return filesystem['fileSystemId'] + elif filesystem['creationToken'] == self.parameters['fileSystemId']: + return filesystem['fileSystemId'] + return None + + def create_snapshot(self): + # Create Snapshot + api = 'Snapshots' + dummy, error = self.rest_api.post(api, self.data) + if error: + self.module.fail_json(msg=error) + + def rename_snapshot(self, snapshot_id): + # Rename Snapshot + api = 'Snapshots/' + snapshot_id + dummy, error = self.rest_api.put(api, self.data) + if error: + self.module.fail_json(msg=error) + + def delete_snapshot(self, snapshot_id): + # Delete Snapshot + api = 'Snapshots/' + snapshot_id + dummy, error = self.rest_api.delete(api, 
self.data) + if error: + self.module.fail_json(msg=error) + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + self.snapshot_id = self.get_snapshot_id(self.data['name']) + + if self.snapshot_id is None and 'fileSystemId' in self.data: + self.filesystem_id = self.get_filesystem_id() + self.data['fileSystemId'] = self.filesystem_id + if self.filesystem_id is None: + self.module.fail_json(msg='Error: Specified filesystem id %s does not exist ' % self.data['fileSystemId']) + + cd_action = self.na_helper.get_cd_action(self.snapshot_id, self.data) + result_message = "" + if self.na_helper.changed: + if self.module.check_mode: + # Skip changes + result_message = "Check mode, skipping changes" + else: + if cd_action == "delete": + self.delete_snapshot(self.snapshot_id) + result_message = "Snapshot Deleted" + + elif cd_action == "create": + if 'from_name' in self.data: + # If cd_action is craete and from_name is given + snapshot_id = self.get_snapshot_id(self.data['from_name']) + if snapshot_id is not None: + # If resource pointed by from_name exists, rename the snapshot to name + self.rename_snapshot(snapshot_id) + result_message = "Snapshot Updated" + else: + # If resource pointed by from_name does not exists, error out + self.module.fail_json(msg="Resource does not exist : %s" % self.data['from_name']) + else: + self.create_snapshot() + # If from_name is not defined, Create from scratch. 
+ result_message = "Snapshot Created" + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message) + + +def main(): + """ + Main function + """ + aws_netapp_cvs_snapshots = AwsCvsNetappSnapshot() + aws_netapp_cvs_snapshots.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/aws/requirements.txt b/ansible_collections/netapp/aws/requirements.txt new file mode 100644 index 000000000..663bd1f6a --- /dev/null +++ b/ansible_collections/netapp/aws/requirements.txt @@ -0,0 +1 @@ +requests \ No newline at end of file diff --git a/ansible_collections/netapp/aws/tests/unit/compat/__init__.py b/ansible_collections/netapp/aws/tests/unit/compat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/netapp/aws/tests/unit/compat/builtins.py b/ansible_collections/netapp/aws/tests/unit/compat/builtins.py new file mode 100644 index 000000000..f60ee6782 --- /dev/null +++ b/ansible_collections/netapp/aws/tests/unit/compat/builtins.py @@ -0,0 +1,33 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' diff --git a/ansible_collections/netapp/aws/tests/unit/compat/mock.py b/ansible_collections/netapp/aws/tests/unit/compat/mock.py new file mode 100644 index 000000000..0972cd2e8 --- /dev/null +++ b/ansible_collections/netapp/aws/tests/unit/compat/mock.py @@ -0,0 +1,122 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. 
It +# is the same as the python3 stdlib mock library + +try: + # Allow wildcard import because we really do want to import all of mock's + # symbols into this compat shim + # pylint: disable=wildcard-import,unused-wildcard-import + from unittest.mock import * +except ImportError: + # Python 2 + # pylint: disable=wildcard-import,unused-wildcard-import + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +# Prior to 3.4.4, mock_open cannot handle binary read_data +if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b'\n' if isinstance(read_data, bytes) else '\n' + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. + data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read` methoddline`, and `readlines` of the + file handle to return. This is an empty string by default. 
+ """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock diff --git a/ansible_collections/netapp/aws/tests/unit/compat/unittest.py b/ansible_collections/netapp/aws/tests/unit/compat/unittest.py new file mode 100644 index 000000000..73a20cf8c --- /dev/null +++ b/ansible_collections/netapp/aws/tests/unit/compat/unittest.py @@ -0,0 +1,44 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +import pytest + +# Allow wildcard import because we really do want to import all of +# unittests's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') + + class TestCase: + """ skip everything """ + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as unittest2 may not be available') +else: + from unittest import * diff --git a/ansible_collections/netapp/aws/tests/unit/plugins/module_utils/test_netapp.py b/ansible_collections/netapp/aws/tests/unit/plugins/module_utils/test_netapp.py new file mode 100644 index 000000000..2fb3b7ba0 --- /dev/null +++ b/ansible_collections/netapp/aws/tests/unit/plugins/module_utils/test_netapp.py @@ -0,0 +1,195 @@ +# Copyright (c) 2018 NetApp +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils netapp.py ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os.path +import sys +import tempfile + +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.aws.tests.unit.compat.mock import patch + +import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +def set_module_args(args): + """prepare arguments so 
class mockResponse:
    """Minimal stand-in for a requests.Response object used by the REST transport tests."""

    def __init__(self, json_data, status_code, raise_action=None):
        # Keep both .json_data (served by json()) and .content (read directly
        # by the code under test) pointing at the same canned payload.
        self.json_data = json_data
        self.content = json_data
        self.status_code = status_code
        self.raise_action = raise_action

    def raise_for_status(self):
        """Never raises - the mocked HTTP exchange is treated as successful."""
        return None

    def json(self):
        """Return the canned payload, or raise ValueError when simulating unparsable JSON."""
        if self.raise_action != 'bad_json':
            return self.json_data
        raise ValueError(self.raise_action)
def test_has_feature_invalid_key():
    ''' has_feature() must fail through module.fail_json for an unknown feature flag '''
    flag = 'deprecation_warning_bad_key'
    module = create_module(mock_args())
    # replace Ansible's fail method with ours so the failure surfaces as AnsibleFailJson
    module.fail_json = fail_json
    with pytest.raises(AnsibleFailJson) as exc:
        netapp_utils.has_feature(module, flag)
    # the error message must identify the offending flag
    msg = 'Internal error: unexpected feature flag: %s' % flag
    assert exc.value.args[0]['msg'] == msg
@patch('requests.request')
def test_empty_get_sent_bad_json(mock_request):
    ''' GET whose response body is not valid JSON: the REST layer must report an error '''
    # raise_action='bad_json' makes mockResponse.json() raise ValueError, which
    # AwsCvsRestAPI is expected to convert into an 'Expecting json' error string.
    mock_request.return_value = mockResponse(json_data='anything', status_code=200, raise_action='bad_json')
    rest_api = create_restapi_object(mock_args())
    message, error = rest_api.get('api', None)
    assert error
    assert 'Expecting json, got: anything' in error
    # dump the collected diagnostics so pytest -s shows what was recorded
    print('errors:', rest_api.errors)
    print('debug:', rest_api.debug_logs)
ansible_collections.netapp.aws.tests.unit.compat.mock import patch +import ansible_collections.netapp.aws.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_active_directory \ + import AwsCvsNetappActiveDir as ad_module + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict({ + 'state': 'present', + 'DNS': '101.102.103.123', + 'domain': 'mydomain.com', + 'password': 'netapp1!', + 'username': 'myuser', + 'api_url': 'myapiurl.com', + 'secret_key': 'mysecretkey', + 'api_key': 'myapikey' + }) + + def set_default_args_pass_check(self): + 
return dict({ + 'state': 'present', + 'DNS': '101.102.103.123', + 'domain': 'mydomain.com', + 'password': 'netapp1!', + 'region': 'us-east-1', + 'netBIOS': 'testing', + 'username': 'myuser', + 'api_url': 'myapiurl.com', + 'secret_key': 'mysecretkey', + 'api_key': 'myapikey' + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + ad_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_module_fail_when_required_args_present(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + ad_module() + exit_json(changed=True, msg="TestCase Fail when required ars are present") + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_active_directory.AwsCvsNetappActiveDir.get_activedirectory_id') + @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_active_directory.AwsCvsNetappActiveDir.get_activedirectory') + @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.post') + def test_create_aws_netapp_cvs_activedir(self, get_post_api, get_aws_api, get_ad_id): + set_module_args(self.set_default_args_pass_check()) + my_obj = ad_module() + + get_aws_api.return_value = None + get_post_api.return_value = None, None + get_ad_id.return_value = "123" + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_aws_netapp_cvs_active_directory: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_filesystems.py b/ansible_collections/netapp/aws/tests/unit/plugins/modules/test_aws_netapp_cvs_filesystems.py new file mode 100644 index 000000000..b5a4bad84 --- /dev/null +++ 
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over exit_json; package return data into an exception"""
    # Real modules terminate via exit_json(); raising instead lets the test
    # observe the result dict through pytest.raises(AnsibleExitJson).
    if 'changed' not in kwargs:
        # mirror AnsibleModule behavior: 'changed' is always present in the result
        kwargs['changed'] = False
    raise AnsibleExitJson(kwargs)
AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict({ + 'creationToken': 'TestFilesystem', + 'region': 'us-east-1', + 'quotaInBytes': 3424, + 'serviceLevel': 'standard', + 'api_url': 'hostname.com', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'creationToken': 'TestFilesystem', + 'region': 'us-east-1', + 'quotaInBytes': 3424, + 'serviceLevel': 'standard', + 'api_url': 'hostname.com', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_args_create_aws_netapp_cvs_filesystems(self): + return dict({ + 'state': 'present', + 'creationToken': 'TestFilesystem', + 'region': 'us-east-1', + 'quotaInBytes': 3424, + 'serviceLevel': 'standard', + 'api_url': 'hostname.com', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_args_delete_aws_netapp_cvs_filesystems(self): + return dict({ + 'state': 'absent', + 'creationToken': 'TestFilesystem', + 'region': 'us-east-1', + 'api_url': 'hostname.com', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + fileSystem_module() + print('Info: test_module_fail_when_required_args_missing: %s' % exc.value.args[0]['msg']) + + def test_module_fail_when_required_args_present(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + fileSystem_module() + 
    @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_filesystems.AwsCvsNetappFileSystem.get_filesystem_id')
    @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.get_state')
    @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.post')
    def test_create_aws_netapp_cvs_snapshots_pass(self, get_post_api, get_state_api, get_filesystem_id):
        # NOTE(review): the name says "snapshots" but this exercises *filesystem*
        # creation (it is the filesystems test file) - consider renaming.
        set_module_args(self.set_args_create_aws_netapp_cvs_filesystems())
        my_obj = fileSystem_module()
        # no existing filesystem -> the module should take the create path
        get_filesystem_id.return_value = None
        # report the async job as finished immediately
        get_state_api.return_value = 'done'
        response = {'jobs': [{'jobId': 'dummy'}]}
        get_post_api.return_value = response, None
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_create_aws_netapp_cvs_filesystem_pass: %s' % repr(exc.value.args[0]))
        assert exc.value.args[0]['changed']
def set_module_args(args):
    """prepare arguments so that they will be picked up during module creation"""
    # AnsibleModule reads its parameters from basic._ANSIBLE_ARGS; injecting the
    # JSON-serialized dict here simulates how Ansible invokes a module.
    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(args)  # pylint: disable=protected-access
exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict({ + 'from_name': 'TestPoolAA', + 'name': 'TestPoolAA_new', + 'serviceLevel': 'standard', + 'sizeInBytes': 4000000000000, + 'vendorID': 'ansiblePoolTestVendorA', + 'region': 'us-east-1', + 'api_url': 'hostname.invalid', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'from_name': 'TestPoolAA', + 'name': 'TestPoolAA_new', + 'serviceLevel': 'standard', + 'sizeInBytes': 4000000000000, + 'vendorID': 'ansiblePoolTestVendorA', + 'region': 'us-east-1', + 'api_url': 'hostname.invalid', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_args_create_aws_netapp_cvs_pool(self): + return dict({ + 'state': 'present', + 'name': 'TestPoolAA', + 'serviceLevel': 'standard', + 'sizeInBytes': 4000000000000, + 'vendorID': 'ansiblePoolTestVendorA', + 'region': 'us-east-1', + 'api_url': 'hostname.invalid', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_args_update_aws_netapp_cvs_pool(self): + return dict({ + 'state': 'present', + 'from_name': 'TestPoolAA', + 'name': 'TestPoolAA_new', + 'serviceLevel': 'standard', + 'sizeInBytes': 4000000000000, + 'vendorID': 'ansiblePoolTestVendorA', + 'region': 'us-east-1', + 'api_url': 'hostname.invalid', + 'api_key': 
'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_args_delete_aws_netapp_cvs_pool(self): + return dict({ + 'state': 'absent', + 'name': 'TestPoolAA', + 'region': 'us-east-1', + 'api_url': 'hostname.invalid', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + pool_module() + print('Info: test_module_fail_when_required_args_missing: %s' % exc.value.args[0]['msg']) + + def test_module_pass_when_required_args_present(self): + ''' required arguments are present ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + pool_module() + exit_json(changed=True, msg="Induced arguments check") + print('Info: test_module_pass_when_required_args_present: %s' % exc.value.args[0]['msg']) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool') + @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.put') + def test_update_aws_netapp_cvs_pool_pass(self, get_put_api, get_aws_api): + set_module_args(self.set_args_update_aws_netapp_cvs_pool()) + my_obj = pool_module() + my_pool = { + "name": "Dummyname", + "poolId": "1f63b3d0-4fd4-b4fe-1ed6-c62f5f20d975", + "region": "us-east-1", + "serviceLevel": "extreme", + "sizeInBytes": 40000000000000000, + "state": "available", + "vendorID": "Dummy" + } + get_aws_api.return_value = my_pool + get_put_api.return_value = my_pool, None + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_update_aws_netapp_cvs_pool_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool') + 
    @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool')
    @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.post')
    def test_create_aws_netapp_cvs_pool_pass(self, get_post_api, get_aws_api):
        ''' creating a pool that does not exist yet must report changed=True '''
        set_module_args(self.set_args_create_aws_netapp_cvs_pool())
        my_obj = pool_module()
        # no existing pool -> the module should take the create path
        get_aws_api.return_value = None
        # post() returns (response, error); (None, None) means success
        get_post_api.return_value = None, None
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_create_aws_netapp_cvs_pool_pass: %s' % repr(exc.value))
        assert exc.value.args[0]['changed']
    @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_pool.NetAppAWSCVS.get_aws_netapp_cvs_pool')
    @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.delete')
    def test_delete_aws_netapp_cvs_pool_pass(self, get_delete_api, get_aws_api):
        ''' deleting an existing pool must report changed=True '''
        set_module_args(self.set_args_delete_aws_netapp_cvs_pool())
        my_obj = pool_module()
        # canned record representing the pool that currently exists
        my_pool = {
            "name": "Dummyname",
            "poolId": "1f63b3d0-4fd4-b4fe-1ed6-c62f5f20d975",
            "region": "us-east-1",
            "serviceLevel": "extreme",
            "sizeInBytes": 40000000000000000,
            "state": "available",
            "vendorID": "Dummy"
        }
        get_aws_api.return_value = my_pool
        # delete() returns (response, error); (None, None) means success
        get_delete_api.return_value = None, None
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_delete_aws_netapp_cvs_pool_pass: %s' % repr(exc.value))
        assert exc.value.args[0]['changed']
def set_module_args(args):
    """prepare arguments so that they will be picked up during module creation"""
    # AnsibleModule reads its parameters from basic._ANSIBLE_ARGS, so injecting
    # the JSON-serialized dict here simulates how Ansible invokes a module.
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
= False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict({ + 'name': 'TestFilesystem', + 'fileSystemId': 'standard', + 'from_name': 'from_TestFilesystem', + 'region': 'us-east-1', + 'api_url': 'hostname.com', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'name': 'testSnapshot', + 'fileSystemId': 'standard', + 'from_name': 'from_TestFilesystem', + 'region': 'us-east-1', + 'api_url': 'hostname.com', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_args_create_aws_netapp_cvs_snapshots(self): + return dict({ + 'state': 'present', + 'name': 'testSnapshot', + 'fileSystemId': '123-4213-432-432', + 'region': 'us-east-1', + 'api_url': 'hostname.com', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def set_args_delete_aws_netapp_cvs_snapshots(self): + return dict({ + 'state': 'absent', + 'name': 'testSnapshot', + 'region': 'us-east-1', + 'api_url': 'hostname.com', + 'api_key': 'myapikey', + 'secret_key': 'mysecretkey' + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + snapshot_module() + print('Info: test_module_fail_when_required_args_missing: %s' % exc.value.args[0]['msg']) + + def test_module_fail_when_required_args_present(self): + ''' 
    @patch('ansible_collections.netapp.aws.plugins.modules.aws_netapp_cvs_snapshots.AwsCvsNetappSnapshot.get_snapshot_id')
    @patch('ansible_collections.netapp.aws.plugins.module_utils.netapp.AwsCvsRestAPI.delete')
    def test_delete_aws_netapp_cvs_snapshots_pass(self, get_post_api, get_snapshot_id):
        ''' deleting an existing snapshot must report changed=True '''
        # NOTE(review): the parameter is named get_post_api but it actually mocks
        # AwsCvsRestAPI.delete (mocks bind to decorators bottom-up) - consider renaming.
        set_module_args(self.set_args_delete_aws_netapp_cvs_snapshots())
        my_obj = snapshot_module()
        # snapshot exists -> the module should take the delete path
        get_snapshot_id.return_value = "1f63b3d0-4fd4-b4fe-1ed6-c62f5f20d975"
        # delete() returns (response, error); (None, None) means success
        get_post_api.return_value = None, None
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_create_aws_netapp_cvs_snapshots_pass: %s' % repr(exc.value.args[0]))
        assert exc.value.args[0]['changed']
b/ansible_collections/netapp/aws/tests/unit/requirements.txt new file mode 100644 index 000000000..b754473a9 --- /dev/null +++ b/ansible_collections/netapp/aws/tests/unit/requirements.txt @@ -0,0 +1 @@ +requests ; python_version >= '2.7' diff --git a/ansible_collections/netapp/azure/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/netapp/azure/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..ed35f3ddd --- /dev/null +++ b/ansible_collections/netapp/azure/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,210 @@ +--- +name: 🐛 Bug report +description: Create a report to help us improve + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to report a bug in netapp.azure!** + + + ⚠ + Verify first that your issue is not [already reported on + GitHub][issue search] and keep in mind that we may have to keep + the current behavior because [every change breaks someone's + workflow][XKCD 1172]. + We try to be mindful about this. + + Also test if the latest release and devel branch are affected too. + + + **Tip:** If you are seeking community support, please consider + [Join our Slack community][ML||IRC]. + + + + [ML||IRC]: + https://join.slack.com/t/netapppub/shared_invite/zt-njcjx2sh-1VR2mEDvPcJAmPutOnP~mg + + [issue search]: ../search?q=is%3Aissue&type=issues + + [XKCD 1172]: https://xkcd.com/1172/ + + +- type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. + placeholder: >- + When I try to do X with netapp.azure from the devel branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the rst file, module, plugin, task or + feature below, *use your best guess if unsure*. + + + **Tip:** Cannot find it in this repository? 
Please be advised that + the source for some parts of the documentation are hosted outside + of this repository. If the page you are reporting describes + modules/plugins/etc that are not officially supported by the + Ansible Core Engineering team, there is a good chance that it is + coming from one of the [Ansible Collections maintained by the + community][collections org]. If this is the case, please make sure + to file an issue under the appropriate project there instead. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` below, under + the prompt line. Please don't wrap it with tripple backticks — your + whole input will be turned into a code snippet automatically. + render: console + value: | + $ ansible --version + placeholder: | + $ ansible --version + ansible [core 2.11.0b4.post0] (detached HEAD ref: refs/) last updated 2021/04/02 00:33:35 (GMT +200) + config file = None + configured module search path = ['~/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible python module location = ~/src/github/ansible/ansible/lib/ansible + ansible collection location = ~/.ansible/collections:/usr/share/ansible/collections + executable location = bin/ansible + python version = 3.9.0 (default, Oct 26 2020, 13:08:59) [GCC 10.2.0] + jinja version = 2.11.3 + libyaml = True + validations: + required: true + +- type: textarea + attributes: + label: Azure NetApp Files Collection Version + description: >- + Azure NetApp Files Collection Version. 
Run `ansible-galaxy collection list` and copy the entire output + render: console + value: | + $ ansible-galaxy collection list + validations: + required: true + +- type: textarea + attributes: + label: Playbook + description: >- + The task from the playbook that is giving you the issue + render: console + validations: + required: true + +- type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: | + 1. Implement the following playbook: + + ```yaml + --- + # ping.yml + - hosts: all + gather_facts: false + tasks: + - ping: + ... + ``` + 2. Then run `ANSIBLE_DEBUG=1 ansible-playbook ping.yml -vvvvv` + 3. An error occurs. + validations: + required: true + +- type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y and was shocked + that it did not. + validations: + required: true + +- type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output and don't wrap it with triple backticks — your + whole input will be turned into a code snippet automatically. + render: console + placeholder: >- + Certificate did not match expected hostname: files.pythonhosted.org. 
Certificate: {'notAfter': 'Apr 28 19:20:25 2021 GMT', 'subjectAltName': ((u'DNS', 'r.ssl.fastly.net'), (u'DNS', '*.catchpoint.com'), (u'DNS', '*.cnn.io'), (u'DNS', '*.dollarshaveclub.com'), (u'DNS', '*.eater.com'), (u'DNS', '*.fastly.picmonkey.com'), (u'DNS', '*.files.saymedia-content.com'), (u'DNS', '*.ft.com'), (u'DNS', '*.meetupstatic.com'), (u'DNS', '*.nfl.com'), (u'DNS', '*.pagar.me'), (u'DNS', '*.picmonkey.com'), (u'DNS', '*.realself.com'), (u'DNS', '*.sbnation.com'), (u'DNS', '*.shakr.com'), (u'DNS', '*.streamable.com'), (u'DNS', '*.surfly.com'), (u'DNS', '*.theverge.com'), (u'DNS', '*.thrillist.com'), (u'DNS', '*.vox-cdn.com'), (u'DNS', '*.vox.com'), (u'DNS', '*.voxmedia.com'), (u'DNS', 'eater.com'), (u'DNS', 'ft.com'), (u'DNS', 'i.gse.io'), (u'DNS', 'picmonkey.com'), (u'DNS', 'realself.com'), (u'DNS', 'static.wixstatic.com'), (u'DNS', 'streamable.com'), (u'DNS', 'surfly.com'), (u'DNS', 'theverge.com'), (u'DNS', 'vox-cdn.com'), (u'DNS', 'vox.com'), (u'DNS', 'www.joyent.com')), 'subject': ((('countryName', u'US'),), (('stateOrProvinceName', u'California'),), (('localityName', u'San Francisco'),), (('organizationName', u'Fastly, Inc'),), (('commonName', u'r.ssl.fastly.net'),))} + Exception: + Traceback (most recent call last): + File "/usr/local/lib/python2.6/dist-packages/pip/basecommand.py", line 215, in main + status = self.run(options, args) + File "/usr/local/lib/python2.6/dist-packages/pip/commands/install.py", line 335, in run + wb.build(autobuilding=True) + File "/usr/local/lib/python2.6/dist-packages/pip/wheel.py", line 749, in build + self.requirement_set.prepare_files(self.finder) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 380, in prepare_files + ignore_dependencies=self.ignore_dependencies)) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 620, in _prepare_file + session=self.session, hashes=hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 821, in unpack_url + 
hashes=hashes + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 659, in unpack_http_url + hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 853, in _download_http_url + stream=True, + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 521, in get + return self.request('GET', url, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 386, in request + return super(PipSession, self).request(method, url, *args, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 508, in request + resp = self.send(prep, **send_kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 618, in send + r = adapter.send(request, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/cachecontrol/adapter.py", line 47, in send + resp = super(CacheControlAdapter, self).send(request, **kw) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/adapters.py", line 506, in send + raise SSLError(e, request=request) + SSLError: HTTPSConnectionPool(host='files.pythonhosted.org', port=443): Max retries exceeded with url: /packages/ef/ab/aa12712415809bf698e719b307419f953e25344e8f42d557533d7a02b276/netapp_lib-2020.7.16-py2-none-any.whl (Caused by SSLError(CertificateError("hostname 'files.pythonhosted.org' doesn't match either of 'r.ssl.fastly.net', '*.catchpoint.com', '*.cnn.io', '*.dollarshaveclub.com', '*.eater.com', '*.fastly.picmonkey.com', '*.files.saymedia-content.com', '*.ft.com', '*.meetupstatic.com', '*.nfl.com', '*.pagar.me', '*.picmonkey.com', '*.realself.com', '*.sbnation.com', '*.shakr.com', '*.streamable.com', '*.surfly.com', '*.theverge.com', '*.thrillist.com', '*.vox-cdn.com', '*.vox.com', '*.voxmedia.com', 'eater.com', 'ft.com', 'i.gse.io', 'picmonkey.com', 'realself.com', 'static.wixstatic.com', 'streamable.com', 'surfly.com', 'theverge.com', 'vox-cdn.com', 'vox.com', 
'www.joyent.com'",),)) + ERROR: Command "/usr/bin/python2.6 /root/ansible/test/lib/ansible_test/_data/quiet_pip.py install --disable-pip-version-check -r /root/ansible/test/lib/ansible_test/_data/requirements/units.txt -r tests/unit/requirements.txt -c /root/ansible/test/lib/ansible_test/_data/requirements/constraints.txt" returned exit status 2. + ERROR: Command "docker exec d47eb360db4ce779c1f690db964655b76e68895c4360ff252c46fe7fe6f5c75a /usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/root/ansible_collections/netapp/ontap LC_ALL=en_US.UTF-8 /usr/bin/python3.6 /root/ansible/bin/ansible-test units --metadata tests/output/.tmp/metadata-9i2qfrcl.json --truncate 200 --redact --color yes --requirements --python default --requirements-mode only" returned exit status 1. + validations: + required: true + + +- type: markdown + attributes: + value: > + *One last thing...* + + + Thank you for your collaboration! + + +... diff --git a/ansible_collections/netapp/azure/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/netapp/azure/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..621d52529 --- /dev/null +++ b/ansible_collections/netapp/azure/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,100 @@ +--- +name: ✨ Feature request +description: Suggest an idea for this project + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to suggest a feature for netapp.azure!** + + 💡 + Before you go ahead with your request, please first consider if it + would be useful for majority of the netapp.azure users. As a + general rule of thumb, any feature that is only of interest to a + small sub group should be [implemented in a third-party Ansible + Collection][contribute to collections] or maybe even just your + project alone. Be mindful of the fact that the essential + netapp.azure features have a broad impact. + + +
+ + ❗ Every change breaks someone's workflow. + + + + [![❗ Every change breaks someone's workflow. + ](https://imgs.xkcd.com/comics/workflow.png) + ](https://xkcd.com/1172/) +
+ + + ⚠ + Verify first that your idea is not [already requested on + GitHub][issue search]. + + Also test if the main branch does not already implement this. + + +- type: textarea + attributes: + label: Summary + description: > + Describe the new feature/improvement you would like briefly below. + + + What's the problem this feature will solve? + + What are you trying to do, that you are unable to achieve + with netapp.azure as it currently stands? + + + * Provide examples of real-world use cases that this would enable + and how it solves the problem you described. + + * How do you solve this now? + + * Have you tried to work around the problem using other tools? + + * Could there be a different approach to solving this issue? + + placeholder: >- + I am trying to do X with netapp.azure from the devel branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of netapp.azure because of Z. + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: >- + I asked on https://stackoverflow.com/.... and the community + advised me to do X, Y and Z. + validations: + required: true + +... 
diff --git a/ansible_collections/netapp/azure/.github/workflows/coverage.yml b/ansible_collections/netapp/azure/.github/workflows/coverage.yml new file mode 100644 index 000000000..924232e4c --- /dev/null +++ b/ansible_collections/netapp/azure/.github/workflows/coverage.yml @@ -0,0 +1,45 @@ +name: NetApp.azure Ansible Coverage + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Coverage on Azure + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible stable-2.11 + run: pip install https://github.com/ansible/ansible/archive/stable-2.11.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/azure/ + rsync -av . ansible_collections/netapp/azure/ --exclude ansible_collections/netapp/azure/ + + - name: Run Unit Tests + run: ansible-test units --coverage --color --docker --python 3.8 + working-directory: ansible_collections/netapp/azure/ + + # ansible-test supports producing code coverage data + - name: Generate coverage report + run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ansible_collections/netapp/azure/ + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + working-directory: ansible_collections/netapp/azure/ + verbose: true \ No newline at end of file diff --git a/ansible_collections/netapp/azure/.github/workflows/main.yml b/ansible_collections/netapp/azure/.github/workflows/main.yml new file mode 100644 index 000000000..7786df5a7 --- /dev/null +++ b/ansible_collections/netapp/azure/.github/workflows/main.yml @@ -0,0 +1,64 @@ +name: NetApp Ansible CI + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity_azure: + name: Sanity (${{ matrix.ansible }} on Azure ANF) + runs-on: 
ubuntu-latest + strategy: + matrix: + ansible: + - stable-2.9 + - stable-2.10 + - stable-2.11 + - stable-2.12 + - devel + collection: [azure] + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Delete collection tar file (azure) + run: rm -f /tmp/netapp-azure* + + - name: Check collection path (azure) + run: | + pwd + ls + + - name: Build collection (azure) + run: ansible-galaxy collection build --output-path /tmp . + + - name: Install collection (azure) + run: ansible-galaxy collection install /tmp/netapp-azure* + + - name: Delete collection tar file (azure) + run: rm -f /tmp/netapp-azure* + + - name: Run sanity tests (azure) + run: ansible-test sanity --docker -v --color + working-directory: /home/runner/.ansible/collections/ansible_collections/netapp/azure + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/azure/ + rsync -av . ansible_collections/netapp/azure/ --exclude ansible_collections/netapp/azure/ + + - name: Run Unit Tests + run: ansible-test units --docker -v --color + working-directory: ansible_collections/netapp/azure/ diff --git a/ansible_collections/netapp/azure/CHANGELOG.rst b/ansible_collections/netapp/azure/CHANGELOG.rst new file mode 100644 index 000000000..fc18835a2 --- /dev/null +++ b/ansible_collections/netapp/azure/CHANGELOG.rst @@ -0,0 +1,171 @@ +================================================= +Azure NetApp Files (ANF) Collection Release Notes +================================================= + +.. contents:: Topics + + +v21.10.0 +======== + +Minor Changes +------------- + +- PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. 
+ +v21.9.0 +======= + +Minor Changes +------------- + +- azure_rm_netapp_volume - new option ``feature_flags`` to selectively enable/disable a feature. + +Bugfixes +-------- + +- azure_rm_netapp_volume - 'Change Ownership' is not permitted when creating NFSv4.1 volume with latest azure-mgmt-netapp package (4.0.0). + +v21.8.1 +======= + +Bugfixes +-------- + +- Hub Automation cannot generate documentation (cannot use doc fragments from another collection). + +v21.8.0 +======= + +Bugfixes +-------- + +- fix CI pipeline as azcollection does not support python 2.6. +- fix CI pipeline as ignores are not required with latest azcollection. + +v21.7.0 +======= + +Bugfixes +-------- + +- fix CI pipeline to work with azcollection, and isolate UTs from azcollection. + +v21.6.0 +======= + +Minor Changes +------------- + +- azure_rm_netapp_account - support additional authentication schemes provided by AzureRMModuleBase. +- azure_rm_netapp_capacity_pool - support additional authentication schemes provided by AzureRMModuleBase, and tags. +- azure_rm_netapp_capacity_pool - wait for completion when creating, modifying, or deleting a pool. +- azure_rm_netapp_snapshot - support additional authentication schemes provided by AzureRMModuleBase. +- azure_rm_netapp_snapshot - wait for completion when creating, modifying, or deleting a pool. +- azure_rm_netapp_volume - support additional authentication schemes provided by AzureRMModuleBase, and tags. + +v21.5.0 +======= + +Minor Changes +------------- + +- azure_rm_netapp_volume - enable changes in volume size. +- azure_rm_netapp_volume - rename msg to mount_path, as documented in RETURN. + +v21.3.0 +======= + +Minor Changes +------------- + +- azure_rm_netapp_account - new option ``active_directories`` to support SMB volumes. +- azure_rm_netapp_account - new suboptions ``ad_name``, ``kdc_ip``, ``service_root_ca_certificate``` for Active Directory. +- azure_rm_netapp_volume - new option ``protocol_types`` to support SMB volumes. 
+ +Bugfixes +-------- + +- azure_rm_netapp_account - wait for job completion for asynchronous requests, and report belated errors. +- support for azure-mgmt-netapp 1.0.0, while maintaining compatibility with 0.10.0. + +v21.2.0 +======= + +Minor Changes +------------- + +- azure_rm_netapp_account - new option ``active_directories`` to support SMB volumes. +- azure_rm_netapp_volume - new option ``protocol_types`` to support SMB volumes. +- azure_rm_netapp_volume - new option ``subnet_name`` as subnet_id is ambiguous. subnet_id is now aliased to subnet_name. + +Bugfixes +-------- + +- azure_rm_netapp_volume - fix 'Nonetype' object is not subscriptable exception when mount target is not created. + +v20.8.0 +======= + +Minor Changes +------------- + +- azure_rm_netapp_capacity_pool - Updated ANF capacity pool modify function for size parameter mandatory issue. +- use a three group format for version_added. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. + +v20.7.0 +======= + +Bugfixes +-------- + +- azure_rm_netapp_capacity_pool - fixed idempotency for delete operation. + +v20.6.0 +======= + +Minor Changes +------------- + +- azure_rm_netapp_capacity_pool - now allows modify for size. +- azure_rm_netapp_volume - now returns complete mount_path of the volume specified. + +v20.5.0 +======= + +Minor Changes +------------- + +- azure_rm_netapp_account - new option ``tags``. +- azure_rm_netapp_capacity_pool - new option ``service_level``. +- azure_rm_netapp_volume - new option ``size``. +- azure_rm_netapp_volume - new option ``vnet_resource_group_for_subnet``, resource group for virtual_network and subnet_id to be used. +- azure_rm_netapp_volume - now returns mount_path of the volume specified. + +v20.4.0 +======= + +Bugfixes +-------- + +- fix changes to azure-mgmt-netapp as per new release. +- removed ONTAP dependency import. + +v20.2.0 +======= + +Bugfixes +-------- + +- galaxy.yml - fix path to github repository. 
+ +v19.10.0 +======== + +New Modules +----------- + +- netapp.azure.azure_rm_netapp_account - Manage NetApp Azure Files Account +- netapp.azure.azure_rm_netapp_capacity_pool - Manage NetApp Azure Files capacity pool +- netapp.azure.azure_rm_netapp_snapshot - Manage NetApp Azure Files Snapshot +- netapp.azure.azure_rm_netapp_volume - Manage NetApp Azure Files Volume diff --git a/ansible_collections/netapp/azure/COPYING b/ansible_collections/netapp/azure/COPYING new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/ansible_collections/netapp/azure/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ +
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/netapp/azure/FILES.json b/ansible_collections/netapp/azure/FILES.json new file mode 100644 index 000000000..cbba60ea2 --- /dev/null +++ b/ansible_collections/netapp/azure/FILES.json @@ -0,0 +1,705 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3fc6781dfd99664ea3df19e10ff3fd4cb5fcb9a1ffab3974bf29a072688fac2b", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05eeac3f09944a70935c66674fecd48fc0992c2706bc4168b10f54dff6517b6e", + "format": 1 + }, + { + "name": "plugins/doc_fragments/azure.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e2fddafaafe92473737d8a9b23ad23ce82703b0144a9930f8ace78f7138144f9", + "format": 1 + }, + { + "name": "plugins/doc_fragments/azure_tags.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8c9d720dbc7605d3ee66799e8e81d0886e404c9a07b6b9b8edc844e0646de64", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/azure_rm_netapp_common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ad77c0d169b499b3bbff9bfc98607b329da4c0939a30b862523cc238dfa349c8", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44ebf2058eef2cc291d84bda55a9d22745a54ea08244d2c3fa498c835a60412f", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, 
+ "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_netapp_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57f7917d51e630e28625b9aed1e055fc993912079ec84066b9b4dc00a79fc076", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_netapp_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb97e5fe758ded5c061c587caa4b5ece7e5093aa8735d28b5915b8ffae10493d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_netapp_account.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "371e98c5eb914d5b5b29d5a38a0128a473a56503b24a0117cad094cd35fa4f68", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_netapp_capacity_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45ae9bf9be901a5744da83e0476439cc4afc583f996b5116e63a3ff1388789aa", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat/unittest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cba95d18c5b39c6f49714eacf1ac77452c2e32fa087c03cf01aacd19ae597b0f", + "format": 1 + }, + { + "name": "tests/unit/compat/builtins.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1", + "format": 1 + }, + { + "name": "tests/unit/compat/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/compat/mock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1776552fe8fe8ec7bc9bb0d8323b4892f297ac3a000d438ffce73c82487eb688", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26d17aa4e1be7ddd99dd8150f3f72726693835df4480f09f63d665ba4568054d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_azure_rm_netapp_volume_import.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6fb1715ddd3de877a50b0f8dd97d5b7e4518ca309f81f188eeedf0da7bd8ed6d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_azure_rm_netapp_capacity_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35b9437b1e70cb214f9fb2599f48a2d51fb7f025be21487608a527000cdca225", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_azure_rm_netapp_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fb834aa39535b9eccaab1c67293e4fd44d96c2b8aec2bf963d3dee0e6e939be", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_azure_rm_netapp_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "42e2a55717c6640dc3343055a758b4f930b9010c9d49351dddcc86e828557d5a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_azure_rm_netapp_account.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d138b9ecf3ad3b50bc4757b2dce610f770523903da67ace63243b66fe1959857", + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_volume", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_volume/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_volume/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37ecd68e07721ba9b164a9cf650354e65d9b522a8fa80ee5dd0a0f85ecbbb82c", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_volume/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_volume/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_volume/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_capacity_pool", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_capacity_pool/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_capacity_pool/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"725cf3d8200cc95ad524962e33008627f642ac6cb17daee6d833d7ad4990c904", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_capacity_pool/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_capacity_pool/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86ade1ce7ec530fef84440672efaf3b60c253946bb82f5f0a134a9691bc6ffad", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_capacity_pool/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d10b8feb73c18dc63753fe195137e2505f925ba1cbcdf5df43f67ad8c93f7a3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_snapshot", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_snapshot/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_snapshot/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c1ad3044c2063456e66ffca7f717b0d5b01561685df50819b3ddab8f256f1e3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_snapshot/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_snapshot/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_snapshot/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_account", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_account/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_account/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af6c1c6108bde6652e3e6c3fa84bbf33b9e8e8d214fd5e6e2096d8a900993adb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_account/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_account/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_netapp_account/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, + { + "name": "tests/runner", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/runner/requirements", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/runner/requirements/integration.cloud.azure.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48edd11776e45cc283be0d76efed83271645ff082d0d22c23d8a16ede3f13104", + "format": 1 + }, + { + "name": "tests/runner/requirements/unit.cloud.azure.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48edd11776e45cc283be0d76efed83271645ff082d0d22c23d8a16ede3f13104", + "format": 1 + }, + { + "name": "tests/runner/requirements/requirements-azure.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be5c6dc9b1fe6ffbec88625d6c225df060724e678bf4b179ee4acb6ba18a9fe2", + "format": 1 + }, + { + "name": 
"tests/runner/requirements/units.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48edd11776e45cc283be0d76efed83271645ff082d0d22c23d8a16ede3f13104", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57bdfe68549cbe4f7282f9937425595fee23b756980f10c7c0c2bcb298ac057d", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3935.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "275e9147e14cace400fea4ad8d2d88e2a3c41daa65545cf5ade52ffac5bea3be", + "format": 1 + }, + { + "name": "changelogs/fragments/20.5.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e801f1b985b89427664b5bbc3e86c7f94e0515fe71584171438bdd2e6423f802", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3704.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f7bf4dd5809a694c63075c88409b71d97b3c636fb4c60883e23e1d9c6acf256", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3505.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2117a356f790e63f26cbe90c1b8cbc8e66bc4ba44a3cec0d0f219bb3dee6a663", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3949.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46256df687411ac1d89895e32713a4830ccffcf1bb823fbf88597481881aa67f", + "format": 1 + }, + { + "name": "changelogs/fragments/20.7.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "95ad8a5b1d71afce933fdd69b3116309a6d9a559416d8ab3641470eb5286ee9f", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-3663.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ede05991f45025041ce7305e06f27f7c7747b0a5e3c61d11e5775796315ad801", + "format": 1 + }, + { + "name": "changelogs/fragments/20.2.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91918f48a406834778ff7163c92c12dd1802c0620cb681ee66f8a4709444cf5e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4001.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9b63b1230f04b472b8d55d5bd616c1687972ec14126e643de62788c9788708a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3526.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61cb464ce2c8e6aaf9a74beca5e246e4010ee68f2632eba0347e5cf3267e9510", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4246.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87733f8557c5f0c13302e47e18712cc094f8e1cf63665519a340c74baee95e1b", + "format": 1 + }, + { + "name": "changelogs/fragments/20.4.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4fd861b273aceb9ec0a456ddbae2a6f82bdd6a2244b81e9b1e903a59eaa83bf", + "format": 1 + }, + { + "name": "changelogs/fragments/20.8.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "266926d348a95b55d65ff99683f9bdfe7ad1302731c08964bb86ce714272d86c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4135.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b73cac3f25ff245e6156120be0b828339193515410dfe1746e9fe58b2fc5d1a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4070.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef62606cf209269c701e931090525e063781b9990853a20f718f250bbccd182d", + "format": 1 + }, + { + "name": "changelogs/fragments/20.6.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b5988b539e04f6be548b4f07142e208adc5974ed6d67511f2efcd5f721598124", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3849.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cfaa25e04a5dcb6f13b27a52d79dd1ee8a06002d028a86a09184b58c431fc5fd", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4416.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4224db573f34caeeb956c8728eb343a47bc2729d898001a4c6a671b780dae1bf", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdb1023b66dee056bc12c35cf7326a01c0ce7a8c1eceea032270fd24db9e1467", + "format": 1 + }, + { + "name": "changelogs/.plugin-cache.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5297a0a490809a3f9e664aebea58cf1b7c48af79e7ac4a9e1be5845359d7d016", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "edbc85fcd35436d9f71c7b5f3247522276ebc4fb00567a74873b163adcadf020", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3cb633ca474a7fb30d6f8c73956c921b1f3043850ff221fcf3d84afe4921696d", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "081b4ec5fb77d7676357b8600b547e7b2cbad8eb340333305ef5d448d832510b", + "format": 1 + }, + { + "name": ".github/workflows/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b13e2320cd388d55ecad4a6b8785ae58481464d8b44ff6494c39ca04f48c72b5", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/feature_request.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ad81e92ae7b1fbef37afde2fc57466d3d7dd20d5ab9bba1e01e0faac83228cf", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/bug_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef5f731a1c7790ce52a685a93370a5d0a5523bf75b9b0a0f0d9cc50171c60ac0", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ee760ffaaed6d3d476d1eb9fda880bbaf3a2247a5014428c058282b597131a7", + "format": 1 + }, + { + "name": "HACK.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0757d4b54e08f27761851d37143cbe15d58c324db2968fe157546992592bb382", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/azure/HACK.md b/ansible_collections/netapp/azure/HACK.md new file mode 100644 index 000000000..cbd239e0e --- /dev/null +++ b/ansible_collections/netapp/azure/HACK.md @@ -0,0 +1,13 @@ +Because of an issue in Ansible, Hub Automation cannot use doc fragments from an external collection as: +``` + - azure.azcollection.azure + - azure.azcollection.azure_tags +``` + +Red Hat asked us to make local copies of the azcollection doc fragments. They are in +``` +ansible_collections/netapp/azure/plugins/doc_fragments/azure.py +ansible_collections/netapp/azure/plugins/doc_fragments/azure_tags.py +``` + +Once the Ansible issue is fixed, we should remove these copies, as they may be out of sync with the azcollection. 
diff --git a/ansible_collections/netapp/azure/MANIFEST.json b/ansible_collections/netapp/azure/MANIFEST.json new file mode 100644 index 000000000..4fcd3fa5e --- /dev/null +++ b/ansible_collections/netapp/azure/MANIFEST.json @@ -0,0 +1,37 @@ +{ + "collection_info": { + "namespace": "netapp", + "name": "azure", + "version": "21.10.0", + "authors": [ + "NetApp Ansible Team " + ], + "readme": "README.md", + "tags": [ + "storage", + "cloud", + "netapp", + "cvs", + "anf", + "azure" + ], + "description": "Azure NetApp Files (ANF)", + "license": [], + "license_file": "COPYING", + "dependencies": { + "azure.azcollection": ">=1.0.0" + }, + "repository": "https://github.com/ansible-collections/netapp.azure", + "documentation": null, + "homepage": "https://netapp.io/configuration-management-and-automation/", + "issues": "https://github.com/ansible-collections/netapp.azure/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2d51ddefb3ce0ffdef8c468f22c2dbb09bc3c9828b31141ec8c85d8f37d0d59", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/azure/README.md b/ansible_collections/netapp/azure/README.md new file mode 100644 index 000000000..f56a07ead --- /dev/null +++ b/ansible_collections/netapp/azure/README.md @@ -0,0 +1,157 @@ +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/netapp/azure/index.html) +![example workflow](https://github.com/ansible-collections/netapp.azure/actions/workflows/main.yml/badge.svg) +[![codecov](https://codecov.io/gh/ansible-collections/netapp.azure/branch/main/graph/badge.svg?token=weBYkksxSi)](https://codecov.io/gh/ansible-collections/netapp.azure) +============================================================= + +netapp.azure + +Azure NetApp Files (ANF) Collection + +Copyright (c) 2019 NetApp, Inc. All rights reserved. 
+Specifications subject to change without notice. + +============================================================= + +# Installation +```bash +ansible-galaxy collection install netapp.azure +``` +To use Collection add the following to the top of your playbook, with out this you will be using Ansible 2.9 version of the module +``` +collections: + - netapp.azure +``` + +# Module documentation +https://docs.ansible.com/ansible/devel/collections/netapp/azure/ + +# Need help +Join our Slack Channel at [Netapp.io](http://netapp.io/slack) + +# Requirements +- python >= 2.7 +- azure >= 2.0.0 +- Python azure-mgmt. Install using ```pip install azure-mgmt``` +- Python azure-mgmt-netapp. Install using ```pip install azure-mgmt-netapp``` +- For authentication with Azure NetApp log in before you run your tasks or playbook with 'az login'. + +# Code of Conduct +This collection follows the [Ansible project's Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html). + +# Release Notes + +## 21.10.0 + +### Minor changes + - all modules - allow usage of Ansible module group defaults - for Ansible 2.12+. + +## 21.9.0 + +### New Options + - azure_rm_netapp_volume - `feature_flags` to selectively enable/disable a feature. + +### Bug Fixes + - azure_rm_netapp_volume - 'Change Ownership' is not permitted when creating NFSv4.1 volume with latest azure-mgmt-netapp package (4.0.0). + +## 21.8.1 + +### Bug Fixes + - Hub Automation cannot generate documentation (cannot use doc fragments from another collection). + +## 21.8.0 + +### Bug Fixes + +- fix CI pipeline as azcollection does not support python 2.6. +- fix CI pipeline as ignores are not required with latest azcollection. + +## 21.7.0 + +### Bug Fixes + +- fix CI pipeline to work with azcollection, and isolate UTs from azcollection. + +## 21.6.0 + +### Minor changes + + - azure_rm_netapp_account - support additional authentication schemes provided by AzureRMModuleBase. 
+ - azure_rm_netapp_capacity_pool - support additional authentication schemes provided by AzureRMModuleBase, and tags. + - azure_rm_netapp_capacity_pool - wait for completion when creating, modifying, or deleting a pool. + - azure_rm_netapp_snapshot - support additional authentication schemes provided by AzureRMModuleBase. + - azure_rm_netapp_snapshot - wait for completion when creating or deleting a snapshot. + - azure_rm_netapp_volume - support additional authentication schemes provided by AzureRMModuleBase, and tags. + +## 21.5.0 + +### Minor changes + - azure_rm_netapp_volume - enable changes in volume size. + - azure_rm_netapp_volume - rename msg to mount_path, as documented in RETURN. + +## 21.3.0 + +### New Options + - azure_rm_netapp_account - new suboptions `ad_name`, `kdc_ip`, `service_root_ca_certificate` for Active Directory. + +### Bug Fixes + - support for azure-mgmt-netapp 1.0.0, while maintaining compatibility with 0.10.0. + - azure_rm_netapp_account - wait for job completion for asynchroneous requests, and report belated errors. + +## 21.2.0 + +### New Options + - azure_rm_netapp_account: new option `active_directories` to support SMB volumes. + - azure_rm_netapp_volume: new option `protocol_types` to support SMB volumes. + +## 21.1.0 + +### New Options + - azure_rm_netapp_volume - new option `subnet_name` as subnet_id is ambiguous. subnet_id is now aliased to subnet_name. + +### Bug Fixes + - azure_rm_netapp_volume - fix 'Nonetype' object is not subscriptable exception when mount target is not created. + +## 20.8.0 + +### Module documentation changes +- azure_rm_netapp_capacity_pool: Updated ANF capacity pool modify function for `size` parameter mandatory issue. +- use a three group format for `version_added`. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. + +## 20.7.0 + +### Bug Fixes +- azure_rm_netapp_capacity_pool: fixed idempotency for delete operation. 
+ +## 20.6.0 + +### New Options +- azure_rm_netapp_capacity_pool: now allows modify for size. +- azure_rm_netapp_volume: now returns complete mount_path of the volume specified. + +## 20.5.0 + +### New Options +- azure_rm_netapp_account: new option `tags`. +- azure_rm_netapp_capacity_pool: new option `service_level`. +- azure_rm_netapp_volume: new option `size`. +- azure_rm_netapp_volume: now returns mount_path of the volume specified. +- azure_rm_netapp_volume: new option `vnet_resource_group_for_subnet`, resource group for virtual_network and subnet_id to be used. + +## 20.4.0 + +### Bug Fixes +- fix changes to azure-mgmt-netapp as per new release. +- removed ONTAP dependency import. + +## 20.2.0 + +### Bug Fixes +- galaxy.yml: fix path to github repository. + +## 19.11.0 +- Initial release. +### New Modules +- azure_rm_netapp_account: create/delete NetApp Azure Files Account. +- azure_rm_netapp_capacity_pool: create/delete NetApp Azure Files capacity pool. +- azure_rm_netapp_snapshot: create/delete NetApp Azure Files Snapshot. +- azure_rm_netapp_volume: create/delete NetApp Azure Files volume. 
diff --git a/ansible_collections/netapp/azure/changelogs/.plugin-cache.yaml b/ansible_collections/netapp/azure/changelogs/.plugin-cache.yaml new file mode 100644 index 000000000..6e247d8f7 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/.plugin-cache.yaml @@ -0,0 +1,35 @@ +plugins: + become: {} + cache: {} + callback: {} + cliconf: {} + connection: {} + httpapi: {} + inventory: {} + lookup: {} + module: + azure_rm_netapp_account: + description: Manage NetApp Azure Files Account + name: azure_rm_netapp_account + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogrmmpa_fx.collections.ansible_collections.netapp.azure.plugins.modules + version_added: 19.10.0 + azure_rm_netapp_capacity_pool: + description: Manage NetApp Azure Files capacity pool + name: azure_rm_netapp_capacity_pool + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogrmmpa_fx.collections.ansible_collections.netapp.azure.plugins.modules + version_added: 19.10.0 + azure_rm_netapp_snapshot: + description: Manage NetApp Azure Files Snapshot + name: azure_rm_netapp_snapshot + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogrmmpa_fx.collections.ansible_collections.netapp.azure.plugins.modules + version_added: 19.10.0 + azure_rm_netapp_volume: + description: Manage NetApp Azure Files Volume + name: azure_rm_netapp_volume + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogrmmpa_fx.collections.ansible_collections.netapp.azure.plugins.modules + version_added: 19.10.0 + netconf: {} + shell: {} + strategy: {} + vars: {} +version: 21.8.0 diff --git a/ansible_collections/netapp/azure/changelogs/changelog.yaml b/ansible_collections/netapp/azure/changelogs/changelog.yaml new file mode 100644 index 000000000..71085dedf --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/changelog.yaml @@ -0,0 +1,169 @@ +ancestor: null +releases: + 19.10.0: + 
modules: + - description: Manage NetApp Azure Files Account + name: azure_rm_netapp_account + namespace: '' + - description: Manage NetApp Azure Files capacity pool + name: azure_rm_netapp_capacity_pool + namespace: '' + - description: Manage NetApp Azure Files Snapshot + name: azure_rm_netapp_snapshot + namespace: '' + - description: Manage NetApp Azure Files Volume + name: azure_rm_netapp_volume + namespace: '' + release_date: '2019-11-14' + 20.2.0: + changes: + bugfixes: + - galaxy.yml - fix path to github repository. + fragments: + - 20.2.0.yaml + release_date: '2020-02-05' + 20.4.0: + changes: + bugfixes: + - fix changes to azure-mgmt-netapp as per new release. + - removed ONTAP dependency import. + fragments: + - 20.4.0.yaml + release_date: '2020-04-21' + 20.5.0: + changes: + minor_changes: + - azure_rm_netapp_account - new option ``tags``. + - azure_rm_netapp_capacity_pool - new option ``service_level``. + - azure_rm_netapp_volume - new option ``size``. + - azure_rm_netapp_volume - new option ``vnet_resource_group_for_subnet``, resource + group for virtual_network and subnet_id to be used. + - azure_rm_netapp_volume - now returns mount_path of the volume specified. + fragments: + - 20.5.0.yaml + release_date: '2020-05-06' + 20.6.0: + changes: + minor_changes: + - azure_rm_netapp_capacity_pool - now allows modify for size. + - azure_rm_netapp_volume - now returns complete mount_path of the volume specified. + fragments: + - 20.6.0.yaml + release_date: '2020-06-03' + 20.7.0: + changes: + bugfixes: + - azure_rm_netapp_capacity_pool - fixed idempotency for delete operation. + fragments: + - 20.7.0.yaml + release_date: '2020-06-24' + 20.8.0: + changes: + minor_changes: + - azure_rm_netapp_capacity_pool - Updated ANF capacity pool modify function + for size parameter mandatory issue. + - use a three group format for version_added. So 2.7 becomes 2.7.0. Same thing + for 2.8 and 2.9. 
+ fragments: + - 20.8.0.yaml + release_date: '2020-08-05' + 21.10.0: + changes: + minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. + fragments: + - DEVOPS-4416.yaml + release_date: '2021-11-03' + 21.2.0: + changes: + bugfixes: + - azure_rm_netapp_volume - fix 'Nonetype' object is not subscriptable exception + when mount target is not created. + minor_changes: + - azure_rm_netapp_account - new option ``active_directories`` to support SMB + volumes. + - azure_rm_netapp_volume - new option ``protocol_types`` to support SMB volumes. + - azure_rm_netapp_volume - new option ``subnet_name`` as subnet_id is ambiguous. subnet_id + is now aliased to subnet_name. + fragments: + - DEVOPS-3505.yaml + release_date: '2021-02-04' + 21.3.0: + changes: + bugfixes: + - azure_rm_netapp_account - wait for job completion for asynchroneous requests, + and report belated errors. + - support for azure-mgmt-netapp 1.0.0, while maintaining compatibility with + 0.10.0. + minor_changes: + - azure_rm_netapp_account - new option ``active_directories`` to support SMB + volumes. + - azure_rm_netapp_account - new suboptions ``ad_name``, ``kdc_ip``, ``service_root_ca_certificate``` + for Active Directory. + - azure_rm_netapp_volume - new option ``protocol_types`` to support SMB volumes. + fragments: + - DEVOPS-3526.yaml + - DEVOPS-3663.yaml + - DEVOPS-3704.yaml + release_date: '2021-03-03' + 21.5.0: + changes: + minor_changes: + - azure_rm_netapp_volume - enable changes in volume size. + - azure_rm_netapp_volume - rename msg to mount_path, as documented in RETURN. + fragments: + - DEVOPS-3849.yaml + release_date: '2021-04-21' + 21.6.0: + changes: + minor_changes: + - azure_rm_netapp_account - support additional authentication schemes provided + by AzureRMModuleBase. + - azure_rm_netapp_capacity_pool - support additional authentication schemes + provided by AzureRMModuleBase, and tags. 
+ - azure_rm_netapp_capacity_pool - wait for completion when creating, modifying, + or deleting a pool. + - azure_rm_netapp_snapshot - support additional authentication schemes provided + by AzureRMModuleBase. + - azure_rm_netapp_snapshot - wait for completion when creating, modifying, or + deleting a pool. + - azure_rm_netapp_volume - support additional authentication schemes provided + by AzureRMModuleBase, and tags. + fragments: + - DEVOPS-3935.yaml + - DEVOPS-3949.yaml + release_date: '2021-05-06' + 21.7.0: + changes: + bugfixes: + - fix CI pipeline to work with azcollection, and isolate UTs from azcollection. + fragments: + - DEVOPS-4001.yaml + release_date: '2021-06-03' + 21.8.0: + changes: + bugfixes: + - fix CI pipeline as azcollection does not support python 2.6. + - fix CI pipeline as ignores are not required with latest azcollection. + fragments: + - DEVOPS-4070.yaml + release_date: '2021-07-14' + 21.8.1: + changes: + bugfixes: + - Hub Automation cannot generate documentation (cannot use doc fragments from + another collection). + fragments: + - DEVOPS-4135.yaml + release_date: '2021-07-16' + 21.9.0: + changes: + bugfixes: + - azure_rm_netapp_volume - 'Change Ownership' is not permitted when creating + NFSv4.1 volume with latest azure-mgmt-netapp package (4.0.0). + minor_changes: + - azure_rm_netapp_volume - new option ``feature_flags`` to selectively enable/disable + a feature. 
+ fragments: + - DEVOPS-4246.yaml + release_date: '2021-09-01' diff --git a/ansible_collections/netapp/azure/changelogs/config.yaml b/ansible_collections/netapp/azure/changelogs/config.yaml new file mode 100644 index 000000000..fcd0312f6 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/config.yaml @@ -0,0 +1,32 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: true +mention_ancestor: false +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Azure NetApp Files (ANF) Collection +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/netapp/azure/changelogs/fragments/20.2.0.yaml b/ansible_collections/netapp/azure/changelogs/fragments/20.2.0.yaml new file mode 100644 index 000000000..3f764c1c9 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/20.2.0.yaml @@ -0,0 +1,2 @@ +bugfixes: + - galaxy.yml - fix path to github repository. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/20.4.0.yaml b/ansible_collections/netapp/azure/changelogs/fragments/20.4.0.yaml new file mode 100644 index 000000000..044018d2c --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/20.4.0.yaml @@ -0,0 +1,3 @@ +bugfixes: + - fix changes to azure-mgmt-netapp as per new release. + - removed ONTAP dependency import. 
diff --git a/ansible_collections/netapp/azure/changelogs/fragments/20.5.0.yaml b/ansible_collections/netapp/azure/changelogs/fragments/20.5.0.yaml new file mode 100644 index 000000000..a2f16d6b0 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/20.5.0.yaml @@ -0,0 +1,6 @@ +minor_changes: + - azure_rm_netapp_account - new option ``tags``. + - azure_rm_netapp_capacity_pool - new option ``service_level``. + - azure_rm_netapp_volume - new option ``size``. + - azure_rm_netapp_volume - now returns mount_path of the volume specified. + - azure_rm_netapp_volume - new option ``vnet_resource_group_for_subnet``, resource group for virtual_network and subnet_id to be used. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/20.6.0.yaml b/ansible_collections/netapp/azure/changelogs/fragments/20.6.0.yaml new file mode 100644 index 000000000..67b15df45 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/20.6.0.yaml @@ -0,0 +1,3 @@ +minor_changes: + - azure_rm_netapp_capacity_pool - now allows modify for size. + - azure_rm_netapp_volume - now returns complete mount_path of the volume specified. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/20.7.0.yaml b/ansible_collections/netapp/azure/changelogs/fragments/20.7.0.yaml new file mode 100644 index 000000000..e150ea10d --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/20.7.0.yaml @@ -0,0 +1,2 @@ +bugfixes: + - azure_rm_netapp_capacity_pool - fixed idempotency for delete operation. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/20.8.0.yaml b/ansible_collections/netapp/azure/changelogs/fragments/20.8.0.yaml new file mode 100644 index 000000000..68e9d285d --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/20.8.0.yaml @@ -0,0 +1,3 @@ +minor_changes: + - azure_rm_netapp_capacity_pool - Updated ANF capacity pool modify function for size parameter mandatory issue. 
+ - use a three group format for version_added. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3505.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3505.yaml new file mode 100644 index 000000000..87c49d052 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3505.yaml @@ -0,0 +1,4 @@ +minor_changes: + - azure_rm_netapp_volume - new option ``subnet_name`` as subnet_id is ambiguous. subnet_id is now aliased to subnet_name. +bugfixes: + - azure_rm_netapp_volume - fix 'Nonetype' object is not subscriptable exception when mount target is not created. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3526.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3526.yaml new file mode 100644 index 000000000..720ce523d --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3526.yaml @@ -0,0 +1,3 @@ +minor_changes: + - azure_rm_netapp_account - new option ``active_directories`` to support SMB volumes. + - azure_rm_netapp_volume - new option ``protocol_types`` to support SMB volumes. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3663.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3663.yaml new file mode 100644 index 000000000..e9adbdb9b --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3663.yaml @@ -0,0 +1,2 @@ +bugfixes: + - support for azure-mgmt-netapp 1.0.0, while maintaining compatibility with 0.10.0. 
diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3704.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3704.yaml new file mode 100644 index 000000000..f0c1f6a38 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3704.yaml @@ -0,0 +1,5 @@ +minor_changes: + - azure_rm_netapp_account - new suboptions ``ad_name``, ``kdc_ip``, ``service_root_ca_certificate``` for Active Directory. + +bugfixes: + - azure_rm_netapp_account - wait for job completion for asynchroneous requests, and report belated errors. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3849.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3849.yaml new file mode 100644 index 000000000..7fc5d9fae --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3849.yaml @@ -0,0 +1,3 @@ +minor_changes: + - azure_rm_netapp_volume - enable changes in volume size. + - azure_rm_netapp_volume - rename msg to mount_path, as documented in RETURN. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3935.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3935.yaml new file mode 100644 index 000000000..c619dbcd9 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3935.yaml @@ -0,0 +1,2 @@ +minor_changes: + - azure_rm_netapp_account - support additional authentication schemes provided by AzureRMModuleBase. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3949.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3949.yaml new file mode 100644 index 000000000..2889546f8 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-3949.yaml @@ -0,0 +1,6 @@ +minor_changes: + - azure_rm_netapp_capacity_pool - support additional authentication schemes provided by AzureRMModuleBase, and tags. 
+ - azure_rm_netapp_capacity_pool - wait for completion when creating, modifying, or deleting a pool. + - azure_rm_netapp_snapshot - support additional authentication schemes provided by AzureRMModuleBase. + - azure_rm_netapp_snapshot - wait for completion when creating, modifying, or deleting a pool. + - azure_rm_netapp_volume - support additional authentication schemes provided by AzureRMModuleBase, and tags. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4001.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4001.yaml new file mode 100644 index 000000000..2b09e21ce --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4001.yaml @@ -0,0 +1,2 @@ +bugfixes: + - fix CI pipeline to work with azcollection, and isolate UTs from azcollection. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4070.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4070.yaml new file mode 100644 index 000000000..0342115bb --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4070.yaml @@ -0,0 +1,3 @@ +bugfixes: + - fix CI pipeline as azcollection does not support python 2.6. + - fix CI pipeline as ignores are not required with latest azcollection. diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4135.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4135.yaml new file mode 100644 index 000000000..50e2e7d36 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4135.yaml @@ -0,0 +1,2 @@ +bugfixes: + - Hub Automation cannot generate documentation (cannot use doc fragments from another collection). 
diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4246.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4246.yaml new file mode 100644 index 000000000..781042d6f --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4246.yaml @@ -0,0 +1,4 @@ +minor_changes: + - azure_rm_netapp_volume - new option ``feature_flags`` to selectively enable/disable a feature. +bugfixes: + - azure_rm_netapp_volume - 'Change Ownership' is not permitted when creating NFSv4.1 volume with latest azure-mgmt-netapp package (4.0.0). diff --git a/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4416.yaml b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4416.yaml new file mode 100644 index 000000000..6b4b660a0 --- /dev/null +++ b/ansible_collections/netapp/azure/changelogs/fragments/DEVOPS-4416.yaml @@ -0,0 +1,2 @@ +minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. diff --git a/ansible_collections/netapp/azure/meta/runtime.yml b/ansible_collections/netapp/azure/meta/runtime.yml new file mode 100644 index 000000000..a6ed3542d --- /dev/null +++ b/ansible_collections/netapp/azure/meta/runtime.yml @@ -0,0 +1,8 @@ +--- +requires_ansible: ">=2.9.10" +action_groups: + netapp_azure: + - azure_rm_netapp_account + - azure_rm_netapp_capacity_pool + - azure_rm_netapp_snapshot + - azure_rm_netapp_volume diff --git a/ansible_collections/netapp/azure/plugins/doc_fragments/azure.py b/ansible_collections/netapp/azure/plugins/doc_fragments/azure.py new file mode 100644 index 000000000..49467db70 --- /dev/null +++ b/ansible_collections/netapp/azure/plugins/doc_fragments/azure.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016 Matt Davis, +# Copyright: (c) 2016 Chris Houseknecht, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + 
+ +class ModuleDocFragment(object): + + # Azure doc fragment + DOCUMENTATION = r''' + +options: + ad_user: + description: + - Active Directory username. Use when authenticating with an Active Directory user rather than service + principal. + type: str + password: + description: + - Active Directory user password. Use when authenticating with an Active Directory user rather than service + principal. + type: str + profile: + description: + - Security profile found in ~/.azure/credentials file. + type: str + subscription_id: + description: + - Your Azure subscription Id. + type: str + client_id: + description: + - Azure client ID. Use when authenticating with a Service Principal. + type: str + secret: + description: + - Azure client secret. Use when authenticating with a Service Principal. + type: str + tenant: + description: + - Azure tenant ID. Use when authenticating with a Service Principal. + type: str + cloud_environment: + description: + - For cloud environments other than the US public cloud, the environment name (as defined by Azure Python SDK, eg, C(AzureChinaCloud), + C(AzureUSGovernment)), or a metadata discovery endpoint URL (required for Azure Stack). Can also be set via credential file profile or + the C(AZURE_CLOUD_ENVIRONMENT) environment variable. + type: str + default: AzureCloud + version_added: '0.0.1' + adfs_authority_url: + description: + - Azure AD authority url. Use when authenticating with Username/password, and has your own ADFS authority. + type: str + version_added: '0.0.1' + cert_validation_mode: + description: + - Controls the certificate validation behavior for Azure endpoints. By default, all modules will validate the server certificate, but + when an HTTPS proxy is in use, or against Azure Stack, it may be necessary to disable this behavior by passing C(ignore). Can also be + set via credential file profile or the C(AZURE_CERT_VALIDATION) environment variable. 
+ type: str + choices: [ ignore, validate ] + version_added: '0.0.1' + auth_source: + description: + - Controls the source of the credentials to use for authentication. + - Can also be set via the C(ANSIBLE_AZURE_AUTH_SOURCE) environment variable. + - When set to C(auto) (the default) the precedence is module parameters -> C(env) -> C(credential_file) -> C(cli). + - When set to C(env), the credentials will be read from the environment variables + - When set to C(credential_file), it will read the profile from C(~/.azure/credentials). + - When set to C(cli), the credentials will be sources from the Azure CLI profile. C(subscription_id) or the environment variable + C(AZURE_SUBSCRIPTION_ID) can be used to identify the subscription ID if more than one is present otherwise the default + az cli subscription is used. + - When set to C(msi), the host machine must be an azure resource with an enabled MSI extension. C(subscription_id) or the + environment variable C(AZURE_SUBSCRIPTION_ID) can be used to identify the subscription ID if the resource is granted + access to more than one subscription, otherwise the first subscription is chosen. + - The C(msi) was added in Ansible 2.6. + type: str + default: auto + choices: + - auto + - cli + - credential_file + - env + - msi + version_added: '0.0.1' + api_profile: + description: + - Selects an API profile to use when communicating with Azure services. Default value of C(latest) is appropriate for public clouds; + future values will allow use with Azure Stack. + type: str + default: latest + version_added: '0.0.1' + log_path: + description: + - Parent argument. + type: str + log_mode: + description: + - Parent argument. 
+ type: str +requirements: + - python >= 2.7 + - The host that executes this module must have the azure.azcollection collection installed via galaxy + - All python packages listed in collection's requirements-azure.txt must be installed via pip on the host that executes modules from azure.azcollection + - Full installation instructions may be found https://galaxy.ansible.com/azure/azcollection + +notes: + - For authentication with Azure you can pass parameters, set environment variables, use a profile stored + in ~/.azure/credentials, or log in before you run your tasks or playbook with C(az login). + - Authentication is also possible using a service principal or Active Directory user. + - To authenticate via service principal, pass subscription_id, client_id, secret and tenant or set environment + variables AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID, AZURE_SECRET and AZURE_TENANT. + - To authenticate via Active Directory user, pass ad_user and password, or set AZURE_AD_USER and + AZURE_PASSWORD in the environment. + - "Alternatively, credentials can be stored in ~/.azure/credentials. This is an ini file containing + a [default] section and the following keys: subscription_id, client_id, secret and tenant or + subscription_id, ad_user and password. It is also possible to add additional profiles. Specify the profile + by passing profile or setting AZURE_PROFILE in the environment." + +seealso: + - name: Sign in with Azure CLI + link: https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest + description: How to authenticate using the C(az login) command. 
+ ''' diff --git a/ansible_collections/netapp/azure/plugins/doc_fragments/azure_tags.py b/ansible_collections/netapp/azure/plugins/doc_fragments/azure_tags.py new file mode 100644 index 000000000..8edb80eed --- /dev/null +++ b/ansible_collections/netapp/azure/plugins/doc_fragments/azure_tags.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Matt Davis, +# Copyright: (c) 2016, Chris Houseknecht, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Azure doc fragment + DOCUMENTATION = r''' +options: + tags: + description: + - Dictionary of string:string pairs to assign as metadata to the object. + - Metadata tags on the object will be updated with any provided values. + - To remove tags set append_tags option to false. + - Currently, Azure DNS zones and Traffic Manager services also don't allow the use of spaces in the tag. + - Azure Front Door doesn't support the use of # in the tag name. + - Azure Automation and Azure CDN only support 15 tags on resources. + type: dict + append_tags: + description: + - Use to control if tags field is canonical or just appends to existing tags. + - When canonical, any tags not found in the tags parameter will be removed from the object's metadata. 
+ type: bool + default: yes + ''' diff --git a/ansible_collections/netapp/azure/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/azure/plugins/doc_fragments/netapp.py new file mode 100644 index 000000000..18e9cc2a2 --- /dev/null +++ b/ansible_collections/netapp/azure/plugins/doc_fragments/netapp.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, NetApp Ansible Team ng-ansibleteam@netapp.com +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + - See respective platform section for more details +requirements: + - See respective platform section for more details +notes: + - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire +''' + + # Documentation fragment for Cloud Volume Services on Azure NetApp (azure_rm_netapp) + AZURE_RM_NETAPP = r''' +options: + resource_group: + description: + - Name of the resource group. + required: true + type: str +requirements: + - python >= 2.7 + - azure >= 2.0.0 + - Python azure-mgmt. Install using 'pip install azure-mgmt' + - Python azure-mgmt-netapp. Install using 'pip install azure-mgmt-netapp' + - For authentication with Azure NetApp log in before you run your tasks or playbook with C(az login). + +notes: + - The modules prefixed with azure_rm_netapp are built to support the Cloud Volume Services for Azure NetApp Files. + +seealso: + - name: Sign in with Azure CLI + link: https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest + description: How to authenticate using the C(az login) command. 
+ ''' diff --git a/ansible_collections/netapp/azure/plugins/module_utils/azure_rm_netapp_common.py b/ansible_collections/netapp/azure/plugins/module_utils/azure_rm_netapp_common.py new file mode 100644 index 000000000..716e4fb95 --- /dev/null +++ b/ansible_collections/netapp/azure/plugins/module_utils/azure_rm_netapp_common.py @@ -0,0 +1,156 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +azure_rm_netapp_common +Wrapper around AzureRMModuleBase base class +''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys + +HAS_AZURE_COLLECTION = True +NEW_STYLE = None +COLLECTION_VERSION = "21.10.0" +IMPORT_ERRORS = [] +SDK_VERSION = "0.0.0" + +if 'pytest' in sys.modules: + from ansible_collections.netapp.azure.plugins.module_utils.netapp_module import AzureRMModuleBaseMock as AzureRMModuleBase +else: + try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + HAS_AZURE_COLLECTION = False + except SyntaxError as exc: + # importing Azure collection fails with python 2.6 + if sys.version_info < (2, 8): + IMPORT_ERRORS.append(str(exc)) + from ansible_collections.netapp.azure.plugins.module_utils.netapp_module import AzureRMModuleBaseMock as AzureRMModuleBase + HAS_AZURE_COLLECTION = False + else: + raise + +try: + from azure.mgmt.netapp import NetAppManagementClient # 1.0.0 or newer + NEW_STYLE = True +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + try: + from azure.mgmt.netapp import AzureNetAppFilesManagementClient # 0.10.0 or older + NEW_STYLE = False + except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +try: + from azure.mgmt.netapp import VERSION as SDK_VERSION +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + + +class AzureRMNetAppModuleBase(AzureRMModuleBase): + ''' Wrapper 
around AzureRMModuleBase base class ''' + def __init__(self, derived_arg_spec, required_if=None, supports_check_mode=False, supports_tags=True): + self._netapp_client = None + self._new_style = NEW_STYLE + self._sdk_version = SDK_VERSION + super(AzureRMNetAppModuleBase, self).__init__(derived_arg_spec=derived_arg_spec, + required_if=required_if, + supports_check_mode=supports_check_mode, + supports_tags=supports_tags) + if not HAS_AZURE_COLLECTION: + self.fail_when_import_errors(IMPORT_ERRORS) + + def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None): + if not self._new_style: + return super(AzureRMNetAppModuleBase, self).get_mgmt_svc_client(client_type, base_url, api_version) + self.log('Getting management service client NetApp {0}'.format(client_type.__name__)) + self.check_client_version(client_type) + + if not base_url: + # most things are resource_manager, don't make everyone specify + base_url = self.azure_auth._cloud_environment.endpoints.resource_manager + + client_kwargs = dict(credential=self.azure_auth.azure_credentials, subscription_id=self.azure_auth.subscription_id, base_url=base_url) + + return client_type(**client_kwargs) + + @property + def netapp_client(self): + self.log('Getting netapp client') + if self._new_style is None: + # note that we always have at least one import error + self.fail_when_import_errors(IMPORT_ERRORS) + if self._netapp_client is None: + if self._new_style: + self._netapp_client = self.get_mgmt_svc_client(NetAppManagementClient) + else: + self._netapp_client = self.get_mgmt_svc_client(AzureNetAppFilesManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2018-05-01') + return self._netapp_client + + @property + def new_style(self): + return self._new_style + + @property + def sdk_version(self): + return self._sdk_version + + def get_method(self, category, name): + try: + methods = getattr(self.netapp_client, category) + except AttributeError as exc: + 
self.module.fail_json('Error: category %s not found for netapp_client: %s' % (category, str(exc))) + + if self._new_style: + name = 'begin_' + name + try: + method = getattr(methods, name) + except AttributeError as exc: + self.module.fail_json('Error: method %s not found for netapp_client category: %s - %s' % (name, category, str(exc))) + return method + + def fail_when_import_errors(self, import_errors, has_azure_mgmt_netapp=True): + if has_azure_mgmt_netapp and not import_errors: + return + msg = '' + if not has_azure_mgmt_netapp: + msg = "The python azure-mgmt-netapp package is required. " + if hasattr(self, 'module'): + msg += 'Import errors: %s' % str(import_errors) + self.module.fail_json(msg=msg) + msg += str(import_errors) + raise ImportError(msg) + + def has_feature(self, feature_name): + feature = self.get_feature(feature_name) + if isinstance(feature, bool): + return feature + self.module.fail_json(msg="Error: expected bool type for feature flag: %s" % feature_name) + + def get_feature(self, feature_name): + ''' if the user has configured the feature, use it + otherwise, use our default + ''' + default_flags = dict( + # TODO: review need for these + # trace_apis=False, # if true, append REST requests/responses to /tmp/azure_apis.log + # check_required_params_for_none=True, + # deprecation_warning=True, + # show_modified=True, + # + # preview features in ANF + ignore_change_ownership_mode=True + ) + + if self.parameters.get('feature_flags') is not None and feature_name in self.parameters['feature_flags']: + return self.parameters['feature_flags'][feature_name] + if feature_name in default_flags: + return default_flags[feature_name] + self.module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name) diff --git a/ansible_collections/netapp/azure/plugins/module_utils/netapp_module.py b/ansible_collections/netapp/azure/plugins/module_utils/netapp_module.py new file mode 100644 index 000000000..9ee758c01 --- /dev/null +++ 
b/ansible_collections/netapp/azure/plugins/module_utils/netapp_module.py @@ -0,0 +1,271 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2018, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +''' Support class for NetApp ansible modules ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils import basic + + +class AzureRMModuleBaseMock(): + ''' Mock for sanity tests when azcollection is not installed ''' + def __init__(self, derived_arg_spec, required_if=None, supports_check_mode=False, supports_tags=True, **kwargs): + if supports_tags: + derived_arg_spec.update(dict(tags=dict())) + self.module = basic.AnsibleModule( + argument_spec=derived_arg_spec, + required_if=required_if, + supports_check_mode=supports_check_mode + ) + self.module.warn('Running in Unit Test context!') + # the following is done in exec_module() + self.parameters = dict([item for item in self.module.params.items() if item[1] is not None]) + # remove values with a default of None (not required) + self.module_arg_spec = dict([item for item in self.module_arg_spec.items() if item[0] in self.parameters]) + + def update_tags(self, tags): + self.module.log('update_tags called with:', tags) + return None, None + + +def cmp(obj1, obj2): + """ + Python 3 does not have a cmp function, this will do the cmp. + :param a: first object to check + :param b: second object to check + :return: + """ + # convert to lower case for string comparison. + if obj1 is None: + return -1 + if isinstance(obj1, str) and isinstance(obj2, str): + obj1 = obj1.lower() + obj2 = obj2.lower() + # if list has string element, convert string to lower case. 
+ if isinstance(obj1, list) and isinstance(obj2, list): + obj1 = [x.lower() if isinstance(x, str) else x for x in obj1] + obj2 = [x.lower() if isinstance(x, str) else x for x in obj2] + obj1.sort() + obj2.sort() + if isinstance(obj1, dict) and isinstance(obj2, dict): + return 0 if obj1 == obj2 else 1 + return (obj1 > obj2) - (obj1 < obj2) + + +class NetAppModule(): + ''' + Common class for NetApp modules + set of support functions to derive actions based + on the current state of the system, and a desired state + ''' + + def __init__(self): + self.log = [] + self.changed = False + self.parameters = {'name': 'not intialized'} + self.zapi_string_keys = dict() + self.zapi_bool_keys = dict() + self.zapi_list_keys = {} + self.zapi_int_keys = {} + self.zapi_required = {} + + def set_parameters(self, ansible_params): + self.parameters = {} + for param in ansible_params: + if ansible_params[param] is not None: + self.parameters[param] = ansible_params[param] + return self.parameters + + def get_cd_action(self, current, desired): + ''' takes a desired state and a current state, and return an action: + create, delete, None + eg: + is_present = 'absent' + some_object = self.get_object(source) + if some_object is not None: + is_present = 'present' + action = cd_action(current=is_present, desired = self.desired.state()) + ''' + desired_state = desired['state'] if 'state' in desired else 'present' + if current is None and desired_state == 'absent': + return None + if current is not None and desired_state == 'present': + return None + # change in state + self.changed = True + if current is not None: + return 'delete' + return 'create' + + def compare_and_update_values(self, current, desired, keys_to_compare): + updated_values = {} + is_changed = False + for key in keys_to_compare: + if key in current: + if key in desired and desired[key] is not None: + if current[key] != desired[key]: + updated_values[key] = desired[key] + is_changed = True + else: + updated_values[key] = 
current[key] + else: + updated_values[key] = current[key] + + return updated_values, is_changed + + @staticmethod + def check_keys(current, desired): + ''' TODO: raise an error if keys do not match + with the exception of: + new_name, state in desired + ''' + + @staticmethod + def compare_lists(current, desired, get_list_diff): + ''' compares two lists and return a list of elements that are either the desired elements or elements that are + modified from the current state depending on the get_list_diff flag + :param: current: current item attribute in ONTAP + :param: desired: attributes from playbook + :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute + :return: list of attributes to be modified + :rtype: list + ''' + desired_diff_list = [item for item in desired if item not in current] # get what in desired and not in current + current_diff_list = [item for item in current if item not in desired] # get what in current but not in desired + + if desired_diff_list or current_diff_list: + # there are changes + if get_list_diff: + return desired_diff_list + else: + return desired + else: + return [] + + def get_modified_attributes(self, current, desired, get_list_diff=False): + ''' takes two dicts of attributes and return a dict of attributes that are + not in the current state + It is expected that all attributes of interest are listed in current and + desired. 
+ :param: current: current attributes in ONTAP + :param: desired: attributes from playbook + :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute + :return: dict of attributes to be modified + :rtype: dict + + NOTE: depending on the attribute, the caller may need to do a modify or a + different operation (eg move volume if the modified attribute is an + aggregate name) + ''' + # if the object does not exist, we can't modify it + modified = {} + if current is None: + return modified + + # error out if keys do not match + self.check_keys(current, desired) + + # collect changed attributes + for key, value in current.items(): + if key in desired and desired[key] is not None: + if isinstance(value, list): + modified_list = self.compare_lists(value, desired[key], get_list_diff) # get modified list from current and desired + if modified_list: + modified[key] = modified_list + elif cmp(value, desired[key]) != 0: + modified[key] = desired[key] + if modified: + self.changed = True + return modified + + def is_rename_action(self, source, target): + ''' takes a source and target object, and returns True + if a rename is required + eg: + source = self.get_object(source_name) + target = self.get_object(target_name) + action = is_rename_action(source, target) + :return: None for error, True for rename action, False otherwise + ''' + if source is None and target is None: + # error, do nothing + # cannot rename an non existent resource + # alternatively we could create B + return None + if source is not None and target is not None: + # error, do nothing + # idempotency (or) new_name_is_already_in_use + # alternatively we could delete B and rename A to B + return False + if source is None: + # do nothing, maybe the rename was already done + return False + # source is not None and target is None: + # rename is in order + self.changed = True + return True + + def filter_out_none_entries(self, list_or_dict): + """take a dict or 
list as input and return a dict/list without keys/elements whose values are None + skip empty dicts or lists. + """ + + if isinstance(list_or_dict, dict): + result = {} + for key, value in list_or_dict.items(): + if isinstance(value, (list, dict)): + sub = self.filter_out_none_entries(value) + if sub: + # skip empty dict or list + result[key] = sub + elif value is not None: + # skip None value + result[key] = value + return result + + if isinstance(list_or_dict, list): + alist = [] + for item in list_or_dict: + if isinstance(item, (list, dict)): + sub = self.filter_out_none_entries(item) + if sub: + # skip empty dict or list + alist.append(sub) + elif item is not None: + # skip None value + alist.append(item) + return alist + + raise TypeError('unexpected type %s' % type(list_or_dict)) + + @staticmethod + def get_not_none_values_from_dict(parameters, keys): + # python 2.6 does not support dict comprehension using k: v + return dict((key, value) for key, value in parameters.items() if key in keys and value is not None) diff --git a/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_account.py b/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_account.py new file mode 100644 index 000000000..c09ade0df --- /dev/null +++ b/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_account.py @@ -0,0 +1,404 @@ +#!/usr/bin/python +# +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +azure_rm_netapp_account +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_netapp_account + +short_description: Manage NetApp Azure Files Account +version_added: 19.10.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create and delete NetApp Azure account. + Provide the Resource group name for the NetApp account to be created. 
+extends_documentation_fragment: + - netapp.azure.azure + - netapp.azure.azure_tags + - netapp.azure.netapp.azure_rm_netapp + +options: + name: + description: + - The name of the NetApp account. + required: true + type: str + location: + description: + - Resource location. + - Required for create. + type: str + + active_directories: + description: + - list of active directory dictionaries. + - The list is currently limited to a single active directory (ANF or Azure limit of one AD per subscription). + type: list + elements: dict + version_added: 21.2.0 + suboptions: + active_directory_id: + description: not used for create. Not needed for join. + type: str + dns: + description: list of DNS addresses. Required for create or join. + type: list + elements: str + domain: + description: Fully Qualified Active Directory DNS Domain Name. Required for create or join. + type: str + site: + description: The Active Directory site the service will limit Domain Controller discovery to. + type: str + smb_server_name: + description: Prefix for creating the SMB server's computer account name in the Active Directory domain. Required for create or join. + type: str + organizational_unit: + description: LDAP Path for the Organization Unit where SMB Server machine accounts will be created (i.e. OU=SecondLevel,OU=FirstLevel). + type: str + username: + description: Credentials that have permissions to create SMB server machine account in the AD domain. Required for create or join. + type: str + password: + description: see username. If password is present, the module is not idempotent, as we cannot check the current value. Required for create or join. + type: str + aes_encryption: + description: If enabled, AES encryption will be enabled for SMB communication. + type: bool + ldap_signing: + description: Specifies whether or not the LDAP traffic needs to be signed. + type: bool + ad_name: + description: Name of the active directory machine. Used only while creating kerberos volume. 
+ type: str + version_added: 21.3.0 + kdc_ip: + description: kdc server IP addresses for the active directory machine. Used only while creating kerberos volume. + type: str + version_added: 21.3.0 + server_root_ca_certificate: + description: + - When LDAP over SSL/TLS is enabled, the LDAP client is required to have base64 encoded Active Directory Certificate Service's + self-signed root CA certificate, this optional parameter is used only for dual protocol with LDAP user-mapping volumes. + type: str + version_added: 21.3.0 + state: + description: + - State C(present) will check that the NetApp account exists with the requested configuration. + - State C(absent) will delete the NetApp account. + default: present + choices: + - absent + - present + type: str + debug: + description: output details about current account if it exists. + type: bool + default: false + +''' +EXAMPLES = ''' + +- name: Create NetApp Azure Account + netapp.azure.azure_rm_netapp_account: + resource_group: myResourceGroup + name: testaccount + location: eastus + tags: {'abc': 'xyz', 'cba': 'zyx'} + +- name: Modify Azure NetApp account (Join AD) + netapp.azure.azure_rm_netapp_account: + resource_group: myResourceGroup + name: testaccount + location: eastus + active_directories: + - site: ln + dns: 10.10.10.10 + domain: domain.com + smb_server_name: dummy + password: xxxxxx + username: laurentn + +- name: Delete NetApp Azure Account + netapp.azure.azure_rm_netapp_account: + state: absent + resource_group: myResourceGroup + name: testaccount + location: eastus + +- name: Create Azure NetApp account (with AD) + netapp.azure.azure_rm_netapp_account: + resource_group: laurentngroupnodash + name: tests-netapp11 + location: eastus + tags: + creator: laurentn + use: Ansible + active_directories: + - site: ln + dns: 10.10.10.10 + domain: domain.com + smb_server_name: dummy + password: xxxxxx + username: laurentn +''' + +RETURN = ''' +''' + +import traceback + +HAS_AZURE_MGMT_NETAPP = False +IMPORT_ERRORS 
= list() + +try: + from msrestazure.azure_exceptions import CloudError + from azure.core.exceptions import AzureError, ResourceNotFoundError +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +try: + from azure.mgmt.netapp.models import NetAppAccount, NetAppAccountPatch, ActiveDirectory + HAS_AZURE_MGMT_NETAPP = True +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +from ansible.module_utils.basic import to_native +from ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common import AzureRMNetAppModuleBase +from ansible_collections.netapp.azure.plugins.module_utils.netapp_module import NetAppModule + + +class AzureRMNetAppAccount(AzureRMNetAppModuleBase): + ''' create, modify, delete account, including joining AD domain + ''' + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + location=dict(type='str', required=False), + state=dict(choices=['present', 'absent'], default='present', type='str'), + active_directories=dict(type='list', elements='dict', options=dict( + active_directory_id=dict(type='str'), + dns=dict(type='list', elements='str'), + domain=dict(type='str'), + site=dict(type='str'), + smb_server_name=dict(type='str'), + organizational_unit=dict(type='str'), + username=dict(type='str'), + password=dict(type='str', no_log=True), + aes_encryption=dict(type='bool'), + ldap_signing=dict(type='bool'), + ad_name=dict(type='str'), + kdc_ip=dict(type='str'), + server_root_ca_certificate=dict(type='str', no_log=True), + )), + debug=dict(type='bool', default=False) + ) + + self.na_helper = NetAppModule() + self.parameters = dict() + self.debug = list() + self.warnings = list() + + # import errors are handled in AzureRMModuleBase + super(AzureRMNetAppAccount, self).__init__(derived_arg_spec=self.module_arg_spec, + required_if=[('state', 'present', ['location'])], + supports_check_mode=True) + + def 
get_azure_netapp_account(self): + """ + Returns NetApp Account object for an existing account + Return None if account does not exist + """ + try: + account_get = self.netapp_client.accounts.get(self.parameters['resource_group'], self.parameters['name']) + except (CloudError, ResourceNotFoundError): # account does not exist + return None + account = vars(account_get) + ads = None + if account.get('active_directories') is not None: + ads = list() + for each_ad in account.get('active_directories'): + ad_dict = vars(each_ad) + dns = ad_dict.get('dns') + if dns is not None: + ad_dict['dns'] = sorted(dns.split(',')) + ads.append(ad_dict) + account['active_directories'] = ads + return account + + def create_account_request_body(self, modify=None): + """ + Create an Azure NetApp Account Request Body + :return: None + """ + options = dict() + location = None + for attr in ('location', 'tags', 'active_directories'): + value = self.parameters.get(attr) + if attr == 'location' and modify is None: + location = value + continue + if value is not None: + if modify is None or attr in modify: + if attr == 'active_directories': + ads = list() + for ad_dict in value: + if ad_dict.get('dns') is not None: + # API expects a string of comma separated elements + ad_dict['dns'] = ','.join(ad_dict['dns']) + ads.append(ActiveDirectory(**self.na_helper.filter_out_none_entries(ad_dict))) + value = ads + options[attr] = value + if modify is None: + if location is None: + self.module.fail_json(msg="Error: 'location' is a required parameter") + return NetAppAccount(location=location, **options) + return NetAppAccountPatch(**options) + + def create_azure_netapp_account(self): + """ + Create an Azure NetApp Account + :return: None + """ + account_body = self.create_account_request_body() + try: + response = self.get_method('accounts', 'create_or_update')(body=account_body, + resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['name']) + while response.done() is 
not True: + response.result(10) + except (CloudError, AzureError) as error: + self.module.fail_json(msg='Error creating Azure NetApp account %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def update_azure_netapp_account(self, modify): + """ + Create an Azure NetApp Account + :return: None + """ + account_body = self.create_account_request_body(modify) + try: + response = self.get_method('accounts', 'update')(body=account_body, + resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['name']) + while response.done() is not True: + response.result(10) + except (CloudError, AzureError) as error: + self.module.fail_json(msg='Error creating Azure NetApp account %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_azure_netapp_account(self): + """ + Delete an Azure NetApp Account + :return: None + """ + try: + response = self.get_method('accounts', 'delete')(resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['name']) + while response.done() is not True: + response.result(10) + except (CloudError, AzureError) as error: + self.module.fail_json(msg='Error deleting Azure NetApp account %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def get_changes_in_ads(self, current, desired): + c_ads = current.get('active_directories') + d_ads = desired.get('active_directories') + if not c_ads: + return desired.get('active_directories'), None + if not d_ads: + return None, current.get('active_directories') + if len(c_ads) > 1 or len(d_ads) > 1: + msg = 'Error checking for AD, currently only one AD is supported.' + if len(c_ads) > 1: + msg += ' Current: %s.' % str(c_ads) + if len(d_ads) > 1: + msg += ' Desired: %s.' 
% str(d_ads) + self.module.fail_json(msg='Error checking for AD, currently only one AD is supported') + changed = False + d_ad = d_ads[0] + c_ad = c_ads[0] + for key, value in c_ad.items(): + if key == 'password': + if d_ad.get(key) is None: + continue + self.warnings.append("module is not idempotent if 'password:' is present") + if d_ad.get(key) is None: + d_ad[key] = value + elif d_ad.get(key) != value: + changed = True + self.debug.append("key: %s, value %s" % (key, value)) + if changed: + return [d_ad], None + return None, None + + def exec_module(self, **kwargs): + + # unlikely + self.fail_when_import_errors(IMPORT_ERRORS, HAS_AZURE_MGMT_NETAPP) + + # set up parameters according to our initial list + for key in list(self.module_arg_spec): + self.parameters[key] = kwargs[key] + # and common parameter + for key in ['tags']: + if key in kwargs: + self.parameters[key] = kwargs[key] + + current = self.get_azure_netapp_account() + modify = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + self.debug.append('current: %s' % str(current)) + if current is not None and cd_action is None: + ads_to_add, ads_to_delete = self.get_changes_in_ads(current, self.parameters) + self.parameters.pop('active_directories', None) + if ads_to_add: + self.parameters['active_directories'] = ads_to_add + if ads_to_delete: + self.module.fail_json(msg="Error: API does not support unjoining an AD", debug=self.debug) + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if 'tags' in modify: + dummy, modify['tags'] = self.update_tags(current.get('tags')) + + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'create': + self.create_azure_netapp_account() + elif cd_action == 'delete': + self.delete_azure_netapp_account() + elif modify: + self.update_azure_netapp_account(modify) + results = dict( + changed=self.na_helper.changed, + modify=modify + ) + if self.warnings: + results['warnings'] = self.warnings + 
if self.parameters['debug']: + results['debug'] = self.debug + self.module.exit_json(**results) + + +def main(): + AzureRMNetAppAccount() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_capacity_pool.py b/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_capacity_pool.py new file mode 100644 index 000000000..9d099a03f --- /dev/null +++ b/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_capacity_pool.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +azure_rm_netapp_capacity_pool +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_netapp_capacity_pool + +short_description: Manage NetApp Azure Files capacity pool +version_added: 19.10.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create and delete NetApp Azure capacity pool. + Provide the Resource group name for the capacity pool to be created. + - Resize NetApp Azure capacity pool +extends_documentation_fragment: + - netapp.azure.azure + - netapp.azure.azure_tags + - netapp.azure.netapp.azure_rm_netapp + +options: + name: + description: + - The name of the capacity pool. + required: true + type: str + account_name: + description: + - The name of the NetApp account. + required: true + type: str + location: + description: + - Resource location. + - Required for create. + type: str + size: + description: + - Provisioned size of the pool (in chunks). Allowed values are in 4TiB chunks. + - Provide number to be multiplied to 4TiB. + - Required for create. + default: 1 + type: int + service_level: + description: + - The service level of the file system. + - Required for create. 
+ choices: ['Standard', 'Premium', 'Ultra'] + type: str + version_added: "20.5.0" + state: + description: + - State C(present) will check that the capacity pool exists with the requested configuration. + - State C(absent) will delete the capacity pool. + default: present + choices: ['present', 'absent'] + type: str + +''' +EXAMPLES = ''' + +- name: Create Azure NetApp capacity pool + netapp.azure.azure_rm_netapp_capacity_pool: + resource_group: myResourceGroup + account_name: tests-netapp + name: tests-pool + location: eastus + size: 2 + service_level: Standard + +- name: Resize Azure NetApp capacity pool + netapp.azure.azure_rm_netapp_capacity_pool: + resource_group: myResourceGroup + account_name: tests-netapp + name: tests-pool + location: eastus + size: 3 + service_level: Standard + +- name: Delete Azure NetApp capacity pool + netapp.azure.azure_rm_netapp_capacity_pool: + state: absent + resource_group: myResourceGroup + account_name: tests-netapp + name: tests-pool + +''' + +RETURN = ''' +''' + +import traceback + +AZURE_OBJECT_CLASS = 'NetAppAccount' +HAS_AZURE_MGMT_NETAPP = False +IMPORT_ERRORS = list() +SIZE_POOL = 4398046511104 + +try: + from msrestazure.azure_exceptions import CloudError + from azure.core.exceptions import AzureError, ResourceNotFoundError +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +try: + from azure.mgmt.netapp.models import CapacityPool + HAS_AZURE_MGMT_NETAPP = True +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +from ansible.module_utils.basic import to_native +from ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common import AzureRMNetAppModuleBase +from ansible_collections.netapp.azure.plugins.module_utils.netapp_module import NetAppModule + + +class AzureRMNetAppCapacityPool(AzureRMNetAppModuleBase): + """ create, modify, delete a capacity pool """ + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + 
name=dict(type='str', required=True), + account_name=dict(type='str', required=True), + location=dict(type='str', required=False), + state=dict(choices=['present', 'absent'], default='present', type='str'), + size=dict(type='int', required=False, default=1), + service_level=dict(type='str', required=False, choices=['Standard', 'Premium', 'Ultra']), + ) + + self.na_helper = NetAppModule() + self.parameters = dict() + + # import errors are handled in AzureRMModuleBase + super(AzureRMNetAppCapacityPool, self).__init__(derived_arg_spec=self.module_arg_spec, + required_if=[('state', 'present', ['location', 'service_level'])], + supports_check_mode=True) + + def get_azure_netapp_capacity_pool(self): + """ + Returns capacity pool object for an existing pool + Return None if capacity pool does not exist + """ + try: + capacity_pool_get = self.netapp_client.pools.get(self.parameters['resource_group'], + self.parameters['account_name'], self.parameters['name']) + except (CloudError, ResourceNotFoundError): # capacity pool does not exist + return None + return capacity_pool_get + + def create_azure_netapp_capacity_pool(self): + """ + Create a capacity pool for the given Azure NetApp Account + :return: None + """ + options = self.na_helper.get_not_none_values_from_dict(self.parameters, ['location', 'service_level', 'size', 'tags']) + capacity_pool_body = CapacityPool(**options) + try: + response = self.get_method('pools', 'create_or_update')(body=capacity_pool_body, resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['account_name'], + pool_name=self.parameters['name']) + while response.done() is not True: + response.result(10) + + except (CloudError, AzureError) as error: + self.module.fail_json(msg='Error creating capacity pool %s for Azure NetApp account %s: %s' + % (self.parameters['name'], self.parameters['account_name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_azure_netapp_capacity_pool(self, modify): + """ 
+ Modify a capacity pool for the given Azure NetApp Account + :return: None + """ + options = self.na_helper.get_not_none_values_from_dict(self.parameters, ['location', 'service_level', 'size', 'tags']) + capacity_pool_body = CapacityPool(**options) + try: + response = self.get_method('pools', 'update')(body=capacity_pool_body, resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['account_name'], + pool_name=self.parameters['name']) + while response.done() is not True: + response.result(10) + + except (CloudError, AzureError) as error: + self.module.fail_json(msg='Error modifying capacity pool %s for Azure NetApp account %s: %s' + % (self.parameters['name'], self.parameters['account_name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_azure_netapp_capacity_pool(self): + """ + Delete a capacity pool for the given Azure NetApp Account + :return: None + """ + try: + response = self.get_method('pools', 'delete')(resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['account_name'], pool_name=self.parameters['name']) + while response.done() is not True: + response.result(10) + + except (CloudError, AzureError) as error: + self.module.fail_json(msg='Error deleting capacity pool %s for Azure NetApp account %s: %s' + % (self.parameters['name'], self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def exec_module(self, **kwargs): + + # unlikely + self.fail_when_import_errors(IMPORT_ERRORS, HAS_AZURE_MGMT_NETAPP) + + # set up parameters according to our initial list + for key in list(self.module_arg_spec): + self.parameters[key] = kwargs[key] + # and common parameter + for key in ['tags']: + if key in kwargs: + self.parameters[key] = kwargs[key] + if 'size' in self.parameters: + self.parameters['size'] *= SIZE_POOL + + modify = {} + current = self.get_azure_netapp_capacity_pool() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if 
cd_action is None and self.parameters['state'] == 'present': + current = vars(current) + # get_azure_netapp_capacity_pool() returns pool name with account name appended in front of it like 'account/pool' + current['name'] = self.parameters['name'] + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if 'tags' in modify: + dummy, modify['tags'] = self.update_tags(current.get('tags')) + + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'create': + self.create_azure_netapp_capacity_pool() + elif cd_action == 'delete': + self.delete_azure_netapp_capacity_pool() + elif modify: + self.modify_azure_netapp_capacity_pool(modify) + + self.module.exit_json(changed=self.na_helper.changed, modify=modify) + + +def main(): + AzureRMNetAppCapacityPool() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_snapshot.py b/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_snapshot.py new file mode 100644 index 000000000..212f10861 --- /dev/null +++ b/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_snapshot.py @@ -0,0 +1,226 @@ +#!/usr/bin/python +# +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +azure_rm_netapp_snapshot +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_netapp_snapshot + +short_description: Manage NetApp Azure Files Snapshot +version_added: 19.10.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create and delete NetApp Azure Snapshot. +extends_documentation_fragment: + - netapp.azure.azure + - netapp.azure.netapp.azure_rm_netapp + +options: + name: + description: + - The name of the snapshot. + required: true + type: str + volume_name: + description: + - The name of the volume. 
+ required: true + type: str + pool_name: + description: + - The name of the capacity pool. + required: true + type: str + account_name: + description: + - The name of the NetApp account. + required: true + type: str + location: + description: + - Resource location. + - Required for create. + type: str + state: + description: + - State C(present) will check that the snapshot exists with the requested configuration. + - State C(absent) will delete the snapshot. + default: present + choices: + - absent + - present + type: str + +''' +EXAMPLES = ''' + +- name: Create Azure NetApp Snapshot + netapp.azure.azure_rm_netapp_snapshot: + resource_group: myResourceGroup + account_name: tests-netapp + pool_name: tests-pool + volume_name: tests-volume2 + name: tests-snapshot + location: eastus + +- name: Delete Azure NetApp Snapshot + netapp.azure.azure_rm_netapp_snapshot: + state: absent + resource_group: myResourceGroup + account_name: tests-netapp + pool_name: tests-pool + volume_name: tests-volume2 + name: tests-snapshot + +''' + +RETURN = ''' +''' + +import traceback + +AZURE_OBJECT_CLASS = 'NetAppAccount' +HAS_AZURE_MGMT_NETAPP = False +IMPORT_ERRORS = list() + +try: + from msrestazure.azure_exceptions import CloudError + from azure.core.exceptions import AzureError, ResourceNotFoundError +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +try: + from azure.mgmt.netapp.models import Snapshot + HAS_AZURE_MGMT_NETAPP = True +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +from ansible.module_utils.basic import to_native +from ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common import AzureRMNetAppModuleBase +from ansible_collections.netapp.azure.plugins.module_utils.netapp_module import NetAppModule + + +class AzureRMNetAppSnapshot(AzureRMNetAppModuleBase): + """ crate or delete snapshots """ + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', 
required=True), + volume_name=dict(type='str', required=True), + pool_name=dict(type='str', required=True), + account_name=dict(type='str', required=True), + location=dict(type='str', required=False), + state=dict(choices=['present', 'absent'], default='present', type='str') + ) + self.na_helper = NetAppModule() + self.parameters = dict() + + # import errors are handled in AzureRMModuleBase + super(AzureRMNetAppSnapshot, self).__init__(derived_arg_spec=self.module_arg_spec, + required_if=[('state', 'present', ['location'])], + supports_check_mode=True, + supports_tags=False) + + def get_azure_netapp_snapshot(self): + """ + Returns snapshot object for an existing snapshot + Return None if snapshot does not exist + """ + try: + snapshot_get = self.netapp_client.snapshots.get(self.parameters['resource_group'], self.parameters['account_name'], + self.parameters['pool_name'], self.parameters['volume_name'], + self.parameters['name']) + except (CloudError, ResourceNotFoundError): # snapshot does not exist + return None + return snapshot_get + + def create_azure_netapp_snapshot(self): + """ + Create a snapshot for the given Azure NetApp Account + :return: None + """ + kw_args = dict( + resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['account_name'], + pool_name=self.parameters['pool_name'], + volume_name=self.parameters['volume_name'], + snapshot_name=self.parameters['name'] + ) + if self.new_style: + kw_args['body'] = Snapshot( + location=self.parameters['location'] + ) + else: + kw_args['location'] = self.parameters['location'] + try: + result = self.get_method('snapshots', 'create')(**kw_args) + # waiting till the status turns Succeeded + while result.done() is not True: + result.result(10) + + except (CloudError, AzureError) as error: + self.module.fail_json(msg='Error creating snapshot %s for Azure NetApp account %s: %s' + % (self.parameters['name'], self.parameters['account_name'], to_native(error)), + 
exception=traceback.format_exc()) + + def delete_azure_netapp_snapshot(self): + """ + Delete a snapshot for the given Azure NetApp Account + :return: None + """ + try: + result = self.get_method('snapshots', 'delete')(resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['account_name'], + pool_name=self.parameters['pool_name'], + volume_name=self.parameters['volume_name'], + snapshot_name=self.parameters['name']) + # waiting till the status turns Succeeded + while result.done() is not True: + result.result(10) + + except (CloudError, AzureError) as error: + self.module.fail_json(msg='Error deleting snapshot %s for Azure NetApp account %s: %s' + % (self.parameters['name'], self.parameters['account_name'], to_native(error)), + exception=traceback.format_exc()) + + def exec_module(self, **kwargs): + + # unlikely + self.fail_when_import_errors(IMPORT_ERRORS, HAS_AZURE_MGMT_NETAPP) + + # set up parameters according to our initial list + for key in list(self.module_arg_spec): + self.parameters[key] = kwargs[key] + + current = self.get_azure_netapp_snapshot() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'create': + self.create_azure_netapp_snapshot() + elif cd_action == 'delete': + self.delete_azure_netapp_snapshot() + + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + AzureRMNetAppSnapshot() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_volume.py b/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_volume.py new file mode 100644 index 000000000..487787ee7 --- /dev/null +++ b/ansible_collections/netapp/azure/plugins/modules/azure_rm_netapp_volume.py @@ -0,0 +1,399 @@ +#!/usr/bin/python +# +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' 
+azure_rm_netapp_volume +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_netapp_volume + +short_description: Manage NetApp Azure Files Volume +version_added: 19.10.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create and delete NetApp Azure volume. +extends_documentation_fragment: + - netapp.azure.azure + - netapp.azure.azure_tags + - netapp.azure.netapp.azure_rm_netapp + +options: + name: + description: + - The name of the volume. + required: true + type: str + file_path: + description: + - A unique file path for the volume. Used when creating mount targets. + type: str + pool_name: + description: + - The name of the capacity pool. + required: true + type: str + account_name: + description: + - The name of the NetApp account. + required: true + type: str + location: + description: + - Resource location. + - Required for create. + type: str + subnet_name: + description: + - Azure resource name for a delegated subnet. Must have the delegation Microsoft.NetApp/volumes. + - Provide name of the subnet ID. + - Required for create. + type: str + aliases: ['subnet_id'] + version_added: 21.1.0 + virtual_network: + description: + - The name of the virtual network required for the subnet to create a volume. + - Required for create. + type: str + service_level: + description: + - The service level of the file system. + - default is Premium. + type: str + choices: ['Premium', 'Standard', 'Ultra'] + vnet_resource_group_for_subnet: + description: + - Only required if virtual_network to be used is of different resource_group. + - Name of the resource group for virtual_network and subnet_name to be used. + type: str + version_added: "20.5.0" + size: + description: + - Provisioned size of the volume (in GiB). + - Minimum size is 100 GiB. Upper limit is 100TiB + - default is 100GiB. 
+ version_added: "20.5.0" + type: int + protocol_types: + description: + - Protocol types - NFSv3, NFSv4.1, CIFS (for SMB). + type: list + elements: str + version_added: 21.2.0 + state: + description: + - State C(present) will check that the volume exists with the requested configuration. + - State C(absent) will delete the volume. + default: present + choices: ['present', 'absent'] + type: str + feature_flags: + description: + - Enable or disable a new feature. + - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility. + - Supported keys and values are subject to change without notice. Unknown keys are ignored. + type: dict + version_added: 21.9.0 +notes: + - feature_flags is setting ignore_change_ownership_mode to true by default to bypass a 'change ownership mode' issue with azure-mgmt-netapp 4.0.0. +''' +EXAMPLES = ''' + +- name: Create Azure NetApp volume + netapp.azure.azure_rm_netapp_volume: + resource_group: myResourceGroup + account_name: tests-netapp + pool_name: tests-pool + name: tests-volume2 + location: eastus + file_path: tests-volume2 + virtual_network: myVirtualNetwork + vnet_resource_group_for_subnet: myVirtualNetworkResourceGroup + subnet_name: test + service_level: Ultra + size: 100 + +- name: Delete Azure NetApp volume + netapp.azure.azure_rm_netapp_volume: + state: absent + resource_group: myResourceGroup + account_name: tests-netapp + pool_name: tests-pool + name: tests-volume2 + +''' + +RETURN = ''' +mount_path: + description: Returns mount_path of the Volume + returned: always + type: str + +''' + +import traceback + +AZURE_OBJECT_CLASS = 'NetAppAccount' +HAS_AZURE_MGMT_NETAPP = False +IMPORT_ERRORS = [] +ONE_GIB = 1073741824 + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.exceptions import ValidationError + from azure.core.exceptions import AzureError, ResourceNotFoundError +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +try: + from 
azure.mgmt.netapp.models import Volume, ExportPolicyRule, VolumePropertiesExportPolicy, VolumePatch + HAS_AZURE_MGMT_NETAPP = True +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +from ansible.module_utils.basic import to_native +from ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common import AzureRMNetAppModuleBase +from ansible_collections.netapp.azure.plugins.module_utils.netapp_module import NetAppModule + + +class AzureRMNetAppVolume(AzureRMNetAppModuleBase): + ''' create or delete a volume ''' + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + file_path=dict(type='str', required=False), + pool_name=dict(type='str', required=True), + account_name=dict(type='str', required=True), + location=dict(type='str', required=False), + state=dict(choices=['present', 'absent'], default='present', type='str'), + subnet_name=dict(type='str', required=False, aliases=['subnet_id']), + virtual_network=dict(type='str', required=False), + size=dict(type='int', required=False), + vnet_resource_group_for_subnet=dict(type='str', required=False), + service_level=dict(type='str', required=False, choices=['Premium', 'Standard', 'Ultra']), + protocol_types=dict(type='list', elements='str'), + feature_flags=dict(type='dict') + ) + self.na_helper = NetAppModule() + self.parameters = {} + + # import errors are handled in AzureRMModuleBase + super(AzureRMNetAppVolume, self).__init__(derived_arg_spec=self.module_arg_spec, + required_if=[('state', 'present', ['location', 'file_path', 'subnet_name', 'virtual_network']), + ], + supports_check_mode=True) + + @staticmethod + def dict_from_volume_object(volume_object): + + def replace_list_of_objects_with_list_of_dicts(adict, key): + if adict.get(key): + adict[key] = [vars(x) for x in adict[key]] + + current_dict = vars(volume_object) + attr = 'subnet_id' + if attr in current_dict: + current_dict['subnet_name'] 
= current_dict.pop(attr).split('/')[-1] + attr = 'mount_targets' + replace_list_of_objects_with_list_of_dicts(current_dict, attr) + attr = 'export_policy' + if current_dict.get(attr): + attr_dict = vars(current_dict[attr]) + replace_list_of_objects_with_list_of_dicts(attr_dict, 'rules') + current_dict[attr] = attr_dict + return current_dict + + def get_azure_netapp_volume(self): + """ + Returns volume object for an existing volume + Return None if volume does not exist + """ + try: + volume_get = self.netapp_client.volumes.get(self.parameters['resource_group'], self.parameters['account_name'], + self.parameters['pool_name'], self.parameters['name']) + except (CloudError, ResourceNotFoundError): # volume does not exist + return None + return self.dict_from_volume_object(volume_get) + + def get_export_policy_rules(self): + # ExportPolicyRule(rule_index: int=None, unix_read_only: bool=None, unix_read_write: bool=None, + # kerberos5_read_only: bool=False, kerberos5_read_write: bool=False, kerberos5i_read_only: bool=False, + # kerberos5i_read_write: bool=False, kerberos5p_read_only: bool=False, kerberos5p_read_write: bool=False, + # cifs: bool=None, nfsv3: bool=None, nfsv41: bool=None, allowed_clients: str=None, has_root_access: bool=True + ptypes = self.parameters.get('protocol_types') + if ptypes is None: + return None + ptypes = [x.lower() for x in ptypes] + if 'nfsv4.1' in ptypes: + ptypes.append('nfsv41') + # only create a policy when NFSv4 is used (for now) + if 'nfsv41' not in ptypes: + return None + options = dict( + rule_index=1, + allowed_clients='0.0.0.0/0', + unix_read_write=True) + if self.has_feature('ignore_change_ownership_mode') and self.sdk_version >= '4.0.0': + # https://github.com/Azure/azure-sdk-for-python/issues/20356 + options['chown_mode'] = None + for protocol in ('cifs', 'nfsv3', 'nfsv41'): + options[protocol] = protocol in ptypes + return VolumePropertiesExportPolicy(rules=[ExportPolicyRule(**options)]) + + def 
create_azure_netapp_volume(self): + """ + Create a volume for the given Azure NetApp Account + :return: None + """ + options = self.na_helper.get_not_none_values_from_dict(self.parameters, ['protocol_types', 'service_level', 'tags', 'usage_threshold']) + rules = self.get_export_policy_rules() + if rules is not None: + # TODO: other options to expose ? + # options['throughput_mibps'] = 1.6 + # options['encryption_key_source'] = 'Microsoft.NetApp' + # options['security_style'] = 'Unix' + # options['unix_permissions'] = '0770' + # required for NFSv4 + options['export_policy'] = rules + subnet_id = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s'\ + % (self.azure_auth.subscription_id, + self.parameters['resource_group'] if self.parameters.get('vnet_resource_group_for_subnet') is None + else self.parameters['vnet_resource_group_for_subnet'], + self.parameters['virtual_network'], + self.parameters['subnet_name']) + volume_body = Volume( + location=self.parameters['location'], + creation_token=self.parameters['file_path'], + subnet_id=subnet_id, + **options + ) + try: + result = self.get_method('volumes', 'create_or_update')(body=volume_body, resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['account_name'], + pool_name=self.parameters['pool_name'], volume_name=self.parameters['name']) + # waiting till the status turns Succeeded + while result.done() is not True: + result.result(10) + except (CloudError, ValidationError, AzureError) as error: + self.module.fail_json(msg='Error creating volume %s for Azure NetApp account %s and subnet ID %s: %s' + % (self.parameters['name'], self.parameters['account_name'], subnet_id, to_native(error)), + exception=traceback.format_exc()) + + def modify_azure_netapp_volume(self): + """ + Modify a volume for the given Azure NetApp Account + :return: None + """ + options = self.na_helper.get_not_none_values_from_dict(self.parameters, ['tags', 
'usage_threshold']) + volume_body = VolumePatch( + **options + ) + try: + result = self.get_method('volumes', 'update')(body=volume_body, resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['account_name'], + pool_name=self.parameters['pool_name'], volume_name=self.parameters['name']) + # waiting till the status turns Succeeded + while result.done() is not True: + result.result(10) + except (CloudError, ValidationError, AzureError) as error: + self.module.fail_json(msg='Error modifying volume %s for Azure NetApp account %s: %s' + % (self.parameters['name'], self.parameters['account_name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_azure_netapp_volume(self): + """ + Delete a volume for the given Azure NetApp Account + :return: None + """ + try: + result = self.get_method('volumes', 'delete')(resource_group_name=self.parameters['resource_group'], + account_name=self.parameters['account_name'], + pool_name=self.parameters['pool_name'], volume_name=self.parameters['name']) + # waiting till the status turns Succeeded + while result.done() is not True: + result.result(10) + except (CloudError, AzureError) as error: + self.module.fail_json(msg='Error deleting volume %s for Azure NetApp account %s: %s' + % (self.parameters['name'], self.parameters['account_name'], to_native(error)), + exception=traceback.format_exc()) + + def validate_modify(self, modify, current): + disallowed = dict(modify) + disallowed.pop('tags', None) + disallowed.pop('usage_threshold', None) + if disallowed: + self.module.fail_json(msg="Error: the following properties cannot be modified: %s. 
Current: %s" % (repr(disallowed), repr(current))) + + def exec_module(self, **kwargs): + + # unlikely + self.fail_when_import_errors(IMPORT_ERRORS, HAS_AZURE_MGMT_NETAPP) + + # set up parameters according to our initial list + for key in list(self.module_arg_spec): + self.parameters[key] = kwargs[key] + # and common parameter + for key in ['tags']: + if key in kwargs: + self.parameters[key] = kwargs[key] + + # API is using 'usage_threshold' for 'size', and the unit is bytes + if self.parameters.get('size') is not None: + self.parameters['usage_threshold'] = ONE_GIB * self.parameters.pop('size') + + modify = None + current = self.get_azure_netapp_volume() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and current: + # ignore change in name + name = current.pop('name', None) + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if name is not None: + current['name'] = name + if 'tags' in modify: + dummy, modify['tags'] = self.update_tags(current.get('tags')) + self.validate_modify(modify, current) + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_azure_netapp_volume() + elif cd_action == 'delete': + self.delete_azure_netapp_volume() + elif modify: + self.modify_azure_netapp_volume() + + def get_mount_info(return_info): + if return_info is not None and return_info.get('mount_targets'): + return '%s:/%s' % (return_info['mount_targets'][0]['ip_address'], return_info['creation_token']) + return None + + mount_info = '' + if self.parameters['state'] == 'present': + return_info = self.get_azure_netapp_volume() + if return_info is None and not self.module.check_mode: + self.module.fail_json(msg='Error: volume %s was created successfully, but cannot be found.' 
% self.parameters['name']) + mount_info = get_mount_info(return_info) + if mount_info is None and not self.module.check_mode: + self.module.fail_json(msg='Error: volume %s was created successfully, but mount target(s) cannot be found - volume details: %s.' + % (self.parameters['name'], str(return_info))) + self.module.exit_json(changed=self.na_helper.changed, mount_path=mount_info, modify=modify) + + +def main(): + AzureRMNetAppVolume() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/azure/requirements.txt b/ansible_collections/netapp/azure/requirements.txt new file mode 100644 index 000000000..4badbc6ae --- /dev/null +++ b/ansible_collections/netapp/azure/requirements.txt @@ -0,0 +1,3 @@ +azure-mgmt-netapp +requests +xmltodict \ No newline at end of file diff --git a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/aliases b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/aliases new file mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/meta/main.yml b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/tasks/main.yml b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/tasks/main.yml new file mode 100644 index 000000000..9fa627e48 --- /dev/null +++ 
b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_account/tasks/main.yml @@ -0,0 +1,41 @@ +- name: Create Azure NetApp account + azure_rm_netapp_account: + resource_group: laurentngroupnodash + name: tests-netapp1 + location: eastus + tags: {'test1': 'tesssttt', 'abc': 'xyz'} + register: output + +- assert: + that: output.changed + +- name: Create Azure NetApp account (Idempotency) + azure_rm_netapp_account: + resource_group: laurentngroupnodash + name: tests-netapp1 + location: eastus + tags: {'test1': 'tesssttt', 'abc': 'xyz'} + register: output + +- assert: + that: not output.changed + +- name: Delete Azure NetApp account + azure_rm_netapp_account: + state: absent + resource_group: laurentngroupnodash + name: tests-netapp1 + register: output + +- assert: + that: output.changed + +- name: Delete Azure NetApp account (Idempotency) + azure_rm_netapp_account: + state: absent + resource_group: laurentngroupnodash + name: tests-netapp1 + register: output + +- assert: + that: not output.changed diff --git a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/aliases b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/aliases new file mode 100644 index 000000000..3a0c0dc68 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive \ No newline at end of file diff --git a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/meta/main.yml b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/meta/main.yml new file mode 100644 index 000000000..48f5726d8 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure \ No newline at end of file diff --git 
a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/tasks/main.yml b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/tasks/main.yml new file mode 100644 index 000000000..9bbae4ae7 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_capacity_pool/tasks/main.yml @@ -0,0 +1,47 @@ +- name: Create Azure NetApp capacity pool + azure_rm_netapp_capacity_pool: + resource_group: NetworkWatcherRG + account_name: ansibleazure + name: tests-poolsss + location: eastus + size: 2 + service_level: Standard + register: output + +- assert: + that: output.changed + +- name: Create Azure NetApp capacity pool (Idempotency) + azure_rm_netapp_capacity_pool: + resource_group: NetworkWatcherRG + account_name: ansibleazure + name: tests-poolsss + location: eastus + size: 2 + service_level: Standard + register: output + +- assert: + that: not output.changed + +- name: Delete Azure NetApp capacity pool + azure_rm_netapp_capacity_pool: + state: absent + resource_group: NetworkWatcherRG + account_name: ansibleazure + name: tests-poolsss + register: output + +- assert: + that: output.changed + +- name: Delete Azure NetApp capacity pool (Idempotency) + azure_rm_netapp_capacity_pool: + state: absent + resource_group: NetworkWatcherRG + account_name: ansibleazure + name: tests-poolsss + register: output + +- assert: + that: not output.changed \ No newline at end of file diff --git a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/aliases b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/aliases new file mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git 
a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/meta/main.yml b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/tasks/main.yml b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/tasks/main.yml new file mode 100644 index 000000000..c1c1cf5c2 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_snapshot/tasks/main.yml @@ -0,0 +1,51 @@ +- name: Create Azure NetApp snapshot + azure_rm_netapp_snapshot: + resource_group: laurentngroupnodash + account_name: tests-netapp + pool_name: test-pool + volume_name: tes + name: tests-snapshot + location: eastus + register: output + +- assert: + that: output.changed + +- name: Create Azure NetApp snapshot (Idempotency) + azure_rm_netapp_snapshot: + resource_group: laurentngroupnodash + account_name: tests-netapp + pool_name: test-pool + volume_name: tes + name: tests-snapshot + location: eastus + register: output + +- assert: + that: not output.changed + +- name: Delete Azure NetApp snapshot + azure_rm_netapp_snapshot: + state: absent + resource_group: laurentngroupnodash + account_name: tests-netapp + pool_name: test-pool + volume_name: tes + name: tests-snapshot + register: output + +- assert: + that: output.changed + +- name: Delete Azure NetApp snapshot (Idempotency) + azure_rm_netapp_snapshot: + state: absent + resource_group: laurentngroupnodash + account_name: tests-netapp + pool_name: test-pool + volume_name: tes + name: tests-snapshot + register: output + +- assert: + that: not output.changed diff --git 
a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/aliases b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/aliases new file mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/meta/main.yml b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/tasks/main.yml b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/tasks/main.yml new file mode 100644 index 000000000..c5b2a4262 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/integration/targets/azure_rm_netapp_volume/tasks/main.yml @@ -0,0 +1,57 @@ +- name: Create Azure NetApp volume + azure_rm_netapp_volume: + resource_group: NetworkWatcherRG + account_name: ansibleazure + pool_name: tests-pool + name: tests-volume2 + location: eastus + size: 150 + file_path: tests-volume2 + virtual_network: azure_ansible + subnet_id: test + service_level: Standard + register: output + +- assert: + that: output.changed + +- name: Create Azure NetApp volume (Idempotency) + azure_rm_netapp_volume: + resource_group: NetworkWatcherRG + account_name: ansibleazure + pool_name: tests-pool + name: tests-volume2 + location: eastus + size: 150 + file_path: tests-volume2 + virtual_network: azure_ansible + subnet_id: test + service_level: Standard + register: output + +- assert: + that: not output.changed + +- name: Delete Azure NetApp volume 
+ azure_rm_netapp_volume: + state: absent + resource_group: NetworkWatcherRG + account_name: ansibleazure + pool_name: tests-pool + name: tests-volume2 + register: output + +- assert: + that: output.changed + +- name: Delete Azure NetApp volume (Idempotency) + azure_rm_netapp_volume: + state: absent + resource_group: NetworkWatcherRG + account_name: ansibleazure + pool_name: tests-pool + name: tests-volume2 + register: output + +- assert: + that: not output.changed diff --git a/ansible_collections/netapp/azure/tests/runner/requirements/integration.cloud.azure.txt b/ansible_collections/netapp/azure/tests/runner/requirements/integration.cloud.azure.txt new file mode 100644 index 000000000..e75cf1e2b --- /dev/null +++ b/ansible_collections/netapp/azure/tests/runner/requirements/integration.cloud.azure.txt @@ -0,0 +1 @@ +azure-mgmt-netapp ; python_version >= '2.7' diff --git a/ansible_collections/netapp/azure/tests/runner/requirements/requirements-azure.txt b/ansible_collections/netapp/azure/tests/runner/requirements/requirements-azure.txt new file mode 100644 index 000000000..11852e2a5 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/runner/requirements/requirements-azure.txt @@ -0,0 +1,9 @@ +azure-common==1.1.11 +azure-mgmt-compute==4.4.0 +azure-storage==0.35.1 +azure-mgmt-monitor==0.5.2 +azure-mgmt-network==2.3.0 +azure-mgmt-resource==2.1.0 +azure-mgmt-storage==3.1.0 +azure-mgmt-netapp ; python_version >= '2.7' +azure-cli diff --git a/ansible_collections/netapp/azure/tests/runner/requirements/unit.cloud.azure.txt b/ansible_collections/netapp/azure/tests/runner/requirements/unit.cloud.azure.txt new file mode 100644 index 000000000..e75cf1e2b --- /dev/null +++ b/ansible_collections/netapp/azure/tests/runner/requirements/unit.cloud.azure.txt @@ -0,0 +1 @@ +azure-mgmt-netapp ; python_version >= '2.7' diff --git a/ansible_collections/netapp/azure/tests/runner/requirements/units.txt b/ansible_collections/netapp/azure/tests/runner/requirements/units.txt new file 
mode 100644 index 000000000..e75cf1e2b --- /dev/null +++ b/ansible_collections/netapp/azure/tests/runner/requirements/units.txt @@ -0,0 +1 @@ +azure-mgmt-netapp ; python_version >= '2.7' diff --git a/ansible_collections/netapp/azure/tests/unit/compat/__init__.py b/ansible_collections/netapp/azure/tests/unit/compat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/netapp/azure/tests/unit/compat/builtins.py b/ansible_collections/netapp/azure/tests/unit/compat/builtins.py new file mode 100644 index 000000000..f60ee6782 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/compat/builtins.py @@ -0,0 +1,33 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' diff --git a/ansible_collections/netapp/azure/tests/unit/compat/mock.py b/ansible_collections/netapp/azure/tests/unit/compat/mock.py new file mode 100644 index 000000000..0972cd2e8 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/compat/mock.py @@ -0,0 +1,122 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. 
It +# is the same as the python3 stdlib mock library + +try: + # Allow wildcard import because we really do want to import all of mock's + # symbols into this compat shim + # pylint: disable=wildcard-import,unused-wildcard-import + from unittest.mock import * +except ImportError: + # Python 2 + # pylint: disable=wildcard-import,unused-wildcard-import + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +# Prior to 3.4.4, mock_open cannot handle binary read_data +if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b'\n' if isinstance(read_data, bytes) else '\n' + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. + data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read` methoddline`, and `readlines` of the + file handle to return. This is an empty string by default. 
+ """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock diff --git a/ansible_collections/netapp/azure/tests/unit/compat/unittest.py b/ansible_collections/netapp/azure/tests/unit/compat/unittest.py new file mode 100644 index 000000000..73a20cf8c --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/compat/unittest.py @@ -0,0 +1,44 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +import pytest + +# Allow wildcard import because we really do want to import all of +# unittests's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') + + class TestCase: + """ skip everything """ + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as unittest2 may not be available') +else: + from unittest import * diff --git a/ansible_collections/netapp/azure/tests/unit/plugins/module_utils/test_netapp_module.py b/ansible_collections/netapp/azure/tests/unit/plugins/module_utils/test_netapp_module.py new file mode 100644 index 000000000..fb83c464e --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/plugins/module_utils/test_netapp_module.py @@ -0,0 +1,149 @@ +# Copyright (c) 2018 NetApp +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils netapp_module.py ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.netapp.azure.tests.unit.compat import unittest +from ansible_collections.netapp.azure.plugins.module_utils.netapp_module import NetAppModule as na_helper + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def test_get_cd_action_create(self): + ''' validate cd_action for create ''' + current = None + desired = {'state': 'present'} + my_obj = na_helper() + result = 
my_obj.get_cd_action(current, desired) + assert result == 'create' + + def test_get_cd_action_delete(self): + ''' validate cd_action for delete ''' + current = {'state': 'absent'} + desired = {'state': 'absent'} + my_obj = na_helper() + result = my_obj.get_cd_action(current, desired) + assert result == 'delete' + + def test_get_cd_action(self): + ''' validate cd_action for returning None ''' + current = None + desired = {'state': 'absent'} + my_obj = na_helper() + result = my_obj.get_cd_action(current, desired) + assert result is None + + def test_get_modified_attributes_for_no_data(self): + ''' validate modified attributes when current is None ''' + current = None + desired = {'name': 'test'} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired) + assert result == {} + + def test_get_modified_attributes(self): + ''' validate modified attributes ''' + current = {'name': ['test', 'abcd', 'xyz', 'pqr'], 'state': 'present'} + desired = {'name': ['abcd', 'abc', 'xyz', 'pqr'], 'state': 'absent'} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired) + assert result == desired + + def test_get_modified_attributes_for_intersecting_mixed_list(self): + ''' validate modified attributes for list diff ''' + current = {'name': [2, 'four', 'six', 8]} + desired = {'name': ['a', 8, 'ab', 'four', 'abcd']} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'name': ['a', 'ab', 'abcd']} + + def test_get_modified_attributes_for_intersecting_list(self): + ''' validate modified attributes for list diff ''' + current = {'name': ['two', 'four', 'six', 'eight']} + desired = {'name': ['a', 'six', 'ab', 'four', 'abc']} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'name': ['a', 'ab', 'abc']} + + def test_get_modified_attributes_for_nonintersecting_list(self): + ''' validate modified attributes for list diff ''' + 
current = {'name': ['two', 'four', 'six', 'eight']} + desired = {'name': ['a', 'ab', 'abd']} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'name': ['a', 'ab', 'abd']} + + def test_get_modified_attributes_for_list_of_dicts_no_data(self): + ''' validate modified attributes for list diff ''' + current = None + desired = {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {} + + def test_get_modified_attributes_for_intersecting_list_of_dicts(self): + ''' validate modified attributes for list diff ''' + current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]} + desired = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]} + + def test_get_modified_attributes_for_nonintersecting_list_of_dicts(self): + ''' validate modified attributes for list diff ''' + current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]} + desired = {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]} + + def test_get_modified_attributes_for_list_diff(self): + ''' validate modified attributes for list diff ''' + current = {'name': ['test', 'abcd'], 'state': 'present'} + desired = {'name': ['abcd', 'abc'], 'state': 'present'} + my_obj = na_helper() + result = 
my_obj.get_modified_attributes(current, desired, True) + assert result == {'name': ['abc']} + + def test_get_modified_attributes_for_no_change(self): + ''' validate modified attributes for same data in current and desired ''' + current = {'name': 'test'} + desired = {'name': 'test'} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired) + assert result == {} + + def test_is_rename_action_for_empty_input(self): + ''' validate rename action for input None ''' + source = None + target = None + my_obj = na_helper() + result = my_obj.is_rename_action(source, target) + assert result == source + + def test_is_rename_action_for_no_source(self): + ''' validate rename action when source is None ''' + source = None + target = 'test2' + my_obj = na_helper() + result = my_obj.is_rename_action(source, target) + assert result is False + + def test_is_rename_action_for_no_target(self): + ''' validate rename action when target is None ''' + source = 'test2' + target = None + my_obj = na_helper() + result = my_obj.is_rename_action(source, target) + assert result is True + + def test_is_rename_action(self): + ''' validate rename action ''' + source = 'test' + target = 'test2' + my_obj = na_helper() + result = my_obj.is_rename_action(source, target) + assert result is False diff --git a/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_account.py b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_account.py new file mode 100644 index 000000000..0d140b4a0 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_account.py @@ -0,0 +1,173 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: azure_rm_netapp_account''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import sys + +import pytest 
+try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.azure.tests.unit.compat import unittest +from ansible_collections.netapp.azure.tests.unit.compat.mock import patch, Mock + +HAS_AZURE_RMNETAPP_IMPORT = True +try: + # At this point, python believes the module is already loaded, so the import inside azure_rm_netapp_volume will be skipped. + from ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_account \ + import AzureRMNetAppAccount as account_module +except ImportError: + HAS_AZURE_RMNETAPP_IMPORT = False + +HAS_AZURE_CLOUD_ERROR_IMPORT = True +try: + from msrestazure.azure_exceptions import CloudError +except ImportError: + HAS_AZURE_CLOUD_ERROR_IMPORT = False + +if not HAS_AZURE_CLOUD_ERROR_IMPORT and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required azure_exceptions on 2.6 and 2.7') + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + 
kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockAzureClient(object): + ''' mock server connection to ONTAP host ''' + def __init__(self): + ''' save arguments ''' + self.valid_accounts = ['test1', 'test2'] + + def get(self, resource_group, account_name): # pylint: disable=unused-argument + if account_name not in self.valid_accounts: + invalid = Response() + invalid.status_code = 404 + raise CloudError(response=invalid) + return Mock(name=account_name) + + def create_or_update(self, body, resource_group, account_name): # pylint: disable=unused-argument,no-self-use + return None + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + self.netapp_client = Mock() + self.netapp_client.accounts = MockAzureClient() + self._netapp_client = None + + def set_default_args(self): + resource_group = 'azure' + name = 'test1' + location = 'abc' + return dict({ + 'resource_group': resource_group, + 'name': name, + 'location': location + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + account_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_get_called_valid_account(self, client_f): + set_module_args(self.set_default_args()) + client_f.return_value = Mock() + client_f.side_effect = Mock() + my_obj = account_module() + my_obj.netapp_client.accounts = self.netapp_client.accounts + assert my_obj.get_azure_netapp_account() is not None + + 
@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_get_called_non_existing_account(self, client_f): + data = self.set_default_args() + data['name'] = 'invalid' + set_module_args(data) + client_f.return_value = Mock() + client_f.side_effect = Mock() + my_obj = account_module() + my_obj.netapp_client.accounts = self.netapp_client.accounts + assert my_obj.get_azure_netapp_account() is None + + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_account.AzureRMNetAppAccount.get_azure_netapp_account') + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_account.AzureRMNetAppAccount.create_azure_netapp_account') + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_create_called(self, client_f, mock_create, mock_get): + data = dict(self.set_default_args()) + data['name'] = 'create' + data['tags'] = {'ttt': 'tesssttt', 'abc': 'xyz'} + set_module_args(data) + mock_get.return_value = None + client_f.return_value = Mock() + client_f.side_effect = Mock() + my_obj = account_module() + my_obj.netapp_client.accounts = self.netapp_client.accounts + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + mock_create.assert_called_with() + + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_account.AzureRMNetAppAccount.get_azure_netapp_account') + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_account.AzureRMNetAppAccount.delete_azure_netapp_account') + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_delete_called(self, client_f, mock_delete, mock_get): + data = 
dict(self.set_default_args()) + data['state'] = 'absent' + set_module_args(data) + mock_get.return_value = Mock() + client_f.return_value = Mock() + client_f.side_effect = Mock() + my_obj = account_module() + my_obj.netapp_client.accounts = self.netapp_client.accounts + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + mock_delete.assert_called_with() diff --git a/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_capacity_pool.py b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_capacity_pool.py new file mode 100644 index 000000000..91c8eefd6 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_capacity_pool.py @@ -0,0 +1,197 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: azure_rm_netapp_capacity_pool''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import sys + +import pytest +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.azure.tests.unit.compat import unittest +from ansible_collections.netapp.azure.tests.unit.compat.mock import patch, Mock + +HAS_AZURE_RMNETAPP_IMPORT = True +try: + # At this point, python believes the module is already loaded, so the import inside azure_rm_netapp_volume will be skipped. 
+ from ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_capacity_pool \ + import AzureRMNetAppCapacityPool as capacity_pool_module +except ImportError: + HAS_AZURE_RMNETAPP_IMPORT = False + +HAS_AZURE_CLOUD_ERROR_IMPORT = True +try: + from msrestazure.azure_exceptions import CloudError +except ImportError: + HAS_AZURE_CLOUD_ERROR_IMPORT = False + +if not HAS_AZURE_CLOUD_ERROR_IMPORT and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required azure_exceptions on 2.6 and 2.7') + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockAzureClient(object): + ''' mock server connection to ONTAP host ''' + def __init__(self): + ''' save arguments ''' + self.valid_pools = ['test1', 'test2'] + + def get(self, resource_group, account_name, pool_name): # pylint: disable=unused-argument + if pool_name not in self.valid_pools: + invalid = Response() + invalid.status_code = 404 + raise CloudError(response=invalid) + else: + return Mock(name=pool_name) + + def create_or_update(self, body, resource_group, account_name, pool_name): # pylint: 
disable=unused-argument + return None + + def update(self, body, resource_group, account_name, pool_name): # pylint: disable=unused-argument + return None + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + self.netapp_client = Mock() + self.netapp_client.pools = MockAzureClient() + self._netapp_client = None + + def set_default_args(self): + resource_group = 'azure' + account_name = 'azure' + name = 'test1' + location = 'abc' + size = 1 + service_level = 'Standard' + return dict({ + 'resource_group': resource_group, + 'account_name': account_name, + 'name': name, + 'location': location, + 'size': size, + 'service_level': service_level + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + capacity_pool_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_get_called_valid_capacity_pool(self, client_f): + set_module_args(self.set_default_args()) + client_f.return_value = Mock() + my_obj = capacity_pool_module() + my_obj.netapp_client.pools = self.netapp_client.pools + assert my_obj.get_azure_netapp_capacity_pool() is not None + + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_get_called_non_existing_capacity_pool(self, client_f): + data = self.set_default_args() + data['name'] = 'invalid' + set_module_args(data) + client_f.return_value = Mock() + my_obj = capacity_pool_module() + my_obj.netapp_client.pools = self.netapp_client.pools + assert 
my_obj.get_azure_netapp_capacity_pool() is None + + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_capacity_pool.AzureRMNetAppCapacityPool.get_azure_netapp_capacity_pool') + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_capacity_pool.AzureRMNetAppCapacityPool.create_azure_netapp_capacity_pool') + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_create_called(self, client_f, mock_create, mock_get): + data = dict(self.set_default_args()) + data['name'] = 'create' + set_module_args(data) + mock_get.return_value = None + client_f.return_value = Mock() + my_obj = capacity_pool_module() + my_obj.netapp_client.pools = self.netapp_client.pools + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + mock_create.assert_called_with() + + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_capacity_pool.AzureRMNetAppCapacityPool.get_azure_netapp_capacity_pool') + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_capacity_pool.AzureRMNetAppCapacityPool.create_azure_netapp_capacity_pool') + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_modify_called(self, client_f, mock_modify, mock_get): + data = dict(self.set_default_args()) + data['name'] = 'create' + data['size'] = 3 + set_module_args(data) + mock_get.return_value = None + client_f.return_value = Mock() + my_obj = capacity_pool_module() + my_obj.netapp_client.pools = self.netapp_client.pools + with pytest.raises(AnsibleExitJson) as exc: + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + mock_modify.assert_called_with() + + 
@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_capacity_pool.AzureRMNetAppCapacityPool.get_azure_netapp_capacity_pool') + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_capacity_pool.AzureRMNetAppCapacityPool.delete_azure_netapp_capacity_pool') + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_delete_called(self, client_f, mock_delete, mock_get): + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + mock_get.return_value = Mock() + client_f.return_value = Mock() + my_obj = capacity_pool_module() + my_obj.netapp_client.pools = self.netapp_client.pools + with pytest.raises(AnsibleExitJson) as exc: + data['state'] = 'absent' + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + mock_delete.assert_called_with() diff --git a/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_snapshot.py b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_snapshot.py new file mode 100644 index 000000000..0415a4039 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_snapshot.py @@ -0,0 +1,165 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: azure_rm_netapp_snapshot''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import sys + +import pytest +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.azure.tests.unit.compat import unittest +from 
ansible_collections.netapp.azure.tests.unit.compat.mock import patch, Mock + +HAS_AZURE_RMNETAPP_IMPORT = True +try: + # At this point, python believes the module is already loaded, so the import inside azure_rm_netapp_volume will be skipped. + from ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_snapshot \ + import AzureRMNetAppSnapshot as snapshot_module +except ImportError: + HAS_AZURE_RMNETAPP_IMPORT = False + +HAS_AZURE_CLOUD_ERROR_IMPORT = True +try: + from msrestazure.azure_exceptions import CloudError +except ImportError: + HAS_AZURE_CLOUD_ERROR_IMPORT = False + +if not HAS_AZURE_CLOUD_ERROR_IMPORT and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required azure_exceptions on 2.6 and 2.7') + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockAzureClient(object): + ''' mock server connection to ONTAP host ''' + def __init__(self): + ''' save arguments ''' + self.valid_snapshots = ['test1', 'test2'] + + def get(self, resource_group, account_name, pool_name, volume_name, snapshot_name): # pylint: disable=unused-argument + if 
snapshot_name not in self.valid_snapshots: + invalid = Response() + invalid.status_code = 404 + raise CloudError(response=invalid) + else: + return Mock(name=snapshot_name) + + def create(self, body, resource_group, account_name, pool_name, volume_name, snapshot_name): # pylint: disable=unused-argument + return None + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + self.netapp_client = Mock() + self.netapp_client.pools = MockAzureClient() + self._netapp_client = None + + def set_default_args(self): + resource_group = 'azure' + account_name = 'azure' + pool_name = 'azure' + volume_name = 'azure' + name = 'test1' + location = 'abc' + return dict({ + 'resource_group': resource_group, + 'account_name': account_name, + 'pool_name': pool_name, + 'volume_name': volume_name, + 'name': name, + 'location': location + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + snapshot_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + def test_ensure_get_called_valid_snapshot(self, client_f): + set_module_args(self.set_default_args()) + client_f.return_value = Mock() + my_obj = snapshot_module() + my_obj.netapp_client.snapshots = self.netapp_client.snapshots + assert my_obj.get_azure_netapp_snapshot() is not None + + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_snapshot.AzureRMNetAppSnapshot.get_azure_netapp_snapshot') + 
@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_snapshot.AzureRMNetAppSnapshot.create_azure_netapp_snapshot') + def test_ensure_create_called(self, mock_create, mock_get, client_f): + data = dict(self.set_default_args()) + data['name'] = 'create' + set_module_args(data) + mock_get.return_value = None + client_f.return_value = Mock() + my_obj = snapshot_module() + my_obj.netapp_client.snapshots = self.netapp_client.snapshots + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + mock_create.assert_called_with() + + @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_snapshot.AzureRMNetAppSnapshot.get_azure_netapp_snapshot') + @patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_snapshot.AzureRMNetAppSnapshot.delete_azure_netapp_snapshot') + def test_ensure_delete_called(self, mock_delete, mock_get, client_f): + data = dict(self.set_default_args()) + data['state'] = 'absent' + set_module_args(data) + client_f.return_value = Mock() + mock_get.return_value = Mock() + my_obj = snapshot_module() + my_obj.netapp_client.snapshots = self.netapp_client.snapshots + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + mock_delete.assert_called_with() diff --git a/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_volume.py b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_volume.py new file mode 100644 index 000000000..83c7f812e --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_volume.py @@ -0,0 +1,501 @@ +# 
(c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: azure_rm_netapp_volume''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import sys + +import pytest +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.azure.tests.unit.compat.mock import patch, Mock + +HAS_AZURE_RMNETAPP_IMPORT = True +try: + # At this point, python believes the module is already loaded, so the import inside azure_rm_netapp_volume will be skipped. + from ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume \ + import AzureRMNetAppVolume as volume_module +except ImportError: + HAS_AZURE_RMNETAPP_IMPORT = False + +HAS_AZURE_CLOUD_ERROR_IMPORT = True +try: + from msrestazure.azure_exceptions import CloudError +except ImportError: + HAS_AZURE_CLOUD_ERROR_IMPORT = False + +if not HAS_AZURE_CLOUD_ERROR_IMPORT and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required azure_exceptions on 2.6 and 2.7') + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 
'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockAzureClient(object): + ''' mock server connection to ONTAP host ''' + def __init__(self): + ''' save arguments ''' + self.valid_volumes = ['test1', 'test2'] + + def get(self, resource_group, account_name, pool_name, volume_name): # pylint: disable=unused-argument + if volume_name in self.valid_volumes: + return Mock(name=volume_name, + subnet_id='/resid/whatever/subnet_name', + mount_targets=[Mock(ip_address='1.2.3.4')] + ) + + invalid = Response() + invalid.status_code = 404 + raise CloudError(response=invalid) + + def create_or_update(self, body, resource_group, account_name, pool_name, volume_name): # pylint: disable=unused-argument + return None + + def begin_create_or_update(self, body, resource_group_name, account_name, pool_name, volume_name): # pylint: disable=unused-argument + return Mock(done=Mock(side_effect=[False, True])) + + def begin_update(self, body, resource_group_name, account_name, pool_name, volume_name): # pylint: disable=unused-argument + return Mock(done=Mock(side_effect=[False, True])) + + def begin_delete(self, resource_group_name, account_name, pool_name, volume_name): # pylint: disable=unused-argument + return Mock(done=Mock(side_effect=[False, True])) + + +class MockAzureClientRaise(MockAzureClient): + ''' mock server connection to ONTAP host ''' + response = Mock(status_code=400, context=None, headers=[], text=lambda: 'Forced exception') + + def begin_create_or_update(self, body, resource_group_name, account_name, pool_name, volume_name): # pylint: disable=unused-argument + raise CloudError(MockAzureClientRaise.response) + + def begin_update(self, body, resource_group_name, account_name, pool_name, volume_name): # pylint: 
disable=unused-argument + raise CloudError(MockAzureClientRaise.response) + + def begin_delete(self, resource_group_name, account_name, pool_name, volume_name): # pylint: disable=unused-argument + raise CloudError(MockAzureClientRaise.response) + + +# using pytest natively, without unittest.TestCase +@pytest.fixture(name="patch_ansible") +def fixture_patch_ansible(): + with patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) as mocks: + yield mocks + + +def set_default_args(): + resource_group = 'azure' + account_name = 'azure' + pool_name = 'azure' + name = 'test1' + location = 'abc' + file_path = 'azure' + subnet_id = 'azure' + virtual_network = 'azure' + size = 100 + return dict({ + 'resource_group': resource_group, + 'account_name': account_name, + 'pool_name': pool_name, + 'name': name, + 'location': location, + 'file_path': file_path, + 'subnet_name': subnet_id, + 'virtual_network': virtual_network, + 'size': size, + 'protocol_types': 'nfs', + 'tags': {'owner': 'laurentn'} + }) + + +def test_module_fail_when_required_args_missing(patch_ansible): # pylint: disable=unused-argument + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + volume_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +def test_ensure_get_called_valid_volume(client_f): + set_module_args(set_default_args()) + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.netapp_client.volumes = MockAzureClient() + assert my_obj.get_azure_netapp_volume() is not None + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +def test_ensure_get_called_non_existing_volume(client_f): + data = dict(set_default_args()) + data['name'] = 'invalid' + set_module_args(data) + client_f.return_value 
= Mock() + my_obj = volume_module() + my_obj.netapp_client.volumes = MockAzureClient() + assert my_obj.get_azure_netapp_volume() is None + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.create_azure_netapp_volume') +def test_ensure_create_called(mock_create, mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'create' + set_module_args(data) + mock_get.side_effect = [ + None, # first get + dict(mount_targets=[dict(ip_address='11.22.33.44')], # get after create + creation_token='abcd') + ] + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.netapp_client.volumes = MockAzureClient() + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + expected_mount_path = '11.22.33.44:/abcd' + assert exc.value.args[0]['mount_path'] == expected_mount_path + mock_create.assert_called_with() + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +def test_create(mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'create' + data['protocol_types'] = ['nfsv4.1'] + set_module_args(data) + mock_get.side_effect = [ + None, # first get + dict(mount_targets=[dict(ip_address='11.22.33.44')], # get after create + creation_token='abcd') + ] + client_f.return_value = Mock() + my_obj = 
volume_module() + my_obj.azure_auth = Mock(subscription_id='1234') + my_obj._new_style = True + my_obj.netapp_client.volumes = MockAzureClient() + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + expected_mount_path = '11.22.33.44:/abcd' + assert exc.value.args[0]['mount_path'] == expected_mount_path + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +def test_create_exception(mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'create' + data['protocol_types'] = 'nfsv4.1' + set_module_args(data) + mock_get.side_effect = [ + None, # first get + dict(mount_targets=[dict(ip_address='11.22.33.44')], # get after create + creation_token='abcd') + ] + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.azure_auth = Mock(subscription_id='1234') + my_obj._new_style = True + my_obj.netapp_client.volumes = MockAzureClientRaise() + with pytest.raises(AnsibleFailJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + expected_msg = 'Error creating volume' + assert expected_msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.create_azure_netapp_volume') +def test_ensure_create_called_but_fail_on_get(mock_create, mock_get, client_f, 
patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'create' + set_module_args(data) + mock_get.side_effect = [ + None, # first get + dict(mount_targets=None, # get after create + creation_token='abcd') + ] + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.netapp_client.volumes = MockAzureClient() + with pytest.raises(AnsibleFailJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + error = 'Error: volume create was created successfully, but mount target(s) cannot be found - volume details:' + assert exc.value.args[0]['msg'].startswith(error) + mock_create.assert_called_with() + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.create_azure_netapp_volume') +def test_ensure_create_called_but_fail_on_mount_target(mock_create, mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'create' + set_module_args(data) + mock_get.return_value = None + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.netapp_client.volumes = MockAzureClient() + with pytest.raises(AnsibleFailJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + error = 'Error: volume create was created successfully, but cannot be found.' 
+ assert exc.value.args[0]['msg'] == error + mock_create.assert_called_with() + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.delete_azure_netapp_volume') +def test_ensure_delete_called(mock_delete, mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['state'] = 'absent' + set_module_args(data) + client_f.return_value = Mock() + mock_get.return_value = Mock() + my_obj = volume_module() + my_obj.netapp_client.volumes = MockAzureClient() + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + mock_delete.assert_called_with() + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +def test_delete(mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'delete' + data['state'] = 'absent' + set_module_args(data) + mock_get.side_effect = [ + dict(mount_targets=[dict(ip_address='11.22.33.44')], # first get + creation_token='abcd') + ] + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.azure_auth = Mock(subscription_id='1234') + my_obj._new_style = True + my_obj.netapp_client.volumes = MockAzureClient() + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + expected_mount_path = '' + assert 
exc.value.args[0]['mount_path'] == expected_mount_path + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +def test_delete_exception(mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'delete' + data['state'] = 'absent' + set_module_args(data) + mock_get.side_effect = [ + dict(mount_targets=[dict(ip_address='11.22.33.44')], # first get + creation_token='abcd') + ] + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.azure_auth = Mock(subscription_id='1234') + my_obj._new_style = True + my_obj.netapp_client.volumes = MockAzureClientRaise() + with pytest.raises(AnsibleFailJson) as exc: + # add default args for exec_module + data['debug'] = False + my_obj.exec_module(**data) + expected_msg = 'Error deleting volume' + assert expected_msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +def test_modify(mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'modify' + data['size'] = 200 + data['tags'] = {'added_tag': 'new_tag'} + set_module_args(data) + mock_get.side_effect = [ + dict(mount_targets=[dict(ip_address='11.22.33.44')], # first get + creation_token='abcd', + tags={}, + usage_threshold=0), + dict(mount_targets=[dict(ip_address='11.22.33.44')], # get after modify + creation_token='abcd', + usage_threshold=10000000) + ] + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.azure_auth = Mock(subscription_id='1234') + my_obj._new_style = True + 
my_obj.netapp_client.volumes = MockAzureClient() + with pytest.raises(AnsibleExitJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + assert exc.value.args[0]['changed'] + print('modify', exc.value.args[0]) + expected_mount_path = '11.22.33.44:/abcd' + assert exc.value.args[0]['mount_path'] == expected_mount_path + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +def test_modify_exception(mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'modify' + data['size'] = 200 + set_module_args(data) + mock_get.side_effect = [ + dict(mount_targets=[dict(ip_address='11.22.33.44')], # first get + creation_token='abcd', + usage_threshold=0), + dict(mount_targets=[dict(ip_address='11.22.33.44')], # get after modify + creation_token='abcd', + usage_threshold=10000000) + ] + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.azure_auth = Mock(subscription_id='1234') + my_obj._new_style = True + my_obj.netapp_client.volumes = MockAzureClientRaise() + with pytest.raises(AnsibleFailJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + expected_msg = 'Error modifying volume' + assert expected_msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +@patch('ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume.AzureRMNetAppVolume.get_azure_netapp_volume') +def test_modify_not_supported(mock_get, client_f, patch_ansible): # pylint: disable=unused-argument + data = dict(set_default_args()) + data['name'] = 'modify' + 
data['location'] = 'east' + set_module_args(data) + mock_get.side_effect = [ + dict(mount_targets=[dict(ip_address='11.22.33.44')], # first get + creation_token='abcd', + usage_threshold=0, + location='west', + name='old_name'), + dict(mount_targets=[dict(ip_address='11.22.33.44')], # get after modify + creation_token='abcd', + usage_threshold=10000000) + ] + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.azure_auth = Mock(subscription_id='1234') + my_obj._new_style = True + my_obj.netapp_client.volumes = MockAzureClient() + with pytest.raises(AnsibleFailJson) as exc: + # add default args for exec_module + data['state'] = 'present' + data['debug'] = False + my_obj.exec_module(**data) + expected_msg = "Error: the following properties cannot be modified: {'location': 'east'}" + assert expected_msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.netapp_client') +def test_get_export_policy_rules(client_f, patch_ansible): + set_module_args(set_default_args()) + client_f.return_value = Mock() + my_obj = volume_module() + my_obj.netapp_client.volumes = MockAzureClient() + rules = my_obj.get_export_policy_rules() + assert rules is None + del my_obj.parameters['protocol_types'] + rules = my_obj.get_export_policy_rules() + assert rules is None + my_obj.parameters['protocol_types'] = ['nFsv4.1'] + rules = my_obj.get_export_policy_rules() + assert rules is not None + rules = vars(rules) + assert 'rules' in rules + rules = rules['rules'] + assert rules + rule = vars(rules[0]) + assert rule['nfsv41'] + assert not rule['cifs'] + + +def test_dict_from_object(): + set_module_args(set_default_args()) + my_obj = volume_module() + # just for fun + module_dict = my_obj.dict_from_volume_object(my_obj) + print('Module dict', module_dict) + + rule_object = Mock() + rule_object.ip_address = '10.10.10.10' + export_policy_object = Mock() + export_policy_object.rules = 
[rule_object] + volume_object = Mock() + volume_object.export_policy = export_policy_object + volume_dict = my_obj.dict_from_volume_object(volume_object) + print('Volume dict', volume_dict) + assert 'export_policy' in volume_dict + assert 'rules' in volume_dict['export_policy'] + assert isinstance(volume_dict['export_policy']['rules'], list) + assert len(volume_dict['export_policy']['rules']) == 1 + assert 'ip_address' in volume_dict['export_policy']['rules'][0] diff --git a/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_volume_import.py b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_volume_import.py new file mode 100644 index 000000000..13d3bba29 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/plugins/modules/test_azure_rm_netapp_volume_import.py @@ -0,0 +1,74 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: azure_rm_netapp_volume''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import sys + +import pytest +# from typing import Collection +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.azure.tests.unit.compat.mock import patch + + +if sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing imports on 2.6 and 2.7') + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an 
exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +@pytest.fixture(name="patch_ansible") +def fixture_patch_ansible(): + with patch.multiple(basic.AnsibleModule, + fail_json=fail_json) as mocks: + yield mocks + + +# @patch('ansible_collections.netapp.azure.plugins.module_utils.azure_rm_netapp_common.AzureRMNetAppModuleBase.__init__') +def test_import_error(): + orig_import = __import__ + + def import_mock(name, *args): + print('importing: %s' % name) + if name.startswith('ansible_collections.netapp.azure.plugins.modules'): + # force a relead to go through secondary imports + sys.modules.pop(name, None) + if name in ('azure.core.exceptions', 'azure.mgmt.netapp.models'): + raise ImportError('forced error on %s' % name) + return orig_import(name, *args) + + # mock_base.return_value = Mock() + data = dict() + set_module_args(data) + with patch('builtins.__import__', side_effect=import_mock): + from ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume import IMPORT_ERRORS + assert any('azure.core.exceptions' in error for error in IMPORT_ERRORS) + assert any('azure.mgmt.netapp.models' in error for error in IMPORT_ERRORS) + + +def test_main(patch_ansible): # pylint: disable=unused-argument + data = dict() + set_module_args(data) + from ansible_collections.netapp.azure.plugins.modules.azure_rm_netapp_volume import main + with pytest.raises(AnsibleFailJson) as exc: + main() + expected_msg = "missing required arguments:" + assert expected_msg in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/azure/tests/unit/requirements.txt b/ansible_collections/netapp/azure/tests/unit/requirements.txt new file mode 100644 index 000000000..0b89f6365 --- /dev/null +++ b/ansible_collections/netapp/azure/tests/unit/requirements.txt @@ -0,0 +1,3 @@ +azure-mgmt-netapp ; python_version >= '2.7' +msrestazure ; python_version >= '3.5' +requests ; python_version >= '2.7' diff --git 
a/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..2e7814624 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,210 @@ +--- +name: 🐛 Bug report +description: Create a report to help us improve + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to report a bug in netapp.cloudmanager!** + + + ⚠ + Verify first that your issue is not [already reported on + GitHub][issue search] and keep in mind that we may have to keep + the current behavior because [every change breaks someone's + workflow][XKCD 1172]. + We try to be mindful about this. + + Also test if the latest release and devel branch are affected too. + + + **Tip:** If you are seeking community support, please consider + [Join our Slack community][ML||IRC]. + + + + [ML||IRC]: + https://join.slack.com/t/netapppub/shared_invite/zt-njcjx2sh-1VR2mEDvPcJAmPutOnP~mg + + [issue search]: ../search?q=is%3Aissue&type=issues + + [XKCD 1172]: https://xkcd.com/1172/ + + +- type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. + placeholder: >- + When I try to do X with netapp.cloudmanager from the devel branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the rst file, module, plugin, task or + feature below, *use your best guess if unsure*. + + + **Tip:** Cannot find it in this repository? Please be advised that + the source for some parts of the documentation are hosted outside + of this repository. 
If the page you are reporting describes + modules/plugins/etc that are not officially supported by the + Ansible Core Engineering team, there is a good chance that it is + coming from one of the [Ansible Collections maintained by the + community][collections org]. If this is the case, please make sure + to file an issue under the appropriate project there instead. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` below, under + the prompt line. Please don't wrap it with triple backticks — your + whole input will be turned into a code snippet automatically. + render: console + value: | + $ ansible --version + placeholder: | + $ ansible --version + ansible [core 2.11.0b4.post0] (detached HEAD ref: refs/) last updated 2021/04/02 00:33:35 (GMT +200) + config file = None + configured module search path = ['~/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible python module location = ~/src/github/ansible/ansible/lib/ansible + ansible collection location = ~/.ansible/collections:/usr/share/ansible/collections + executable location = bin/ansible + python version = 3.9.0 (default, Oct 26 2020, 13:08:59) [GCC 10.2.0] + jinja version = 2.11.3 + libyaml = True + validations: + required: true + +- type: textarea + attributes: + label: Cloud Manager Collection Version + description: >- + Cloud Manager Collection Version. 
Run `ansible-galaxy collection` and copy the entire output + render: console + value: | + $ ansible-galaxy collection list + validations: + required: true + +- type: textarea + attributes: + label: Playbook + description: >- + The task from the playbook that is giving you the issue + render: console + validations: + required: true + +- type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: | + 1. Implement the following playbook: + + ```yaml + --- + # ping.yml + - hosts: all + gather_facts: false + tasks: + - ping: + ... + ``` + 2. Then run `ANSIBLE_DEBUG=1 ansible-playbook ping.yml -vvvvv` + 3. An error occurs. + validations: + required: true + +- type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y and was shocked + that it did not. + validations: + required: true + +- type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output and don't wrap it with triple backticks — your + whole input will be turned into a code snippet automatically. + render: console + placeholder: >- + Certificate did not match expected hostname: files.pythonhosted.org. 
Certificate: {'notAfter': 'Apr 28 19:20:25 2021 GMT', 'subjectAltName': ((u'DNS', 'r.ssl.fastly.net'), (u'DNS', '*.catchpoint.com'), (u'DNS', '*.cnn.io'), (u'DNS', '*.dollarshaveclub.com'), (u'DNS', '*.eater.com'), (u'DNS', '*.fastly.picmonkey.com'), (u'DNS', '*.files.saymedia-content.com'), (u'DNS', '*.ft.com'), (u'DNS', '*.meetupstatic.com'), (u'DNS', '*.nfl.com'), (u'DNS', '*.pagar.me'), (u'DNS', '*.picmonkey.com'), (u'DNS', '*.realself.com'), (u'DNS', '*.sbnation.com'), (u'DNS', '*.shakr.com'), (u'DNS', '*.streamable.com'), (u'DNS', '*.surfly.com'), (u'DNS', '*.theverge.com'), (u'DNS', '*.thrillist.com'), (u'DNS', '*.vox-cdn.com'), (u'DNS', '*.vox.com'), (u'DNS', '*.voxmedia.com'), (u'DNS', 'eater.com'), (u'DNS', 'ft.com'), (u'DNS', 'i.gse.io'), (u'DNS', 'picmonkey.com'), (u'DNS', 'realself.com'), (u'DNS', 'static.wixstatic.com'), (u'DNS', 'streamable.com'), (u'DNS', 'surfly.com'), (u'DNS', 'theverge.com'), (u'DNS', 'vox-cdn.com'), (u'DNS', 'vox.com'), (u'DNS', 'www.joyent.com')), 'subject': ((('countryName', u'US'),), (('stateOrProvinceName', u'California'),), (('localityName', u'San Francisco'),), (('organizationName', u'Fastly, Inc'),), (('commonName', u'r.ssl.fastly.net'),))} + Exception: + Traceback (most recent call last): + File "/usr/local/lib/python2.6/dist-packages/pip/basecommand.py", line 215, in main + status = self.run(options, args) + File "/usr/local/lib/python2.6/dist-packages/pip/commands/install.py", line 335, in run + wb.build(autobuilding=True) + File "/usr/local/lib/python2.6/dist-packages/pip/wheel.py", line 749, in build + self.requirement_set.prepare_files(self.finder) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 380, in prepare_files + ignore_dependencies=self.ignore_dependencies)) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 620, in _prepare_file + session=self.session, hashes=hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 821, in unpack_url + 
hashes=hashes + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 659, in unpack_http_url + hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 853, in _download_http_url + stream=True, + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 521, in get + return self.request('GET', url, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 386, in request + return super(PipSession, self).request(method, url, *args, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 508, in request + resp = self.send(prep, **send_kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 618, in send + r = adapter.send(request, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/cachecontrol/adapter.py", line 47, in send + resp = super(CacheControlAdapter, self).send(request, **kw) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/adapters.py", line 506, in send + raise SSLError(e, request=request) + SSLError: HTTPSConnectionPool(host='files.pythonhosted.org', port=443): Max retries exceeded with url: /packages/ef/ab/aa12712415809bf698e719b307419f953e25344e8f42d557533d7a02b276/netapp_lib-2020.7.16-py2-none-any.whl (Caused by SSLError(CertificateError("hostname 'files.pythonhosted.org' doesn't match either of 'r.ssl.fastly.net', '*.catchpoint.com', '*.cnn.io', '*.dollarshaveclub.com', '*.eater.com', '*.fastly.picmonkey.com', '*.files.saymedia-content.com', '*.ft.com', '*.meetupstatic.com', '*.nfl.com', '*.pagar.me', '*.picmonkey.com', '*.realself.com', '*.sbnation.com', '*.shakr.com', '*.streamable.com', '*.surfly.com', '*.theverge.com', '*.thrillist.com', '*.vox-cdn.com', '*.vox.com', '*.voxmedia.com', 'eater.com', 'ft.com', 'i.gse.io', 'picmonkey.com', 'realself.com', 'static.wixstatic.com', 'streamable.com', 'surfly.com', 'theverge.com', 'vox-cdn.com', 'vox.com', 
'www.joyent.com'",),)) + ERROR: Command "/usr/bin/python2.6 /root/ansible/test/lib/ansible_test/_data/quiet_pip.py install --disable-pip-version-check -r /root/ansible/test/lib/ansible_test/_data/requirements/units.txt -r tests/unit/requirements.txt -c /root/ansible/test/lib/ansible_test/_data/requirements/constraints.txt" returned exit status 2. + ERROR: Command "docker exec d47eb360db4ce779c1f690db964655b76e68895c4360ff252c46fe7fe6f5c75a /usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/root/ansible_collections/netapp/ontap LC_ALL=en_US.UTF-8 /usr/bin/python3.6 /root/ansible/bin/ansible-test units --metadata tests/output/.tmp/metadata-9i2qfrcl.json --truncate 200 --redact --color yes --requirements --python default --requirements-mode only" returned exit status 1. + validations: + required: true + + +- type: markdown + attributes: + value: > + *One last thing...* + + + Thank you for your collaboration! + + +... diff --git a/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..8a76456de --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,100 @@ +--- +name: ✨ Feature request +description: Suggest an idea for this project + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to suggest a feature for netapp.cloudmanager!** + + 💡 + Before you go ahead with your request, please first consider if it + would be useful for majority of the netapp.cloudmanager users. As a + general rule of thumb, any feature that is only of interest to a + small sub group should be [implemented in a third-party Ansible + Collection][contribute to collections] or maybe even just your + project alone. Be mindful of the fact that the essential + netapp.cloudmanager features have a broad impact. + + +
+ + ❗ Every change breaks someone's workflow. + + + + [![❗ Every change breaks someone's workflow. + ](https://imgs.xkcd.com/comics/workflow.png) + ](https://xkcd.com/1172/) +
+ + + ⚠ + Verify first that your idea is not [already requested on + GitHub][issue search]. + + Also test if the main branch does not already implement this. + + +- type: textarea + attributes: + label: Summary + description: > + Describe the new feature/improvement you would like briefly below. + + + What's the problem this feature will solve? + + What are you trying to do, that you are unable to achieve + with netapp.cloudmanager as it currently stands? + + + * Provide examples of real-world use cases that this would enable + and how it solves the problem you described. + + * How do you solve this now? + + * Have you tried to work around the problem using other tools? + + * Could there be a different approach to solving this issue? + + placeholder: >- + I am trying to do X with netapp.cloudmanager from the devel branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of netapp.cloudmanager because of Z. + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: >- + I asked on https://stackoverflow.com/.... and the community + advised me to do X, Y and Z. + validations: + required: true + +... 
diff --git a/ansible_collections/netapp/cloudmanager/.github/workflows/coverage.yml b/ansible_collections/netapp/cloudmanager/.github/workflows/coverage.yml new file mode 100644 index 000000000..eebbe7fb8 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/.github/workflows/coverage.yml @@ -0,0 +1,45 @@ +name: NetApp.cloudmanager Ansible Coverage + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Coverage on Cloudmanager + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible stable-2.12 + run: pip install https://github.com/ansible/ansible/archive/stable-2.12.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/cloudmanager/ + rsync -av . ansible_collections/netapp/cloudmanager/ --exclude ansible_collections/netapp/cloudmanager/ + + - name: Run Unit Tests + run: ansible-test units --coverage --color --docker --python 3.8 + working-directory: ansible_collections/netapp/cloudmanager/ + + # ansible-test support producing code coverage date + - name: Generate coverage report + run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ansible_collections/netapp/cloudmanager/ + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + working-directory: ansible_collections/netapp/cloudmanager/ + verbose: true diff --git a/ansible_collections/netapp/cloudmanager/.github/workflows/main.yml b/ansible_collections/netapp/cloudmanager/.github/workflows/main.yml new file mode 100644 index 000000000..2548b1099 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/.github/workflows/main.yml @@ -0,0 +1,47 @@ +name: NetApp.cloudmanager Ansible CI + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + 
sanity: + name: Sanity (${{ matrix.ansible }}) on Cloudmanager + runs-on: ubuntu-latest + strategy: + matrix: + fail-fast: false + ansible: + - stable-2.9 + - stable-2.10 + - stable-2.11 + - stable-2.12 + - stable-2.13 + - stable-2.14 + - devel + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + # Ansible 2.14 requires 3.9 as a minimum + python-version: 3.9 + + - name: Install ansible (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/cloudmanager/ + rsync -av . ansible_collections/netapp/cloudmanager/ --exclude ansible_collections/netapp/cloudmanager/ + + + - name: Run sanity tests Cloudmanager + run: ansible-test sanity --docker -v --color + working-directory: ansible_collections/netapp/cloudmanager/ diff --git a/ansible_collections/netapp/cloudmanager/CHANGELOG.rst b/ansible_collections/netapp/cloudmanager/CHANGELOG.rst new file mode 100644 index 000000000..262da39fa --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/CHANGELOG.rst @@ -0,0 +1,325 @@ +============================================ +NetApp CloudManager Collection Release Notes +============================================ + +.. contents:: Topics + + +v21.22.0 +======== + +Minor Changes +------------- + +- Add ``svm_name`` option in CVO for AWS, AZURE and GCP creation and update. 
+ +v21.21.0 +======== + +Minor Changes +------------- + +- na_cloudmanager_connector_azure - expose connector managed system identity principal_id to perform role assignment +- na_cloudmanager_cvo_azure - Add new ``storage_type`` value Premium_ZRS +- na_cloudmanager_cvo_azure - Add parameter ``availability_zone_node1`` and ``availability_zone_node2`` for CVO Azure HA location + +v21.20.1 +======== + +Bugfixes +-------- + +- new meta/execution-environment.yml is failing ansible-builder sanitize step. + +v21.20.0 +======== + +Minor Changes +------------- + +- Add ``availability_zone`` option in CVO Azure on the location configuration. +- Add ``subnet_path`` option in CVO GCP. +- na_cloudmanager_cvo_aws - Add new parameter ``cluster_key_pair_name`` to support SSH authentication method key pair. +- na_cloudmanager_volume - Support AWS FsxN working environment. + +Bugfixes +-------- + +- na_cloudmanager_connector_gcp - Fix default machine_type value on the GCP connector. + +v21.19.0 +======== + +Minor Changes +------------- + +- Support ``writing_speed_state`` modification on AWS, AZURE and GCP CVOs. + +v21.18.0 +======== + +Minor Changes +------------- + +- na_cloudmanager_connector_azure - Support full ``subnet_id`` and ``vnet_id``. + +v21.17.0 +======== + +Minor Changes +------------- + +- na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager by adding new parameters ``import_file_system`` and ``file_system_id``. +- na_cloudmanager_connector_azure - Support user defined ``storage_account`` name. The ``storage_account`` can be created automatically. When ``storage_account`` is not set, the name is constructed by appending 'sa' to the connector ``name``. 
+- na_cloudmanager_cvo_aws - Support license_type update +- na_cloudmanager_cvo_azure - Support license_type update +- na_cloudmanager_cvo_gcp - Support license_type update + +v21.16.0 +======== + +Minor Changes +------------- + +- na_cloudmanager_connector_gcp - when using the user application default credential authentication by running the command gcloud auth application-default login, ``gcp_service_account_path`` is not needed. + +Bugfixes +-------- + +- Add check when volume is capacity tiered. +- na_cloudmanager_connector_azure - Fix string formatting error when deleting the connector. + +v21.15.0 +======== + +Minor Changes +------------- + +- Add the description of client_id based on the cloudmanager UI. +- Set license_type default value 'capacity-paygo' for single node 'ha-capacity-paygo' for HA and 'capacity_package_name' value 'Essential' + +v21.14.0 +======== + +Minor Changes +------------- + +- na_cloudmanager_snapmirror - Add FSX to snapmirror. + +Bugfixes +-------- + +- CVO working environment clusterProperties is deprecated. Make changes accordingly. Add CVO update status check on ``instance_type`` change. + +v21.13.0 +======== + +Minor Changes +------------- + +- Add ``update_svm_password`` for ``svm_password`` update on AWS, AZURE and GCP CVOs. Update ``svm_password`` if ``update_svm_password`` is true. +- Add ontap image upgrade on AWS, AZURE and GCP CVOs if ``upgrade_ontap_version`` is true and ``ontap_version`` is provided with a specific version. ``use_latest_version`` has to be false. +- na_cloudmanager_connector_aws - automatically fetch client_id and instance_id for delete. +- na_cloudmanager_connector_aws - make the module idempotent for create and delete. +- na_cloudmanager_connector_aws - report client_id and instance_id if connector already exists. 
+- na_cloudmanager_cvo_aws - Support instance_type update +- na_cloudmanager_cvo_azure - Support instance_type update +- na_cloudmanager_cvo_gcp - Support instance_type update +- na_cloudmanager_info - new subsets - account_info, agents_info, active_agents_info +- na_cloudmanager_volume - Report error if the volume properties cannot be modified. Add support ``tiering_policy`` and ``snapshot_policy_name`` modification. + +Bugfixes +-------- + +- na_cloudmanager_cvo_gcp - handle extra two auto-gen GCP labels to prevent update ``gcp_labels`` failure. + +New Modules +----------- + +- netapp.cloudmanager.na_cloudmanager_aws_fsx - Cloud ONTAP file system(FSX) in AWS + +v21.12.1 +======== + +Bugfixes +-------- + +- na_cloudmanager_connector_aws - Fix default ami not based on the region in resource file +- na_cloudmanager_snapmirror - report actual error rather than None with "Error getting destination info". + +v21.12.0 +======== + +Minor Changes +------------- + +- PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. +- na_cloudmanager_cvo_azure - Add extra tag handling on azure_tag maintenance +- na_cloudmanager_cvo_gcp - Add extra label handling for HA and only allow add new labels on gcp_labels +- na_cloudmanager_snapmirror - working environment get information api not working for onprem is fixed + +Bugfixes +-------- + +- Fix cannot find working environment if ``working_environment_name`` is provided + +v21.11.0 +======== + +Minor Changes +------------- + +- Add CVO modification unit tests +- Adding new parameter ``capacity_package_name`` for all CVOs creation with capacity based ``license_type`` capacity-paygo or ha-capacity-paygo for HA. +- all modules - better error reporting if refresh_token is not valid. +- na_cloudmanager_connector_gcp - automatically fetch client_id for delete. +- na_cloudmanager_connector_gcp - make the module idempotent for create and delete. +- na_cloudmanager_connector_gcp - report client_id if connector already exists. 
+- na_cloudmanager_cvo_aws - Add unit tests for capacity based license support. +- na_cloudmanager_cvo_azure - Add unit tests for capacity based license support. +- na_cloudmanager_cvo_gcp - Add unit tests for capacity based license support and delete cvo. +- netapp.py - improve error handling with error content. + +Bugfixes +-------- + +- na_cloudmanager_connector_gcp - typeError when using proxy certificates. + +v21.10.0 +======== + +Minor Changes +------------- + +- Only these parameters will be modified on the existing CVOs. svm_password will be updated on each run. +- na_cloudmanager_cvo_aws - Support update on svm_password, tier_level, and aws_tag. +- na_cloudmanager_cvo_aws - add new parameter ``kms_key_id`` and ``kms_key_arn`` as AWS encryption parameters to support AWS CVO encryption +- na_cloudmanager_cvo_azure - Add new parameter ``ha_enable_https`` for HA CVO to enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false. +- na_cloudmanager_cvo_azure - Support update on svm_password, tier_level, and azure_tag. +- na_cloudmanager_cvo_azure - add new parameter ``azure_encryption_parameters`` to support AZURE CVO encryption +- na_cloudmanager_cvo_gcp - Support update on svm_password, tier_level, and gcp_labels. +- na_cloudmanager_cvo_gcp - add new parameter ``gcp_encryption_parameters`` to support GCP CVO encryption + +Bugfixes +-------- + +- na_cloudmanager_snapmirror - key error CloudProviderName for ONPREM operation + +v21.9.0 +======= + +Minor Changes +------------- + +- na_cloudmanager - Support pd-balanced in ``gcp_volume_type`` for CVO GCP, ``provider_volume_type`` in na_cloudmanager_snapmirror and na_cloudmanager_volume. +- na_cloudmanager_connector_azure - Change default value of ``virtual_machine_size`` to Standard_DS3_v2. 
+- na_cloudmanager_cvo_gcp - Add selflink support on subnet_id, vpc0_node_and_data_connectivity, vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet0_node_and_data_connectivity, subnet1_cluster_connectivity, subnet2_ha_connectivity, and subnet3_data_replication. + +v21.8.0 +======= + +Major Changes +------------- + +- Adding stage environment to all modules in cloudmanager + +Minor Changes +------------- + +- na_cloudmanager - Support service account with new options ``sa_client_id`` and ``sa_secret_key`` to use for API operations. + +Bugfixes +-------- + +- na_cloudmanager_aggregate - accept client_id end with or without 'clients' +- na_cloudmanager_cifs_server - accept client_id end with or without 'clients' +- na_cloudmanager_connector_aws - accept client_id end with or without 'clients' +- na_cloudmanager_connector_azure - accept client_id end with or without 'clients' +- na_cloudmanager_connector_gcp - accept client_id end with or without 'clients' +- na_cloudmanager_cvo_aws - accept client_id end with or without 'clients' +- na_cloudmanager_cvo_azure - accept client_id end with or without 'clients' +- na_cloudmanager_cvo_gcp - accept client_id end with or without 'clients' +- na_cloudmanager_info - accept client_id end with or without 'clients' +- na_cloudmanager_nss_account - accept client_id end with or without 'clients' +- na_cloudmanager_snapmirror - accept client_id end with or without 'clients' +- na_cloudmanager_volume - accept client_id end with or without 'clients' + +v21.7.0 +======= + +Minor Changes +------------- + +- na_cloudmanager_aggregate - Add provider_volume_type gp3 support. +- na_cloudmanager_connector_gcp - rename option ``service_account_email`` and ``service_account_path`` to ``gcp_service_account_email`` and ``gcp_service_account_path`` respectively. +- na_cloudmanager_cvo_aws - Add ebs_volume_type gp3 support. +- na_cloudmanager_snapmirror - Add provider_volume_type gp3 support. 
+- na_cloudmanager_volume - Add aggregate_name support on volume creation. +- na_cloudmanager_volume - Add provider_volume_type gp3 support. + +Bugfixes +-------- + +- na_cloudmanager_aggregate - Improve error message +- na_cloudmanager_connector_azure - Add subnet_name as aliases of subnet_id, vnet_name as aliases of vnet_id. +- na_cloudmanager_connector_azure - Fix KeyError client_id +- na_cloudmanager_cvo_gcp - Apply network_project_id check on vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet1_cluster_connectivity, subnet2_ha_connectivity, subnet3_data_replication +- na_cloudmanager_nss_account - Improve error message +- na_cloudmanager_volume - Improve error message + +v21.6.0 +======= + +Bugfixes +-------- + +- na_cloudmanager_cifs_server - Fix incorrect API call when is_workgroup is true +- na_cloudmanager_connector_azure - Change client_id as optional +- na_cloudmanager_connector_azure - Fix python error - msrest.exceptions.ValidationError. Parameter 'Deployment.properties' can not be None. +- na_cloudmanager_connector_azure - Fix wrong example on the document and update account_id is required field on deletion. +- na_cloudmanager_cvo_gcp - Change vpc_id from optional to required. + +New Modules +----------- + +- netapp.cloudmanager.na_cloudmanager_snapmirror - NetApp Cloud Manager SnapMirror + +v21.5.0 +======= + +Minor Changes +------------- + +- na_cloudmanager_connector_aws - Return newly created Azure client ID in cloud manager, instance ID and account ID. New option ``proxy_certificates``. +- na_cloudmanager_cvo_aws - Return newly created AWS working_environment_id. +- na_cloudmanager_cvo_azure - Return newly created AZURE working_environment_id. +- na_cloudmanager_cvo_gcp - Return newly created GCP working_environment_id. 
+ +Bugfixes +-------- + +- na_cloudmanager_cvo_aws - Fix incorrect placement of platformSerialNumber in the resulting json structure + +v21.4.0 +======= + +New Modules +----------- + +- netapp.cloudmanager.na_cloudmanager_connector_azure - NetApp Cloud Manager connector for Azure. +- netapp.cloudmanager.na_cloudmanager_connector_gcp - NetApp Cloud Manager connector for GCP. +- netapp.cloudmanager.na_cloudmanager_cvo_azure - NetApp Cloud Manager CVO/working environment in single or HA mode for Azure. +- netapp.cloudmanager.na_cloudmanager_info - NetApp Cloud Manager info + +v21.3.0 +======= + +New Modules +----------- + +- netapp.cloudmanager.na_cloudmanager_aggregate - NetApp Cloud Manager Aggregate +- netapp.cloudmanager.na_cloudmanager_cifs_server - NetApp Cloud Manager cifs server +- netapp.cloudmanager.na_cloudmanager_connector_aws - NetApp Cloud Manager connector for AWS +- netapp.cloudmanager.na_cloudmanager_cvo_aws - NetApp Cloud Manager CVO for AWS +- netapp.cloudmanager.na_cloudmanager_nss_account - NetApp Cloud Manager nss account +- netapp.cloudmanager.na_cloudmanager_volume - NetApp Cloud Manager volume diff --git a/ansible_collections/netapp/cloudmanager/COPYING b/ansible_collections/netapp/cloudmanager/COPYING new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/netapp/cloudmanager/FILES.json b/ansible_collections/netapp/cloudmanager/FILES.json new file mode 100644 index 000000000..bc550db32 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/FILES.json @@ -0,0 +1,1006 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "execution_environments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "execution_environments/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ab031084649c1857b4f90b7ed68ee3f530d51892ca81846bfbdd4657550cccc", + "format": 1 + }, + { + "name": "execution_environments/from_galaxy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "execution_environments/from_galaxy/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4a1200215a7804d7f5407d160f23bfb6f5da7d3b15fd06df5bc7b0820e35879", + "format": 1 + }, + { + "name": "execution_environments/from_galaxy/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9079c32340fc566d2c9352283abfd96fedd35d06e736f637e57cbfa1732b5513", + "format": 1 + }, + { + "name": "execution_environments/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "881dc2d94eb8af2fdea0ff74effa171a81cf0200013720242f87a920f044d2c6", + "format": 1 + }, + { + "name": "execution_environments/from_github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "execution_environments/from_github/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4a1200215a7804d7f5407d160f23bfb6f5da7d3b15fd06df5bc7b0820e35879", + "format": 1 + }, + { + "name": "execution_environments/from_github/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"92a16aeff7681eb4188ce1140409d685ff71e00ffe940065fa1c2e888c470e88", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aaff48dee8012c4d4002fe11b7addd01d4439dbc4a06620a65e7ad75d9cead37", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c93c345f0b4049d69b114933fb59b79f9d63ce8e65717c6f1c1a8801721977d", + "format": 1 + }, + { + "name": "plugins/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c82ee692702ec1dd604cdbc38ff252114e5204e1b0627045a66c9451e7a918ac", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dfd1e767b765a237c77dcd6961e2b413f1a6c268f041e371c39e986273c6a235", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5329d63aec46a1d4f9bbcd50b2117269f50eae6db319ba0d2ccd26cdcc90020", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_cvo_gcp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "604ae4f223375285adc537eecf75529c19bfc56229c457ba6532bb646e14f0a5", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_cvo_azure.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7199e129aa6938baa4e01b1bffc3ae39a0c91e395e5374aebd21f9168771e3ec", + "format": 1 + }, + { + "name": 
"plugins/modules/na_cloudmanager_aggregate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77d48b15245ed5503206eab9491463a873e39e7bb3dd2e875c1124eb38d11181", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_cifs_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09e71987c20d1bdbc163f0a447e4a267a8ee14c3402f4e88fcb8d17d8074ba06", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_nss_account.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5118fa9fb7da5d866393b299a13024f605c2cd00888b9cae58415a3795ff86f3", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_connector_aws.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "01cec957530493b5de9a91d26b3ea498b60292e9b7850a4b93af43f9e442b1e5", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_aws_fsx.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5814b7a0b08b996862489ed00f1168fc1e166cea5a68db98ea99170830def196", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_connector_azure.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8fbdf14385275e7bff2fc06e7f80f332cf6e6492435878e5101b2cbeb20e02d3", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_snapmirror.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4180ef0fe86de31699e1dc515b0c92b01a2991e17543a2676e0cb8f2ef39c733", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_cvo_aws.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "166e348ed61b69b4eba8e0f284c4120fe28cb83bb0c8cac74bd894ace231cac7", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84ae777fa4e1ac39c69dd5e6bb85254cb4298759cfa290a256a5d0c55144d2ee", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_connector_gcp.py", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "35af2e93d64674305e68f8b081260a347aaa507640a634d2262a36981e0b6926", + "format": 1 + }, + { + "name": "plugins/modules/na_cloudmanager_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90df6c82602c4ef95ac785cf7e39ff27276ccc34492a367a37911d9e3de401a4", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat/unittest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cba95d18c5b39c6f49714eacf1ac77452c2e32fa087c03cf01aacd19ae597b0f", + "format": 1 + }, + { + "name": "tests/unit/compat/builtins.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1", + "format": 1 + }, + { + "name": "tests/unit/compat/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/compat/mock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc3268366a078318126a00095e160b71ce50940415c07b5cbeed3e99b09b5777", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/unit/plugins/module_utils/test_netapp_module_open.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "781ebe6901008b71e0d0845e8b17c67634cd5169f0153d287fb57cde90289637", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba96ce33232f13bd447b2fb68b0fa9a69d35885b6baf79fd5c0df5a0b2a64672", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "62f6297e685f7f5856ff20d24394a42a3c8e63a4f59cb0aa8d60fd7cce7b76f1", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7722c49e69cfb0615f3ed6f73923492b542063d87c855a742514d53c5d950e5f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8466aaf93284df6abe33f8bdc8c84c2a41bc639c828567133c70f0652e9e87f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22e5e9c0ebb9656ab518bfd5330bc716c9a740f2a06454e0938581634e255b79", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6333f22900738d4bc76a7f262f958e2ad97503906d6bf84b1a9931ab4f3f1972", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9952d7009e407b1f226ffaa0b1c52de283840ecbae8eff1da93c11182f9b033", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_na_cloudmanager_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1c34a75b6274d58213992c304c40273e9d3eef56f2ae9eccea5f51924228676", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40b48c2698d1e0cfb303096ba23faa8a6ca3853c903043ff328ab07d1d7f2858", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c33120714b80513bd39534ff3f5089b4bf2c4001279027c8f5cd8379378ebae", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b274d85e9ec933868cde835d78003d801ef0aabf029759caf5e5ea94104c7afa", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0f7d00007858264d38bf6ede76669d56fe398eeb0c3faad91ebedb097015bfa", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f8bdc0f62d3a106aea51e251716cf78b2de46750dd1371242dcffcd10dd16cb5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a671c7291d57ece9aee85657a17037feafc5bc8d3d05861036107dd0662fed9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8528fdf73b6f007dcaf41c7babdadccf6ccacf92c986d26f3f5ebb3c88d42da", + "format": 1 + }, + { + "name": "tests/unit/requirements-azure.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"551b05366d802821dc9869616601ddb67ffd2a9231aab37f32f4812ca81afcb0", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec1875745063d448e6d9ad63042abac6f55d1e627f1237303b1944cec7c38bdc", + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "01b491a2a0634f29873f6063d5badbae8c3643209884a000db8610b80f44dd43", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4205.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66baa4e18249a32876ebe811915dbe67ef1ed0bef7c2f139b6de2252b55e739b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4065.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "094ddd1061505ce3f998eb6e88b6711261ee2aa27cedd49567010fb1ca37888e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4136.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e31cf34c68e72a5a332e9e072356a3a9a7b1ec6ecbecb3a0348f925d076ec8d8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4458.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c43e507a39aa3f9efa2bb17c08dbb4e7698691709e36b344ffbb2f3e0eb9cd67", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4321.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b98d00d32fa143d99acf45492fa9bf2a703c6e0c0bac80cdafc9002f7db3bff", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4264.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"890bd065b20b9c677f72c820d2eae695d93682cfd8cd7e341bc44b1ecf0fadea", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5527.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b4cea8b8c57f922c325a2562afe212b36c163a365a3067797972491a31f1dd8d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3975.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d054ac4f644b1df01315550caf3147bd203e40eb8b89719dafcc198b9baa155", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3922.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77324f4e6a616a46a6ebe8c946f460402085308a341e4c00c8ec3007784470cf", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4567.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2731d533d63daa8869a1b08950b623b590fb85f84c6a4af02b37e6153c861e8b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5252.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e4c96f10c645cc8c6f90590c19b843af8a92f14cbe0c48b9e55417bd934b330", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3909.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d7b5abb05c691646e6f19d2518a5cb45a8627d53a8699eb01b0ceb0a14eafd1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4327.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "95c492d780e4b8391ca5038c4a81ea1b785d97597df20b44135b5de24e89c482", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3948.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e49f738c3d4bf82c3e7aa8cb0a24e048d0604c4ad42cd859cad0ba7e997370b9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4223.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d2769624054e9fa53590b3d9e6ad80d77883c6f2c90032925439c83c7ac7084", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4281.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e0f401fcd9e3352e374cc9896814fff575f5cce759b4cb1cba960c16e35139b3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3913.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82b14dfb837c6a93e4cd4607f5d85d2d306e886f307a0db2d358c79fd41c2385", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3912.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fdc080b861bdf15100f087943826a3e90d89305ac9e08bd003edbd3d689215f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4500.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1442536d70262642ad0679f123831bde8e1c55d130e1becbaef18b0d79fda382", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5307.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f7d721907b0e0c0852f8f72af7f03be88cb20472e0c69ef972b9b2d48af61fb", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4516.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43fe4986b1e6e4ff2645327952d9f544a0991fca979abc019d227a25c3614e52", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5002.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e6ae72782a58aeb83ee733b05d3c4f203212aea971b2dbf662e5dc0bfde7e162", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5452.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4f04c0c2b957701f044b1c7368fc09fbbab47df39cf7e684553c9e67c2e41eb", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5540.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6a3e77590b685362b8938b251e72f1b64e9b15a14bb146fafb21dd31b1a1fa1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5151.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d30b752e9d852e98255c44780372c40d9b3e2d437c1998dd4fcb857823127cdc", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5472.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fcc0d28fc6dba864e49c460f8186503a813185e295c3afeb70f6876fcbf79cf5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5366.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d69efccb38cd8d8a90203194a7512a52f2992e8af2e7aed5eab6961dcbffcdea", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3965.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "970ab4ad82b69749b11887cd16c8124ff22cc0da582e35a2a7ce166ecb9ef5f0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3911.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e76a7d85e3758bb4dc408749d27c941ef40b50798e7b828cdaadf56f8210ab2a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3946.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af2f9ff445807a9fb42a800476523197177f1672df2e49c8046190f3f2b0d43b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3803.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0003d0beb209eeae803209ba732ee929d512296f21c9bef59414fa19cb90c62f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4542.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "95100f5e2baa68ce684750f9c94e46856fc540058ebb5d6be9251feb949bbfee", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4647.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87387e0deec568b28ecbfb30f940db75d167e6cd64e243636853a2a0e18fd380", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4201.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3a29e73f83aba3c999cef2cac0f6ca68f782dd1b329fa0da0b599c2d1955731", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-3985.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dd94e353c355e752fa0aad037737434e0ed7f75baa28807262acfe2a82ae672d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3984.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "517de92be276ebc5969fbe0607ec28b2c3beef28277ed801d0d3e79f173abd39", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4200.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "691f2cee49e9b38de09ed9610de8ab36752cb1dbc3ca24c5a668747548de6cdf", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4703.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fca33074db70f061a8ade1c912be6ae6a9d547f325f22d01a33bb3fe01dbfa9c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4492.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e0a584e4d7b93629917afc8394c6fdb4006cc966acbd202772d1d17ee83c3c31", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4328.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4993359bcd8c731369af85883062d02112c3c1cc70e2b15d8e95bdab2af9312", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3947.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11588e2084d79a358eaa4af53302a36e97c6c1ad9af8c49ea55e60acfac5848b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4386.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a00aa58145d60dd02b2241efc57f5c83e3610aadb0b420e89512b7d2e5aa3e0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3910.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23c0a4cbd8fe7a4792b4f8805235d3f29184264269f5b31fc4f7571a7f156d58", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4105.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d205ac596e3e955783b7f67cb24a366d712a4843caa3445398018ba1f13a8787", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3967.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f325180ed56fc1c530271a04fd115bae3ec32fb3df279f0f08bf40e8324c0c72", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4563.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d784283f7af8b1de73a1d69a4834a59a765cc13843f3ccca096c28001309fb31", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4164.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18bd434037808c9aa267775f708f5b0c256b46661549fb45a77f660ad9c6831d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4021.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b86b7ef5fb28e98b22b7838fd746b4efb3684723e22d7e989559c9f0c9c0a38", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4758.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4254c34a29d4a8e5a86ad03ccac987f2ff39cf2874bad260098fa3c68fe91ec6", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5562.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c775744a3c2674be85f94e2d03b65454514f89d102d7419e2b87f7e1ac4b342", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4298.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea8da93d3fad1c5e838daf20aa028258263220f475c19c33b69f57f2eda6013e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4513.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2b56b351170342cd33ef993ee94f5f843a690b43b2ab3251e5a905f5c0421b2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3844.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3f27181c95c1242f5b8e3c91db76276634007240dc007e24fa16617a231fa19", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-3995.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "794848f5493e7b8f795351792f2b0faf3fd26bdec6ca61032d8ccbcc91949c67", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4303.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a9fc2f16a2d6a55c911f584702739fe95f65c6810e58b02a68b44997a798127", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4118.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09bc1877922fbe266d127a44cd170cbaf7c76da75b99d74f8744e68e659bf730", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4358.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d16ee9221c9d89fefecdfa8087fb7674ec6bdbbe26a8290fa44a0862015fb8d3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4820.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "593d4956f27b7d20c8a4a98d966eae48cc5a2b39e4481716bf687c467a0e2a56", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4267.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ede30ad3ca2a74a71971f801f40350fdf75daf02da17d65b8e50c58e2963058", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4271.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "709b16cc1317def4369fd8bd414d4f795de8f64f395f8f60eb6d818dacd5bdee", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5437.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e635402bc11724322624fe36b412b5f6fe32a59e1bb998a52b6d6d97e0df6b6", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4292.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3dbe54f0e1f3106c3de0b49e9775325eae0604bd861995a8470755421c9a7a93", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4416.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"4224db573f34caeeb956c8728eb343a47bc2729d898001a4c6a671b780dae1bf", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5342.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a50a71e12e405ca5439e4845d7eb4a6cb76c7cde924a9eda4929bb8cdd67b3f3", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "394b4a5fd0611f8e0391646242d99fb4d5262aed1a2c673b0e83a35532b3fa81", + "format": 1 + }, + { + "name": "changelogs/.plugin-cache.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be7a40a0d78be9817a8bc07e43867f18d03f5ccee1cb3e89ac265ecf88ae17aa", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "346a3fe8d6d060262187ae2c2b4f2fec26cb62bee57056a22bc748d365ae4f21", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69c1d5ad062564e51ea093e92a8a8a53369776964d998bc118fc6f763576cd20", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd3d45d5278e9d4fdc6d2ccf4b4e218199dac954ceda06b15c613020c158e249", + "format": 1 + }, + { + "name": ".github/workflows/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2db808e4f01ee3ea3924ba5adba02a3ee3ed33c5a1540a0a04892bb1ab4fb2f7", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": ".github/ISSUE_TEMPLATE/feature_request.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e4ed8909f732d6bd64277ad63b19ba377db566b749048de9fff2834bc88f0d0", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/bug_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f309598150491c76f6fd83a5da0c0964b7835117b5909128b0d64661c33025fc", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7a9c516b250389edf58a4b047e10b4b5bd3d8e30d2547d00c3846ab63406b49", + "format": 1 + }, + { + "name": "kubectl.sha256", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26fa5d2141ec23edea21153680baee49b5d545bf4fe574301befabf7ca83a025", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/MANIFEST.json b/ansible_collections/netapp/cloudmanager/MANIFEST.json new file mode 100644 index 000000000..f1768abba --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/MANIFEST.json @@ -0,0 +1,37 @@ +{ + "collection_info": { + "namespace": "netapp", + "name": "cloudmanager", + "version": "21.22.0", + "authors": [ + "NetApp Ansible Team " + ], + "readme": "README.md", + "tags": [ + "netapp", + "cvo", + "cloudmanager", + "amazon", + "cloud", + "storage", + "azure", + "gcp" + ], + "description": "Ansible collection to create CloudManager connectors, CVO instances, CVO aggregates, CVO volumes, and more.", + "license": [], + "license_file": "COPYING", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/netapp.cloudmanager", + "documentation": "https://docs.ansible.com/ansible/latest/collections/netapp/", + "homepage": "https://netapp.io/configuration-management-and-automation/", + "issues": "https://github.com/ansible-collections/netapp.cloudmanager" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"6d4c630f97d816aaaf8cc83d416b16c9ebd0dc49ed37386ffbf63f18b11813e7", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/README.md b/ansible_collections/netapp/cloudmanager/README.md new file mode 100644 index 000000000..f6e794a46 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/README.md @@ -0,0 +1,262 @@ +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/netapp/cloudmanager/index.html) +![example workflow](https://github.com/ansible-collections/netapp.cloudmanager/actions/workflows/main.yml/badge.svg) +[![codecov](https://codecov.io/gh/ansible-collections/netapp.cloudmanager/branch/main/graph/badge.svg?token=weBYkksxSi)](https://codecov.io/gh/ansible-collections/netapp.cloudmanager) +[![Discord](https://img.shields.io/discord/855068651522490400)](https://discord.gg/NetApp) +# Ansible Collection - netapp.cloudmanager + +Copyright (c) 2022 NetApp, Inc. All rights reserved. +Specifications subject to change without notice. + +This collection requires python 3.5 or better. + +# Installation +```bash +ansible-galaxy collection install netapp.cloudmanager +``` +To use this collection, add the following to the top of your playbook: +``` +collections: + - netapp.cloudmanager +``` +# Requirements +- ansible version >= 2.9 +- requests >= 2.20 +- python version >= '3.5' + +# Module documentation +https://docs.ansible.com/ansible/devel/collections/netapp/cloudmanager/ + +# Need help +Join our [Discord](https://discord.gg/NetApp) and look for our #ansible channel. + +# Code of Conduct +This collection follows the [Ansible project's Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html). + +# Documentation +https://github.com/ansible-collections/netapp/wiki + +# Release Notes + +## 21.22.0 + - Add `svm_name` option in AWS, AZURE and GCP CVO for creation and update. 
+ +## 21.21.0 + +### Minor Changes + - na_cloudmanager_connector_azure - expose connector managed system identity principal_id tp perform role assignment. + +### New Options + - Add `availability_zone_node1` and `availability_zone_node2` options in CVO Azure HA on the location configuration. + - Add new `storage_type` value Premium_ZRS + +## 21.20.1 + +### Bug Fixes + - new meta/execution-environment.yml is failing ansible-builder sanitize step. + +## 21.20.0 + +### New Options + - Add `availability_zone` option in CVO Azure on the location configuration. + - Add `cluster_key_pair_name` option in CVO AWS for SSH authentication key pair method. + - Add `subnet_path` option in CVO GCP. + +### Bug Fixes + - Fix the `machine_type` default value in the connector GCP. + +### Minor Changes + - na_cloudmanager_volume - Support AWS FsxN working environment + +## 21.19.0 + +### Minor Changes + - Support writing_speed_state modification for AWS, AZURE and GCP CVOs. + +## 21.18.0 + - na_cloudmanager_connector_azure - support full subnet_id and vnet_id + - Support ``writing_speed_state`` modification for AWS, AZURE and GCP CVOs. + +## 21.17.0 + +### Minor Changes + - na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager. + - Support ``license_type`` modification for AWS, AZURE and GCP CVOs. + +### New Options + - na_cloudmanager_connector_azure - Support user defined ``storage_account``. The storage account can be created automatically. When ``storage_account`` is not set, the name is constructed by appending 'sa' to the connector ``name``. + - na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager by adding new parameters ``import_file_system`` and ``file_system_id``. + +## 21.16.0 + +### Bug Fixes + - na_cloudmanager_volume - Add check when volume is capacity tiered. + - na_cloudmanager_connector_azure - Fix string formatting error when deleting the connector. 
+ +### Minor Changes + - na_cloudmanager_connector_gcp - when using the user application default credential authentication by running the command gcloud auth application-default login, ``gcp_service_account_path`` is not needed. + +## 21.15.0 + +### Minor Changes + - Add the description of the client_id based on the cloudmanager UI. + - Update ``license_type`` and ``capacity_package_name`` default values on capacity based license. + +## 21.14.0 + +### Minor Changes + - na_cloudmanager_snapmirror - add AWS FSx to snapmirror. + +### Bug Fixes + - CVO working environment clusterProperties is deprecated. Make changes accordingly. Add CVO update status check on `instance_type` change. + +## 21.13.0 + +### New Modules + - na_cloudmanager_aws_fsx - NetApp AWS FSX + +### Minor Changes + - na_cloudmanager_connector_aws - make the module idempotent for create and delete. + - na_cloudmanager_connector_aws - automatically fetch client_id and instance_id for delete. + - na_cloudmanager_connector_aws - report client_id if connector already exists. + - na_cloudmanager_info - new subsets - account_info, agents_info, active_agents_info. + - Add ONTAP image upgrade feature for AWS, AZURE and GCP CVOs. Add ``upgrade_ontap_version`` to indicate if upgrade ONTAP is needed. It only can be used when ``use_latest_version`` is false and ``ontap_version`` is a specific version. + - Add instance_type update feature for AWS, AZURE and GCP CVOs. + - na_cloudmanager_volume - Add ``tiering_policy`` and ``snapshot_policy_name`` modification, and report error if the properties cannot be changed. + +### Bug Fixes + - na_cloudmanager_cvo_gcp - handle extra auto-gen GCP labels to fix `gcp_labels` update failure. + - Add ``update_svm_password`` for ``svm_password`` update on AWS, AZURE and GCP CVOs. Update ``svm_password`` if ``update_svm_password`` is true. + +## 21.12.1 + +### Bug Fixes + - na_cloudmanager_connector_aws - fix default ami not found in the region on resource file. 
+ - na_cloudmanager_snapmirror - report actual error rather than None with "Error getting destination info". + +## 21.12.0 + +### Minor Changes + - Handle extra azure_tag on AZURE CVO and extra gcp_labels on GCP CVO HA on modification. gcp_labels modification on GCP CVO does not support remove labels. + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. + +### Bug Fixes + - na_cloudmanager_snapmirror - working environment get information api not working for onprem is fixed. + - Fix cannot find working environment if `working_environment_name` is provided. + +## 21.11.0 + +## New Options + - Adding new parameter `capacity_package_name` for all CVOs creation with capacity based license type capacity-paygo or ha-capacity-paygo for HA. + +### Minor Changes + - na_cloudmanager_connector_gcp - make the module idempotent for create and delete. + - na_cloudmanager_connector_gcp - automatically fetch client_id for delete. + - na_cloudmanager_connector_gcp - report client_id if connector already exists. + - all modules - better error reporting if ``refresh_token`` is not valid. + +### Bug Fixes + - na_cloudmanager_connector_gcp - typeError when using proxy certificates. + +## 21.10.0 + +### Minor Changes + - Adding support update on `svm_password`, `tier_level`, `aws_tag`, `azure_tag` and `gcp_labels` for all CVOs. Only these parameters will be modified on the existing CVOs. + +### Bug Fixes + - na_cloudmanager_snapmirror - key error CloudProviderName for ONPREM operation. + +## New Options + - Adding new parameter `ha_enable_https` for HA CVO to enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false. + - Adding new parameters `kms_key_id` and `kms_key_arn` as AWS encryption parameters for AWS CVO encryption. + - Adding new parameter `azure_encryption_parameters` for AZURE CVO encryption. + - Adding new parameter `gcp_encryption_parameters` for GCP CVO encryption. 
+ +## 21.9.0 + +### New Options + - Adding selflink support on CVO GCP params: `subnet_id`, `vpc0_node_and_data_connectivity`, `vpc1_cluster_connectivity`, `vpc2_ha_connectivity`, `vpc3_data_replication`, `subnet0_node_and_data_connectivity`, `subnet1_cluster_connectivity`, `subnet2_ha_connectivity`, and `subnet3_data_replication`. + - Adding pd-balanced support on ``gcp_volume_type`` CVO GCP and ``provider_volume_type`` for na_cloudmanager_snapmirror and na_cloudmanager_volume. + +### Bug Fixes + - Change `virtual_machine_size` default value to Standard_DS3_v2. + +## 21.8.0 + +### New Options + - Adding stage environment to all modules in cloudmanager. + - Adding service account support on API operations in cloudmanager: `sa_client_id` and `sa_secret_key`. `refresh_token` will be ignored if service account information is provided. + +### Bug Fixes + - Accept client_id end with or without 'clients'. + +## 21.7.0 + +### New Options + - na_cloudmanager_cvo_aws: Support one new ebs_volume_type gp3. + - Adding stage environemt to all modules in cloudmanager. + - na_cloudmanager_volume: Add `aggregate_name` support on volume creation. + - na_cloudmanager_cvo_aws: Support one new `ebs_volume_type` gp3. + - na_cloudmanager_connector_azure: Add `subnet_name` as aliases of `subnet_id`, `vnet_name` as aliases of `vnet_id`. + - na_cloudmanager_aggregate - Add ``provider_volume_type`` gp3 support. + - na_cloudmanager_volume - Add ``provider_volume_type`` gp3 support. + - na_cloudmanager_snapmirror - Add ``provider_volume_type`` gp3 support. + +### Bug Fixes + - na_cloudmanager_aggregate: Improve error message. + - na_cloudmanager_cvo_gcp: Apply `network_project_id` on vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet1_cluster_connectivity, subnet2_ha_connectivity, subnet3_data_replication. 
+ - na_cloudmanager_connector_gcp: rename option `service_account_email` and `service_account_path` to `gcp_service_account_email` and `gcp_service_account_path` respectively. + - na_cloudmanager_connector_azure: Fix KeyError client_id. + - na_cloudmanager_nss_account: Improve error message. + - na_cloudmanager_volume: Improve error message. + +## 21.6.0 + +### New Modules + - na_cloudmanager_snapmirror: Create or Delete snapmirror on Cloud Manager. + +### Bug Fixes + - na_cloudmanager_connector_gcp: Make client_id as optional. + - na_cloudmanager_cvo_gcp: Change ``vpc_id`` from optional to required. + +## 21.5.1 + +### Bug fixes + - na_cloudmanager_cifs_server: Fix incorrect API call when is_workgroup is true. + - na_cloudmanager_connector_azure: Fix python error - msrest.exceptions.ValidationError. Parameter 'Deployment.properties' can not be None. + - na_cloudmanager_connector_azure: Fix wrong example on the document and update account_id is required field on deletion. + +## 21.5.0 + +### New Options + - na_cloudmanager_connector_aws: Return newly created Azure client ID in cloud manager, instance ID and account ID. New option `proxy_certificates`. + - na_cloudmanager_cvo_aws: Return newly created AWS working_environment_id. + - na_cloudmanager_cvo_azure: Return newly created AZURE working_environment_id. + - na_cloudmanager_cvo_gcp: Return newly created GCP working_environment_id. + +## Bug Fixes + - na_cloudmanager_cvo_aws: Fix incorrect placement of platformSerialNumber in the resulting json structure. + +## 21.4.0 + +### Module documentation changes + - Remove the period at the end of the line on short_description. + - Add period at the end of the names in examples. + - Add notes mentioning support check_mode. + +### New Modules + - na_cloudmanager_connector_azure: Create or delete Cloud Manager connector for Azure. + - na_cloudmanager_cvo_azure: Create or delete Cloud Manager CVO for AZURE for both single and HA. 
+ - na_cloudmanager_info: Gather Cloud Manager subset information using REST APIs. Support for subsets `working_environments_info`, `aggregates_info`, `accounts_info`. + - na_cloudmanager_connector_gcp: Create or delete Cloud Manager connector for GCP. + - na_cloudmanager_cvo_gcp: Create or delete Cloud Manager CVO for GCP for both single and HA. + +## 21.3.0 + +### New Modules + - na_cloudmanager_aggregate: Create or delete an aggregate on Cloud Volumes ONTAP, or add disks on an aggregate. + - na_cloudmanager_cifs_server: Create or delete CIFS server for Cloud Volumes ONTAP. + - na_cloudmanager_connector_aws: Create or delete Cloud Manager connector for AWS. + - na_cloudmanager_cvo_aws: Create or delete Cloud Manager CVO for AWS for both single and HA. + - na_cloudmanager_nss_account: Create or delete a nss account on Cloud Manager. + - na_cloudmanager_volume: Create, modify or delete a volume on Cloud Volumes ONTAP. + diff --git a/ansible_collections/netapp/cloudmanager/changelogs/.plugin-cache.yaml b/ansible_collections/netapp/cloudmanager/changelogs/.plugin-cache.yaml new file mode 100644 index 000000000..11c23112f --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/.plugin-cache.yaml @@ -0,0 +1,81 @@ +plugins: + become: {} + cache: {} + callback: {} + cliconf: {} + connection: {} + httpapi: {} + inventory: {} + lookup: {} + module: + na_cloudmanager_aggregate: + description: NetApp Cloud Manager Aggregate + name: na_cloudmanager_aggregate + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.3.0 + na_cloudmanager_aws_fsx: + description: Cloud ONTAP file system(FSx) in AWS + name: na_cloudmanager_aws_fsx + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.13.0 + 
na_cloudmanager_cifs_server: + description: NetApp Cloud Manager cifs server + name: na_cloudmanager_cifs_server + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.3.0 + na_cloudmanager_connector_aws: + description: NetApp Cloud Manager connector for AWS + name: na_cloudmanager_connector_aws + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.3.0 + na_cloudmanager_connector_azure: + description: NetApp Cloud Manager connector for Azure. + name: na_cloudmanager_connector_azure + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.4.0 + na_cloudmanager_connector_gcp: + description: NetApp Cloud Manager connector for GCP. + name: na_cloudmanager_connector_gcp + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.4.0 + na_cloudmanager_cvo_aws: + description: NetApp Cloud Manager CVO for AWS + name: na_cloudmanager_cvo_aws + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.3.0 + na_cloudmanager_cvo_azure: + description: NetApp Cloud Manager CVO/working environment in single or HA mode + for Azure. 
+ name: na_cloudmanager_cvo_azure + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.4.0 + na_cloudmanager_cvo_gcp: + description: NetApp Cloud Manager CVO for GCP + name: na_cloudmanager_cvo_gcp + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.4.0 + na_cloudmanager_info: + description: NetApp Cloud Manager info + name: na_cloudmanager_info + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.4.0 + na_cloudmanager_nss_account: + description: NetApp Cloud Manager nss account + name: na_cloudmanager_nss_account + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.3.0 + na_cloudmanager_snapmirror: + description: NetApp Cloud Manager SnapMirror + name: na_cloudmanager_snapmirror + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.6.0 + na_cloudmanager_volume: + description: NetApp Cloud Manager volume + name: na_cloudmanager_volume + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules + version_added: 21.3.0 + netconf: {} + shell: {} + strategy: {} + vars: {} +version: 21.19.0 diff --git a/ansible_collections/netapp/cloudmanager/changelogs/changelog.yaml b/ansible_collections/netapp/cloudmanager/changelogs/changelog.yaml new file mode 100644 index 000000000..1160305d3 --- /dev/null +++ 
b/ansible_collections/netapp/cloudmanager/changelogs/changelog.yaml @@ -0,0 +1,374 @@ +ancestor: null +releases: + 21.10.0: + changes: + bugfixes: + - na_cloudmanager_snapmirror - key error CloudProviderName for ONPREM operation + minor_changes: + - Only these parameters will be modified on the existing CVOs. svm_passowrd + will be updated on each run. + - na_cloudmanager_cvo_aws - Support update on svm_password, tier_level, and + aws_tag. + - na_cloudmanager_cvo_aws - add new parameter ``kms_key_id`` and ``kms_key_arn`` + as AWS encryption parameters to support AWS CVO encryption + - na_cloudmanager_cvo_azure - Add new parameter ``ha_enable_https`` for HA CVO + to enable the HTTPS connection from CVO to storage accounts. This can impact + write performance. The default is false. + - na_cloudmanager_cvo_azure - Support update on svm_password, tier_level, and + azure_tag. + - na_cloudmanager_cvo_azure - add new parameter ``azure_encryption_parameters`` + to support AZURE CVO encryption + - na_cloudmanager_cvo_gcp - Support update on svm_password, tier_level, and + gcp_labels. + - na_cloudmanager_cvo_gcp - add new parameter ``gcp_encryption_parameters`` + to support GCP CVO encryption + fragments: + - DEVOPS-4065.yaml + - DEVOPS-4136.yaml + - DEVOPS-4164.yaml + - DEVOPS-4200.yaml + release_date: '2021-09-01' + 21.11.0: + changes: + bugfixes: + - na_cloudmanager_connector_gcp - typeError when using proxy certificates. + minor_changes: + - Add CVO modification unit tests + - Adding new parameter ``capacity_package_name`` for all CVOs creation with + capacity based ``license_type`` capacity-paygo or ha-capacity-paygo for HA. + - all modules - better error reporting if refresh_token is not valid. + - na_cloudmanager_connector_gcp - automatically fetch client_id for delete. + - na_cloudmanager_connector_gcp - make the module idempotent for create and + delete. + - na_cloudmanager_connector_gcp - report client_id if connector already exists. 
+ - na_cloudmanager_cvo_aws - Add unit tests for capacity based license support. + - na_cloudmanager_cvo_azure - Add unit tests for capacity based license support. + - na_cloudmanager_cvo_gcp - Add unit tests for capacity based license support + and delete cvo. + - netapp.py - improve error handling with error content. + fragments: + - DEVOPS-4267.yaml + - DEVOPS-4292.yaml + - DEVOPS-4303.yaml + - DEVOPS-4321.yaml + - DEVOPS-4327.yaml + release_date: '2021-10-06' + 21.12.0: + changes: + bugfixes: + - Fix cannot find working environment if ``working_environment_name`` is provided + minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. + - na_cloudmanager_cvo_azure - Add extra tag handling on azure_tag maintenance + - na_cloudmanager_cvo_gcp - Add extra label hendling for HA and only allow add + new labels on gcp_labels + - na_cloudmanager_snapmirror - working environment get information api not working + for onprem is fixed + fragments: + - DEVOPS-4328.yaml + - DEVOPS-4358.yaml + - DEVOPS-4386.yaml + - DEVOPS-4416.yaml + release_date: '2021-11-03' + 21.12.1: + changes: + bugfixes: + - na_cloudmanager_connector_aws - Fix default ami not based on the region in + resource file + - na_cloudmanager_snapmirror - report actual error rather than None with "Error + getting destination info". + fragments: + - DEVOPS-4298.yaml + - DEVOPS-4458.yaml + release_date: '2021-11-23' + 21.13.0: + changes: + bugfixes: + - na_cloudmanager_cvo_gcp - handle extra two auto-gen GCP labels to prevent + update ``gcp_labels`` failure. + minor_changes: + - Add ``update_svm_password`` for ``svm_password`` update on AWS, AZURE and + GCP CVOs. Update ``svm_password`` if ``update_svm_password`` is true. + - Add ontap image upgrade on AWS, AZURE and GCP CVOs if ``upgrade_ontap_version`` + is true and ``ontap_version`` is provided with a specific version. ``use_latest_version`` + has to be false. 
+ - na_cloudmanager_connector_aws - automatically fetch client_id and instance_id + for delete. + - na_cloudmanager_connector_aws - make the module idempotent for create and + delete. + - na_cloudmanager_connector_aws - report client_id and instance_id if connector + already exists. + - na_cloudmanager_cvo_aws - Support instance_type update + - na_cloudmanager_cvo_azure - Support instance_type update + - na_cloudmanager_cvo_gcp - Support instance_type update + - na_cloudmanager_info - new subsets - account_info, agents_info, active_agents_info + - na_cloudmanager_volume - Report error if the volume properties cannot be modified. + Add support ``tiering_policy`` and ``snapshot_policy_name`` modification. + fragments: + - DEVOPS-4264.yaml + - DEVOPS-4271.yaml + - DEVOPS-4492.yaml + - DEVOPS-4500.yaml + - DEVOPS-4513.yaml + - DEVOPS-4542.yaml + modules: + - description: Cloud ONTAP file system(FSX) in AWS + name: na_cloudmanager_aws_fsx + namespace: '' + release_date: '2022-01-12' + 21.14.0: + changes: + bugfixes: + - CVO working environment clusterProperties is deprecated. Make changes accordingly. + Add CVO update status check on ``instance_type`` change. + minor_changes: + - na_cloudmanager_snapmirror - Add FSX to snapmirror. + fragments: + - DEVOPS-4516.yaml + - DEVOPS-4563.yaml + release_date: '2022-02-02' + 21.15.0: + changes: + minor_changes: + - Add the description of client_id based on the cloudmanager UI. + - Set license_type default value 'capacity-paygo' for single node 'ha-capacity-paygo' + for HA and 'capacity_package_name' value 'Essential' + fragments: + - DEVOPS-4647.yaml + - DEVOPS-4703.yaml + release_date: '2022-03-02' + 21.16.0: + changes: + bugfixes: + - Add check when volume is capacity tiered. + - na_cloudmanager_connector_azure - Fix string formatting error when deleting + the connector. 
+ minor_changes: + - na_cloudmanager_connector_gcp - when using the user application default credential + authentication by running the command gcloud auth application-default login, + ``gcp_service_account_path`` is not needed. + fragments: + - DEVOPS-4567.yaml + - DEVOPS-4758.yaml + - DEVOPS-4820.yaml + release_date: '2022-04-05' + 21.17.0: + changes: + minor_changes: + - na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager by adding new parameters + ``import_file_system`` and ``file_system_id``. + - na_cloudmanager_connector_azure - Support user defined ``storage_account`` + name. The ``storage_account`` can be created automatically. When ``storage_account`` + is not set, the name is constructed by appending 'sa' to the connector ``name``. + - na_cloudmanager_cvo_aws - Support license_type update + - na_cloudmanager_cvo_azure - Support license_type update + - na_cloudmanager_cvo_gcp - Support license_type update + fragments: + - DEVOPS-4223.yaml + - DEVOPS-4281.yaml + - DEVOPS-5002.yaml + release_date: '2022-05-04' + 21.18.0: + changes: + minor_changes: + - na_cloudmanager_connector_azure - Support full ``subnet_id`` and ``vnet_id``. + fragments: + - DEVOPS-5151.yaml + release_date: '2022-06-09' + 21.19.0: + changes: + minor_changes: + - Support ``writing_speed_state`` modification on AWS, AZURE and GCP CVOs. + fragments: + - DEVOPS-5252.yaml + release_date: '2022-08-03' + 21.20.0: + changes: + bugfixes: + - na_cloudmanager_connector_gcp - Fix default machine_type value on the GCP + connector. + minor_changes: + - Add ``availability_zone`` option in CVO Azure on the location configuration. + - Add ``subnet_path`` option in CVO GCP. + - na_cloudmanager_cvo_aws - Add new parameter ``cluster_key_pair_name`` to support + SSH authentication method key pair. + - na_cloudmanager_volume - Support AWS FsxN working environment. 
+ fragments: + - DEVOPS-5307.yaml + - DEVOPS-5342.yaml + - DEVOPS-5366.yaml + - DEVOPS-5437.yaml + - DEVOPS-5472.yaml + release_date: '2022-10-05' + 21.20.1: + changes: + bugfixes: + - new meta/execution-environment.yml is failing ansible-builder sanitize step. + fragments: + - DEVOPS-5540.yaml + release_date: '2022-10-07' + 21.21.0: + changes: + minor_changes: + - na_cloudmanager_connector_azure - expose connector managed system identity + principal_id to perform role assignment + - na_cloudmanager_cvo_azure - Add new ``storage_type`` value Premium_ZRS + - na_cloudmanager_cvo_azure - Add parameter ``availability_zone_node1`` and + ``availability_zone_node2`` for CVO Azure HA location + fragments: + - DEVOPS-5527.yaml + - DEVOPS-5562.yaml + release_date: '2022-11-02' + 21.22.0: + changes: + minor_changes: + - Add ``svm_name`` option in CVO for AWS, AZURE and GCP creation and update. + fragments: + - DEVOPS-5452.yaml + release_date: '2022-12-07' + 21.3.0: + modules: + - description: NetApp Cloud Manager Aggregate + name: na_cloudmanager_aggregate + namespace: '' + - description: NetApp Cloud Manager cifs server + name: na_cloudmanager_cifs_server + namespace: '' + - description: NetApp Cloud Manager connector for AWS + name: na_cloudmanager_connector_aws + namespace: '' + - description: NetApp Cloud Manager CVO for AWS + name: na_cloudmanager_cvo_aws + namespace: '' + - description: NetApp Cloud Manager nss account + name: na_cloudmanager_nss_account + namespace: '' + - description: NetApp Cloud Manager volume + name: na_cloudmanager_volume + namespace: '' + release_date: '2021-03-03' + 21.4.0: + modules: + - description: NetApp Cloud Manager connector for Azure. + name: na_cloudmanager_connector_azure + namespace: '' + - description: NetApp Cloud Manager connector for GCP. + name: na_cloudmanager_connector_gcp + namespace: '' + - description: NetApp Cloud Manager CVO/working environment in single or HA mode + for Azure. 
+ name: na_cloudmanager_cvo_azure + namespace: '' + - description: NetApp Cloud Manager info + name: na_cloudmanager_info + namespace: '' + release_date: '2021-04-07' + 21.5.0: + changes: + bugfixes: + - na_cloudmanager_cvo_aws - Fix incorrect placement of platformSerialNumber + in the resulting json structure + minor_changes: + - na_cloudmanager_connector_aws - Return newly created Azure client ID in cloud + manager, instance ID and account ID. New option ``proxy_certificates``. + - na_cloudmanager_cvo_aws - Return newly created AWS working_environment_id. + - na_cloudmanager_cvo_azure - Return newly created AZURE working_environment_id. + - na_cloudmanager_cvo_gcp - Return newly created GCP working_environment_id. + fragments: + - DEVOPS-3803.yaml + - DEVOPS-3844.yaml + - DEVOPS-3922.yaml + release_date: '2021-04-21' + 21.6.0: + changes: + bugfixes: + - na_cloudmanager_cifs_server - Fix incorrect API call when is_workgroup is + true + - na_cloudmanager_connector_azure - Change client_id as optional + - na_cloudmanager_connector_azure - Fix python error - msrest.exceptions.ValidationError. + Parameter 'Deployment.properties' can not be None. + - na_cloudmanager_connector_azure - Fix wrong example on the document and update + account_id is required field on deletion. + - na_cloudmanager_cvo_gcp - Change vpc_id from optional to required. + fragments: + - DEVOPS-3910.yaml + - DEVOPS-3911.yaml + - DEVOPS-3913.yaml + - DEVOPS-3946.yaml + - DEVOPS-3948.yaml + modules: + - description: NetApp Cloud Manager SnapMirror + name: na_cloudmanager_snapmirror + namespace: '' + release_date: '2021-05-06' + 21.7.0: + changes: + bugfixes: + - na_cloudmanager_aggregate - Improve error message + - na_cloudmanager_connector_azure - Add subnet_name as aliases of subnet_id, + vnet_name as aliases of vnet_id. 
+ - na_cloudmanager_connector_azure - Fix KeyError client_id + - na_cloudmanager_cvo_gcp - Apply network_project_id check on vpc1_cluster_connectivity, + vpc2_ha_connectivity, vpc3_data_replication, subnet1_cluster_connectivity, + subnet2_ha_connectivity, subnet3_data_replication + - na_cloudmanager_nss_account - Improve error message + - na_cloudmanager_volume - Improve error message + minor_changes: + - na_cloudmanager_aggregate - Add provider_volume_type gp3 support. + - na_cloudmanager_connector_gcp - rename option ``service_account_email`` and + ``service_account_path`` to ``gcp_service_account_email`` and ``gcp_service_account_path`` + respectively. + - na_cloudmanager_cvo_aws - Add ebs_volume_type gp3 support. + - na_cloudmanager_snapmirror - Add provider_volume_type gp3 support. + - na_cloudmanager_volume - Add aggregate_name support on volume creation. + - na_cloudmanager_volume - Add provider_volume_type gp3 support. + fragments: + - DEVOPS-3909.yaml + - DEVOPS-3912.yaml + - DEVOPS-3947.yaml + - DEVOPS-3967.yaml + - DEVOPS-3975.yaml + - DEVOPS-3984.yaml + - DEVOPS-3985.yaml + - DEVOPS-3995.yaml + release_date: '2021-06-03' + 21.8.0: + changes: + bugfixes: + - na_cloudmanager_aggregate - accept client_id end with or without 'clients' + - na_cloudmanager_cifs_server - accept client_id end with or without 'clients' + - na_cloudmanager_connector_aws - accept client_id end with or without 'clients' + - na_cloudmanager_connector_azure - accept client_id end with or without 'clients' + - na_cloudmanager_connector_gcp - accept client_id end with or without 'clients' + - na_cloudmanager_cvo_aws - accept client_id end with or without 'clients' + - na_cloudmanager_cvo_azure - accept client_id end with or without 'clients' + - na_cloudmanager_cvo_gcp - accept client_id end with or without 'clients' + - na_cloudmanager_info - accept client_id end with or without 'clients' + - na_cloudmanager_nss_account - accept client_id end with or without 'clients' + - 
na_cloudmanager_snapmirror - accept client_id end with or without 'clients' + - na_cloudmanager_volume - accept client_id end with or without 'clients' + major_changes: + - Adding stage environment to all modules in cloudmanager + minor_changes: + - na_cloudmanager - Support service account with new options ``sa_client_id`` + and ``sa_secret_key`` to use for API operations. + fragments: + - DEVOPS-3965.yaml + - DEVOPS-4021.yaml + - DEVOPS-4105.yaml + release_date: '2021-07-14' + 21.9.0: + changes: + minor_changes: + - na_cloudmanager - Support pd-balanced in ``gcp_volume_type`` for CVO GCP, + ``provider_volume_type`` in na_cloudmanager_snapmirror and na_cloudmanager_volume. + - na_cloudmanager_connector_azure - Change default value of ``virtual_machine_size`` + to Standard_DS3_v2. + - na_cloudmanager_cvo_gcp - Add selflink support on subnet_id, vpc0_node_and_data_connectivity, + vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet0_node_and_data_connectivity, + subnet1_cluster_connectivity, subnet2_ha_connectivity, and subnet3_data_replication. 
+ fragments: + - DEVOPS-4118.yaml + - DEVOPS-4201.yaml + - DEVOPS-4205.yaml + release_date: '2021-08-04' diff --git a/ansible_collections/netapp/cloudmanager/changelogs/config.yaml b/ansible_collections/netapp/cloudmanager/changelogs/config.yaml new file mode 100644 index 000000000..d0ffb959e --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/config.yaml @@ -0,0 +1,32 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: true +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: NetApp CloudManager Collection +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3803.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3803.yaml new file mode 100644 index 000000000..af0b39ae9 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3803.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_connector_aws - Return newly created Azure client ID in cloud manager, instance ID and account ID. New option ``proxy_certificates``. 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3844.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3844.yaml new file mode 100644 index 000000000..e36f3ffb6 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3844.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_cloudmanager_cvo_aws - Return newly created AWS working_environment_id. + - na_cloudmanager_cvo_azure - Return newly created AZURE working_environment_id. + - na_cloudmanager_cvo_gcp - Return newly created GCP working_environment_id. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3909.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3909.yaml new file mode 100644 index 000000000..6336f1adc --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3909.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_cloudmanager_aggregate - Improve error message + - na_cloudmanager_nss_account - Improve error message + - na_cloudmanager_volume - Improve error message diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3910.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3910.yaml new file mode 100644 index 000000000..0e9dd6390 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3910.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_cifs_server - Fix incorrect API call when is_workgroup is true \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3911.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3911.yaml new file mode 100644 index 000000000..a4ffd6b1e --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3911.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_connector_azure - Fix wrong 
example on the document and update account_id is required field on deletion. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3912.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3912.yaml new file mode 100644 index 000000000..f8ca958f3 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3912.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_connector_azure - Add subnet_name as aliases of subnet_id, vnet_name as aliases of vnet_id. diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3913.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3913.yaml new file mode 100644 index 000000000..ca13f3a6f --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3913.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_connector_azure - Fix python error - msrest.exceptions.ValidationError. Parameter 'Deployment.properties' can not be None. 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3922.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3922.yaml new file mode 100644 index 000000000..37cfe5d57 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3922.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_cvo_aws - Fix incorrect placement of platformSerialNumber in the resulting json structure diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3946.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3946.yaml new file mode 100644 index 000000000..0dd5745e7 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3946.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_connector_azure - Change client_id as optional \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3947.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3947.yaml new file mode 100644 index 000000000..320552cb3 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3947.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_connector_gcp - rename option ``service_account_email`` and ``service_account_path`` to ``gcp_service_account_email`` and ``gcp_service_account_path`` respectively. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3948.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3948.yaml new file mode 100644 index 000000000..3e5dd7522 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3948.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_cvo_gcp - Change vpc_id from optional to required. 
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3965.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3965.yaml new file mode 100644 index 000000000..17e1435eb --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3965.yaml @@ -0,0 +1,2 @@ +major_changes: + - Adding stage environment to all modules in cloudmanager \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3967.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3967.yaml new file mode 100644 index 000000000..ccd3e01c0 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3967.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_cvo_aws - Add ebs_volume_type gp3 support. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3975.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3975.yaml new file mode 100644 index 000000000..8f970c8ee --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3975.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_volume - Add aggregate_name support on volume creation. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3984.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3984.yaml new file mode 100644 index 000000000..b08225316 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3984.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_cloudmanager_aggregate - Add provider_volume_type gp3 support. + - na_cloudmanager_volume - Add provider_volume_type gp3 support. + - na_cloudmanager_snapmirror - Add provider_volume_type gp3 support. 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3985.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3985.yaml new file mode 100644 index 000000000..cfb6b4289 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3985.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_cvo_gcp - Apply network_project_id check on vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet1_cluster_connectivity, subnet2_ha_connectivity, subnet3_data_replication \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3995.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3995.yaml new file mode 100644 index 000000000..76f26a264 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3995.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_connector_azure - Fix KeyError client_id \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4021.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4021.yaml new file mode 100644 index 000000000..6e3903967 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4021.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager - Support service account with new options ``sa_client_id`` and ``sa_secret_key`` to use for API operations. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4065.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4065.yaml new file mode 100644 index 000000000..e05522215 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4065.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_cloudmanager_cvo_aws - Support update on svm_password, tier_level, and aws_tag. 
+ - na_cloudmanager_cvo_azure - Support update on svm_password, tier_level, and azure_tag. + - na_cloudmanager_cvo_gcp - Support update on svm_password, tier_level, and gcp_labels. + - Only these parameters will be modified on the existing CVOs. svm_password will be updated on each run. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4105.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4105.yaml new file mode 100644 index 000000000..91ee46370 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4105.yaml @@ -0,0 +1,13 @@ +bugfixes: + - na_cloudmanager_aggregate - accept client_id end with or without 'clients' + - na_cloudmanager_cifs_server - accept client_id end with or without 'clients' + - na_cloudmanager_connector_aws - accept client_id end with or without 'clients' + - na_cloudmanager_connector_azure - accept client_id end with or without 'clients' + - na_cloudmanager_connector_gcp - accept client_id end with or without 'clients' + - na_cloudmanager_cvo_aws - accept client_id end with or without 'clients' + - na_cloudmanager_cvo_azure - accept client_id end with or without 'clients' + - na_cloudmanager_cvo_gcp - accept client_id end with or without 'clients' + - na_cloudmanager_info - accept client_id end with or without 'clients' + - na_cloudmanager_nss_account - accept client_id end with or without 'clients' + - na_cloudmanager_snapmirror - accept client_id end with or without 'clients' + - na_cloudmanager_volume - accept client_id end with or without 'clients' \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4118.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4118.yaml new file mode 100644 index 000000000..e176f9574 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4118.yaml @@ -0,0 +1,2 @@ +minor_changes: + - 
na_cloudmanager_cvo_gcp - Add selflink support on subnet_id, vpc0_node_and_data_connectivity, vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet0_node_and_data_connectivity, subnet1_cluster_connectivity, subnet2_ha_connectivity, and subnet3_data_replication. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4136.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4136.yaml new file mode 100644 index 000000000..8d5494695 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4136.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_cvo_azure - Add new parameter ``ha_enable_https`` for HA CVO to enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4164.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4164.yaml new file mode 100644 index 000000000..e8fb7cdbd --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4164.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_cloudmanager_cvo_aws - add new parameter ``kms_key_id`` and ``kms_key_arn`` as AWS encryption parameters to support AWS CVO encryption + - na_cloudmanager_cvo_azure - add new parameter ``azure_encryption_parameters`` to support AZURE CVO encryption + - na_cloudmanager_cvo_gcp - add new parameter ``gcp_encryption_parameters`` to support GCP CVO encryption diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4200.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4200.yaml new file mode 100644 index 000000000..c6dc8ce07 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4200.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_snapmirror - key error 
CloudProviderName for ONPREM operation diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4201.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4201.yaml new file mode 100644 index 000000000..b55a9bc0d --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4201.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_connector_azure - Change default value of ``virtual_machine_size`` to Standard_DS3_v2. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4205.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4205.yaml new file mode 100644 index 000000000..58750c3aa --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4205.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager - Support pd-balanced in ``gcp_volume_type`` for CVO GCP, ``provider_volume_type`` in na_cloudmanager_snapmirror and na_cloudmanager_volume. 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4223.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4223.yaml new file mode 100644 index 000000000..b4f998061 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4223.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_cloudmanager_cvo_aws - Support license_type update + - na_cloudmanager_cvo_azure - Support license_type update + - na_cloudmanager_cvo_gcp - Support license_type update \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4264.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4264.yaml new file mode 100644 index 000000000..d986b0991 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4264.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Add ontap image upgrade on AWS, AZURE and GCP CVOs if ``upgrade_ontap_version`` is true and ``ontap_version`` is provided with a specific version. ``use_latest_version`` has to be false. 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4267.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4267.yaml new file mode 100644 index 000000000..1ce27541a --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4267.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Add CVO modification unit tests \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4271.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4271.yaml new file mode 100644 index 000000000..d6cbc19e0 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4271.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_cloudmanager_cvo_aws - Support instance_type update + - na_cloudmanager_cvo_azure - Support instance_type update + - na_cloudmanager_cvo_gcp - Support instance_type update \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4281.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4281.yaml new file mode 100644 index 000000000..33295e409 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4281.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager by adding new parameters ``import_file_system`` and ``file_system_id``. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4292.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4292.yaml new file mode 100644 index 000000000..22bfaa25a --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4292.yaml @@ -0,0 +1,8 @@ +minor_changes: + - na_cloudmanager_connector_gcp - make the module idempotent for create and delete. 
+ - na_cloudmanager_connector_gcp - automatically fetch client_id for delete. + - na_cloudmanager_connector_gcp - report client_id if connector already exists. + - all modules - better error reporting if refresh_token is not valid. + +bugfixes: + - na_cloudmanager_connector_gcp - typeError when using proxy certificates. diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4298.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4298.yaml new file mode 100644 index 000000000..2f0281975 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4298.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_connector_aws - Fix default ami not based on the region in resource file \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4303.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4303.yaml new file mode 100644 index 000000000..183a0e149 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4303.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Adding new parameter ``capacity_package_name`` for all CVOs creation with capacity based ``license_type`` capacity-paygo or ha-capacity-paygo for HA. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4321.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4321.yaml new file mode 100644 index 000000000..f06f7e78d --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4321.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_cloudmanager_cvo_aws - Add unit tests for capacity based license support. + - na_cloudmanager_cvo_azure - Add unit tests for capacity based license support. + - na_cloudmanager_cvo_gcp - Add unit tests for capacity based license support and delete cvo. 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4327.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4327.yaml new file mode 100644 index 000000000..15f75f223 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4327.yaml @@ -0,0 +1,2 @@ +minor_changes: + - netapp.py - improve error handling with error content. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4328.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4328.yaml new file mode 100644 index 000000000..5d9a08a85 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4328.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_snapmirror - working environment get information api not working for onprem is fixed \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4358.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4358.yaml new file mode 100644 index 000000000..cbc0dcaf0 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4358.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_cloudmanager_cvo_azure - Add extra tag handling on azure_tag maintenance + - na_cloudmanager_cvo_gcp - Add extra label handling for HA and only allow add new labels on gcp_labels \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4386.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4386.yaml new file mode 100644 index 000000000..e9c67085d --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4386.yaml @@ -0,0 +1,2 @@ +bugfixes: + - Fix cannot find working environment if ``working_environment_name`` is provided \ No newline at end of file diff --git 
a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4416.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4416.yaml new file mode 100644 index 000000000..6b4b660a0 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4416.yaml @@ -0,0 +1,2 @@ +minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4458.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4458.yaml new file mode 100644 index 000000000..75058f80c --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4458.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_snapmirror - report actual error rather than None with "Error getting destination info". diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4492.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4492.yaml new file mode 100644 index 000000000..2e37eb799 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4492.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_cloudmanager_connector_aws - make the module idempotent for create and delete. + - na_cloudmanager_connector_aws - automatically fetch client_id and instance_id for delete. + - na_cloudmanager_connector_aws - report client_id and instance_id if connector already exists. 
+ - na_cloudmanager_info - new subsets - account_info, agents_info, active_agents_info diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4500.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4500.yaml new file mode 100644 index 000000000..10384a2b2 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4500.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Add ``update_svm_password`` for ``svm_password`` update on AWS, AZURE and GCP CVOs. Update ``svm_password`` if ``update_svm_password`` is true. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4513.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4513.yaml new file mode 100644 index 000000000..adb0ea1cb --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4513.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_cvo_gcp - handle extra two auto-gen GCP labels to prevent update ``gcp_labels`` failure. diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4516.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4516.yaml new file mode 100644 index 000000000..f8bfbeb99 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4516.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_snapmirror - Add FSX to snapmirror. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4542.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4542.yaml new file mode 100644 index 000000000..51ee7719f --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4542.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_volume - Report error if the volume properties cannot be modified. 
Add support ``tiering_policy`` and ``snapshot_policy_name`` modification. diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4563.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4563.yaml new file mode 100644 index 000000000..c7a1c8a40 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4563.yaml @@ -0,0 +1,2 @@ +bugfixes: + - CVO working environment clusterProperties is deprecated. Make changes accordingly. Add CVO update status check on ``instance_type`` change. diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4567.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4567.yaml new file mode 100644 index 000000000..4aeae1916 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4567.yaml @@ -0,0 +1,2 @@ +bugfixes: + - Add check when volume is capacity tiered. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4647.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4647.yaml new file mode 100644 index 000000000..5320ef4cf --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4647.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Add the description of client_id based on the cloudmanager UI. 
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4703.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4703.yaml new file mode 100644 index 000000000..adb6c3d51 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4703.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Set license_type default value 'capacity-paygo' for single node 'ha-capacity-paygo' for HA and 'capacity_package_name' value 'Essential' \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4758.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4758.yaml new file mode 100644 index 000000000..2bb42546d --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4758.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_connector_gcp - when using the user application default credential authentication by running the command gcloud auth application-default login, ``gcp_service_account_path`` is not needed. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4820.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4820.yaml new file mode 100644 index 000000000..28e61171d --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4820.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_connector_azure - Fix string formatting error when deleting the connector. 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5002.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5002.yaml new file mode 100644 index 000000000..4543db292 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5002.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_connector_azure - Support user defined ``storage_account`` name. The ``storage_account`` can be created automatically. When ``storage_account`` is not set, the name is constructed by appending 'sa' to the connector ``name``. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5151.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5151.yaml new file mode 100644 index 000000000..929ad60da --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5151.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_connector_azure - Support full ``subnet_id`` and ``vnet_id``. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5252.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5252.yaml new file mode 100644 index 000000000..9f9a98f58 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5252.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Support ``writing_speed_state`` modification on AWS, AZURE and GCP CVOs. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5307.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5307.yaml new file mode 100644 index 000000000..01fb9b920 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5307.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_volume - Support AWS FsxN working environment. 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5342.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5342.yaml new file mode 100644 index 000000000..b7d0e1bc9 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5342.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Add ``subnet_path`` option in CVO GCP. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5366.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5366.yaml new file mode 100644 index 000000000..16ea910ec --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5366.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_cvo_aws - Add new parameter ``cluster_key_pair_name`` to support SSH authentication method key pair. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5437.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5437.yaml new file mode 100644 index 000000000..3222dc7c5 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5437.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_cloudmanager_connector_gcp - Fix default machine_type value on the GCP connector. \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5452.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5452.yaml new file mode 100644 index 000000000..3a9207105 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5452.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Add ``svm_name`` option in CVO for AWS, AZURE and GCP creation and update. 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5472.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5472.yaml new file mode 100644 index 000000000..494e17c3d --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5472.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Add ``availability_zone`` option in CVO Azure on the location configuration. diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5527.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5527.yaml new file mode 100644 index 000000000..e1643c975 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5527.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_cloudmanager_connector_azure - expose connector managed system identity principal_id to perform role assignment \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5540.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5540.yaml new file mode 100644 index 000000000..ca5e328eb --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5540.yaml @@ -0,0 +1,2 @@ +bugfixes: + - new meta/execution-environment.yml is failing ansible-builder sanitize step. 
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5562.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5562.yaml new file mode 100644 index 000000000..287e843b1 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5562.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_cloudmanager_cvo_azure - Add parameter ``availability_zone_node1`` and ``availability_zone_node2`` for CVO Azure HA location + - na_cloudmanager_cvo_azure - Add new ``storage_type`` value Premium_ZRS \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/README.md b/ansible_collections/netapp/cloudmanager/execution_environments/README.md new file mode 100644 index 000000000..fda73b90f --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/execution_environments/README.md @@ -0,0 +1,34 @@ +# How to build an Ansible Execution Environment + +## Prerequisites +This was tested with ansible-builder version 1.1.0. + +## Building from Galaxy +Using the files in the ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy directory as a template: +- execution-environment.yml describes the build environment. +- requirements.yml defines the collections to add into you execution environment. + +Then build with: + +``` +ansible-builder build +``` + +For instance, using podman instead of docker, and tagging: +``` +ansible-builder build --container-runtime=podman --tag myregistry.io/ansible-ee-netapp-cm:21.20.1 -f execution-environment.yml -v 3 +``` + +In my case, I needed to use sudo. + +## Building from GitHub +Alternativaly, the source code can be downloaded from GitHub. It allows to get code before release (at your own risks) or to use a fork. 
+See ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml + +## References + +https://ansible-builder.readthedocs.io/en/stable/usage/ + +https://docs.ansible.com/automation-controller/latest/html/userguide/ee_reference.html + + diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/execution-environment.yml b/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/execution-environment.yml new file mode 100644 index 000000000..466fb8373 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/execution-environment.yml @@ -0,0 +1,10 @@ +--- +version: 1 + +# ansible_config: 'ansible.cfg' + +# build_arg_defaults: +# EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:latest' + +dependencies: + galaxy: requirements.yml diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/requirements.yml b/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/requirements.yml new file mode 100644 index 000000000..b19e33b49 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/requirements.yml @@ -0,0 +1,13 @@ +--- +collections: + # Install collections from Galaxy + # - name: ansible.posix + # - name: netapp.aws + # # name: - netapp.azure + - name: netapp.cloudmanager + version: 21.20.1 + # - name: netapp.elementsw + # - name: netapp.ontap + # version: 21.24.1 + # - name: netapp.storagegrid + # - name: netapp.um_info diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/from_github/execution-environment.yml b/ansible_collections/netapp/cloudmanager/execution_environments/from_github/execution-environment.yml new file mode 100644 index 000000000..466fb8373 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/execution_environments/from_github/execution-environment.yml @@ -0,0 +1,10 @@ +--- +version: 1 + +# ansible_config: 'ansible.cfg' + +# 
build_arg_defaults: +# EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:latest' + +dependencies: + galaxy: requirements.yml diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml b/ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml new file mode 100644 index 000000000..efea39c22 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml @@ -0,0 +1,18 @@ +--- +collections: + # Install collections from Galaxy + # - name: ansible.posix + # - name: netapp.aws + # # name: - netapp.azure + # - name: netapp.cloudmanager + # version: 21.20.1 + # - name: netapp.elementsw + # - name: netapp.ontap + # version: 21.24.1 + # - name: netapp.storagegrid + # - name: netapp.um_info + + # Install a collection from GitHub. + - source: https://github.com/ansible-collections/netapp.cloudmanager.git + type: git + version: test_ee_21_20_0 diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/requirements.txt b/ansible_collections/netapp/cloudmanager/execution_environments/requirements.txt new file mode 100644 index 000000000..02dd40520 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/execution_environments/requirements.txt @@ -0,0 +1 @@ +ansible-builder diff --git a/ansible_collections/netapp/cloudmanager/kubectl.sha256 b/ansible_collections/netapp/cloudmanager/kubectl.sha256 new file mode 100644 index 000000000..13867098c --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/kubectl.sha256 @@ -0,0 +1 @@ +b859766d7b47267af5cc1ee01a2d0c3c137dbfc53cd5be066181beed11ec7d34 \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/meta/execution-environment.yml b/ansible_collections/netapp/cloudmanager/meta/execution-environment.yml new file mode 100644 index 000000000..ad211b139 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/meta/execution-environment.yml @@ -0,0 +1,3 @@ 
+version: 1 +dependencies: + python: requirements.txt diff --git a/ansible_collections/netapp/cloudmanager/meta/runtime.yml b/ansible_collections/netapp/cloudmanager/meta/runtime.yml new file mode 100644 index 000000000..df9365301 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/meta/runtime.yml @@ -0,0 +1,17 @@ +--- +requires_ansible: ">=2.9.10" +action_groups: + netapp_cloudmanager: + - na_cloudmanager_aggregate + - na_cloudmanager_cifs_server + - na_cloudmanager_connector_aws + - na_cloudmanager_connector_azure + - na_cloudmanager_connector_gcp + - na_cloudmanager_cvo_aws + - na_cloudmanager_cvo_azure + - na_cloudmanager_cvo_gcp + - na_cloudmanager_info + - na_cloudmanager_nss_account + - na_cloudmanager_snapmirror + - na_cloudmanager_volume + - na_cloudmanager_aws_fsx diff --git a/ansible_collections/netapp/cloudmanager/plugins/README.md b/ansible_collections/netapp/cloudmanager/plugins/README.md new file mode 100644 index 000000000..6541cf7cf --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/README.md @@ -0,0 +1,31 @@ +# Collections Plugins Directory + +This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that +is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that +would contain module utils and modules respectively. + +Here is an example directory of the majority of plugins currently supported by Ansible: + +``` +└── plugins + ├── action + ├── become + ├── cache + ├── callback + ├── cliconf + ├── connection + ├── filter + ├── httpapi + ├── inventory + ├── lookup + ├── module_utils + ├── modules + ├── netconf + ├── shell + ├── strategy + ├── terminal + ├── test + └── vars +``` + +A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible/2.9/plugins/plugins.html). 
\ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py new file mode 100644 index 000000000..76807bb1c --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, NetApp Ansible Team +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class ModuleDocFragment(object): + # Documentation fragment for CLOUDMANAGER + CLOUDMANAGER = """ +options: + refresh_token: + type: str + description: + - The refresh token for NetApp Cloud Manager API operations. + + sa_secret_key: + type: str + description: + - The service account secret key for NetApp Cloud Manager API operations. + + sa_client_id: + type: str + description: + - The service account secret client ID for NetApp Cloud Manager API operations. + + environment: + type: str + description: + - The environment for NetApp Cloud Manager API operations. + default: prod + choices: ['prod', 'stage'] + version_added: 21.8.0 + + feature_flags: + description: + - Enable or disable a new feature. + - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility. + - Supported keys and values are subject to change without notice. Unknown keys are ignored. + type: dict + version_added: 21.11.0 +notes: + - The modules prefixed with na_cloudmanager are built to manage CloudManager and CVO deployments in AWS/GCP/Azure clouds. + - If sa_client_id and sa_secret_key are provided, service account will be used in operations. refresh_token will be ignored. 
+""" diff --git a/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py new file mode 100644 index 000000000..eaecc8f00 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py @@ -0,0 +1,332 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2017-2021, NetApp Ansible Team +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +netapp.py: wrapper around send_requests and other utilities +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import logging +import time +from ansible.module_utils.basic import missing_required_lib + +try: + from ansible.module_utils.ansible_release import __version__ as ansible_version +except ImportError: + ansible_version = 'unknown' + +COLLECTION_VERSION = "21.22.0" +PROD_ENVIRONMENT = { + 'CLOUD_MANAGER_HOST': 'cloudmanager.cloud.netapp.com', + 'AUTH0_DOMAIN': 'netapp-cloud-account.auth0.com', + 'SA_AUTH_HOST': 'cloudmanager.cloud.netapp.com/auth/oauth/token', + 'AUTH0_CLIENT': 'Mu0V1ywgYteI6w1MbD15fKfVIUrNXGWC', + 'AMI_FILTER': 'Setup-As-Service-AMI-Prod*', + 'AWS_ACCOUNT': '952013314444', + 'GCP_IMAGE_PROJECT': 'netapp-cloudmanager', + 'GCP_IMAGE_FAMILY': 'cloudmanager', + 'CVS_HOST_NAME': 'https://api.services.cloud.netapp.com' +} +STAGE_ENVIRONMENT = { + 'CLOUD_MANAGER_HOST': 'staging.cloudmanager.cloud.netapp.com', + 'AUTH0_DOMAIN': 'staging-netapp-cloud-account.auth0.com', + 'SA_AUTH_HOST': 'staging.cloudmanager.cloud.netapp.com/auth/oauth/token', + 'AUTH0_CLIENT': 'O6AHa7kedZfzHaxN80dnrIcuPBGEUvEv', + 'AMI_FILTER': 'Setup-As-Service-AMI-*', + 'AWS_ACCOUNT': '282316784512', + 'GCP_IMAGE_PROJECT': 'tlv-automation', + 'GCP_IMAGE_FAMILY': 'occm-automation', + 'CVS_HOST_NAME': 'https://staging.api.services.cloud.netapp.com' +} + +try: + import requests + HAS_REQUESTS = True 
+except ImportError: + HAS_REQUESTS = False + + +POW2_BYTE_MAP = dict( + # Here, 1 kb = 1024 + bytes=1, + b=1, + kb=1024, + mb=1024 ** 2, + gb=1024 ** 3, + tb=1024 ** 4, + pb=1024 ** 5, + eb=1024 ** 6, + zb=1024 ** 7, + yb=1024 ** 8 +) + + +LOG = logging.getLogger(__name__) +LOG_FILE = '/tmp/cloudmanager_apis.log' + + +def cloudmanager_host_argument_spec(): + + return dict( + refresh_token=dict(required=False, type='str', no_log=True), + sa_client_id=dict(required=False, type='str', no_log=True), + sa_secret_key=dict(required=False, type='str', no_log=True), + environment=dict(required=False, type='str', choices=['prod', 'stage'], default='prod'), + feature_flags=dict(required=False, type='dict') + ) + + +def has_feature(module, feature_name): + feature = get_feature(module, feature_name) + if isinstance(feature, bool): + return feature + module.fail_json(msg="Error: expected bool type for feature flag: %s, found %s" % (feature_name, type(feature))) + + +def get_feature(module, feature_name): + ''' if the user has configured the feature, use it + otherwise, use our default + ''' + default_flags = dict( + trace_apis=False, # if True, append REST requests/responses to /tmp/cloudmanager_apis.log + trace_headers=False, # if True, and if trace_apis is True, include headers in trace + show_modified=True, + simulator=False, # if True, it is running on simulator + ) + + if module.params['feature_flags'] is not None and feature_name in module.params['feature_flags']: + return module.params['feature_flags'][feature_name] + if feature_name in default_flags: + return default_flags[feature_name] + module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name) + + +class CloudManagerRestAPI(object): + """ wrapper around send_request """ + def __init__(self, module, timeout=60): + self.module = module + self.timeout = timeout + self.refresh_token = self.module.params['refresh_token'] + self.sa_client_id = self.module.params['sa_client_id'] + self.sa_secret_key 
= self.module.params['sa_secret_key'] + self.environment = self.module.params['environment'] + if self.environment == 'prod': + self.environment_data = PROD_ENVIRONMENT + elif self.environment == 'stage': + self.environment_data = STAGE_ENVIRONMENT + self.url = 'https://' + self.api_root_path = None + self.check_required_library() + if has_feature(module, 'trace_apis'): + logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s') + self.log_headers = has_feature(module, 'trace_headers') # requires trace_apis to do anything + self.simulator = has_feature(module, 'simulator') + self.token_type, self.token = self.get_token() + + def check_required_library(self): + if not HAS_REQUESTS: + self.module.fail_json(msg=missing_required_lib('requests')) + + def format_client_id(self, client_id): + return client_id if client_id.endswith('clients') else client_id + 'clients' + + def build_url(self, api): + # most requests are sent to Cloud Manager, but for connectors we need to manage VM instances using AWS, Azure, or GCP APIs + if api.startswith('http'): + return api + # add host if API starts with / and host is not already included in self.url + prefix = self.environment_data['CLOUD_MANAGER_HOST'] if self.environment_data['CLOUD_MANAGER_HOST'] not in self.url and api.startswith('/') else '' + return self.url + prefix + api + + def send_request(self, method, api, params, json=None, data=None, header=None, authorized=True): + ''' send http request and process response, including error conditions ''' + url = self.build_url(api) + headers = { + 'Content-type': "application/json", + 'Referer': "Ansible_NetApp", + } + if authorized: + headers['Authorization'] = self.token_type + " " + self.token + if header is not None: + headers.update(header) + for __ in range(3): + json_dict, error_details, on_cloud_request_id = self._send_request(method, url, params, json, data, headers) + # we observe this error with DELETE on 
agents-mgmt/agent (and sometimes on GET) + if error_details is not None and 'Max retries exceeded with url:' in error_details: + time.sleep(5) + else: + break + return json_dict, error_details, on_cloud_request_id + + def _send_request(self, method, url, params, json, data, headers): + json_dict = None + json_error = None + error_details = None + on_cloud_request_id = None + response = None + status_code = None + + def get_json(response): + ''' extract json, and error message if present ''' + error = None + try: + json = response.json() + except ValueError: + return None, None + success_code = [200, 201, 202] + if response.status_code not in success_code: + error = json.get('message') + self.log_error(response.status_code, 'HTTP error: %s' % error) + return json, error + + self.log_request(method=method, url=url, params=params, json=json, data=data, headers=headers) + try: + response = requests.request(method, url, headers=headers, timeout=self.timeout, params=params, json=json, data=data) + status_code = response.status_code + if status_code >= 300 or status_code < 200: + self.log_error(status_code, 'HTTP status code error: %s' % response.content) + return response.content, str(status_code), on_cloud_request_id + # If the response was successful, no Exception will be raised + json_dict, json_error = get_json(response) + if response.headers.get('OnCloud-Request-Id', '') != '': + on_cloud_request_id = response.headers.get('OnCloud-Request-Id') + except requests.exceptions.HTTPError as err: + self.log_error(status_code, 'HTTP error: %s' % err) + error_details = str(err) + except requests.exceptions.ConnectionError as err: + self.log_error(status_code, 'Connection error: %s' % err) + error_details = str(err) + except Exception as err: + self.log_error(status_code, 'Other error: %s' % err) + error_details = str(err) + if json_error is not None: + self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error)) + error_details = json_error + if 
response: + self.log_debug(status_code, response.content) + return json_dict, error_details, on_cloud_request_id + + # If an error was reported in the json payload, it is handled below + def get(self, api, params=None, header=None): + method = 'GET' + return self.send_request(method=method, api=api, params=params, json=None, header=header) + + def post(self, api, data, params=None, header=None, gcp_type=False, authorized=True): + method = 'POST' + if gcp_type: + return self.send_request(method=method, api=api, params=params, data=data, header=header) + else: + return self.send_request(method=method, api=api, params=params, json=data, header=header, authorized=authorized) + + def patch(self, api, data, params=None, header=None): + method = 'PATCH' + return self.send_request(method=method, api=api, params=params, json=data, header=header) + + def put(self, api, data, params=None, header=None): + method = 'PUT' + return self.send_request(method=method, api=api, params=params, json=data, header=header) + + def delete(self, api, data, params=None, header=None): + method = 'DELETE' + return self.send_request(method=method, api=api, params=params, json=data, header=header) + + def get_token(self): + if self.sa_client_id is not None and self.sa_client_id != "" and self.sa_secret_key is not None and self.sa_secret_key != "": + response, error, ocr_id = self.post(self.environment_data['SA_AUTH_HOST'], + data={"grant_type": "client_credentials", "client_secret": self.sa_secret_key, + "client_id": self.sa_client_id, "audience": "https://api.cloud.netapp.com"}, + authorized=False) + elif self.refresh_token is not None and self.refresh_token != "": + response, error, ocr_id = self.post(self.environment_data['AUTH0_DOMAIN'] + '/oauth/token', + data={"grant_type": "refresh_token", "refresh_token": self.refresh_token, + "client_id": self.environment_data['AUTH0_CLIENT'], + "audience": "https://api.cloud.netapp.com"}, + authorized=False) + else: + self.module.fail_json(msg='Missing 
refresh_token or sa_client_id and sa_secret_key') + + if error: + self.module.fail_json(msg='Error acquiring token: %s, %s' % (str(error), str(response))) + token = response['access_token'] + token_type = response['token_type'] + + return token_type, token + + def wait_on_completion(self, api_url, action_name, task, retries, wait_interval): + while True: + cvo_status, failure_error_message, error = self.check_task_status(api_url) + if error is not None: + return error + if cvo_status == -1: + return 'Failed to %s %s, error: %s' % (task, action_name, failure_error_message) + elif cvo_status == 1: + return None # success + # status value 0 means pending + if retries == 0: + return 'Taking too long for %s to %s or not properly setup' % (action_name, task) + time.sleep(wait_interval) + retries = retries - 1 + + def check_task_status(self, api_url): + headers = { + 'X-Agent-Id': self.format_client_id(self.module.params['client_id']) + } + + network_retries = 3 + while True: + result, error, dummy = self.get(api_url, None, header=headers) + if error is not None: + if network_retries <= 0: + return 0, '', error + time.sleep(1) + network_retries -= 1 + else: + response = result + break + return response['status'], response['error'], None + + def log_error(self, status_code, message): + LOG.error("%s: %s", status_code, message) + + def log_debug(self, status_code, content): + LOG.debug("%s: %s", status_code, content) + + def log_request(self, method, params, url, json, data, headers): + contents = { + 'method': method, + 'url': url, + 'json': json, + 'data': data + } + if params: + contents['params'] = params + if self.log_headers: + contents['headers'] = headers + self.log_debug('sending', repr(contents)) diff --git a/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py new file mode 100644 index 000000000..aa73f205a --- /dev/null +++ 
b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py @@ -0,0 +1,1381 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2022, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +''' Support class for NetApp ansible modules ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from copy import deepcopy +import json +import re +import base64 +import time + + +def cmp(a, b): + ''' + Python 3 does not have a cmp function, this will do the cmp. + :param a: first object to check + :param b: second object to check + :return: + ''' + # convert to lower case for string comparison. + if a is None: + return -1 + if isinstance(a, str) and isinstance(b, str): + a = a.lower() + b = b.lower() + # if list has string element, convert string to lower case. + if isinstance(a, list) and isinstance(b, list): + a = [x.lower() if isinstance(x, str) else x for x in a] + b = [x.lower() if isinstance(x, str) else x for x in b] + a.sort() + b.sort() + return (a > b) - (a < b) + + +class NetAppModule(object): + ''' + Common class for NetApp modules + set of support functions to derive actions based + on the current state of the system, and a desired state + ''' + + def __init__(self): + self.log = [] + self.changed = False + self.parameters = {'name': 'not intialized'} + + def set_parameters(self, ansible_params): + self.parameters = {} + for param in ansible_params: + if ansible_params[param] is not None: + self.parameters[param] = ansible_params[param] + return self.parameters + + def get_cd_action(self, current, desired): + ''' takes a desired state and a current state, and return an action: + create, delete, None + eg: + is_present = 'absent' + some_object = self.get_object(source) + if some_object is not None: + is_present = 'present' + action = cd_action(current=is_present, desired = self.desired.state()) + ''' + desired_state = desired['state'] if 'state' in desired else 'present' + if current is None and desired_state == 'absent': + return None + if current is not None and desired_state == 'present': + return None + # change in state + self.changed = True + if current is not None: + return 'delete' + return 
'create' + + def compare_and_update_values(self, current, desired, keys_to_compare): + updated_values = {} + is_changed = False + for key in keys_to_compare: + if key in current: + if key in desired and desired[key] is not None: + if current[key] != desired[key]: + updated_values[key] = desired[key] + is_changed = True + else: + updated_values[key] = current[key] + else: + updated_values[key] = current[key] + + return updated_values, is_changed + + def get_working_environments_info(self, rest_api, headers): + ''' + Get all working environments info + ''' + api = "/occm/api/working-environments" + response, error, dummy = rest_api.get(api, None, header=headers) + if error is not None: + return response, error + else: + return response, None + + def look_up_working_environment_by_name_in_list(self, we_list, name): + ''' + Look up working environment by the name in working environment list + ''' + for we in we_list: + if we['name'] == name: + return we, None + return None, "look_up_working_environment_by_name_in_list: Working environment not found" + + def get_working_environment_details_by_name(self, rest_api, headers, name, provider=None): + ''' + Use working environment name to get working environment details including: + name: working environment name, + publicID: working environment ID + cloudProviderName, + isHA, + svmName + ''' + # check the working environment exist or not + api = "/occm/api/working-environments/exists/" + name + response, error, dummy = rest_api.get(api, None, header=headers) + if error is not None: + return None, error + + # get working environment lists + api = "/occm/api/working-environments" + response, error, dummy = rest_api.get(api, None, header=headers) + if error is not None: + return None, error + # look up the working environment in the working environment lists + if provider is None or provider == 'onPrem': + working_environment_details, error = 
self.look_up_working_environment_by_name_in_list(response['onPremWorkingEnvironments'], name) + if error is None: + return working_environment_details, None + if provider is None or provider == 'gcp': + working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['gcpVsaWorkingEnvironments'], name) + if error is None: + return working_environment_details, None + if provider is None or provider == 'azure': + working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['azureVsaWorkingEnvironments'], name) + if error is None: + return working_environment_details, None + if provider is None or provider == 'aws': + working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['vsaWorkingEnvironments'], name) + if error is None: + return working_environment_details, None + return None, "get_working_environment_details_by_name: Working environment not found" + + def get_working_environment_details(self, rest_api, headers): + ''' + Use working environment id to get working environment details including: + name: working environment name, + publicID: working environment ID + cloudProviderName, + ontapClusterProperties, + isHA, + status, + userTags, + workingEnvironmentType, + ''' + api = "/occm/api/working-environments/" + api += self.parameters['working_environment_id'] + response, error, dummy = rest_api.get(api, None, header=headers) + if error: + return None, "Error: get_working_environment_details %s" % error + return response, None + + def get_aws_fsx_details(self, rest_api, header=None, name=None): + ''' + Use working environment id and tenantID to get working environment details including: + name: working environment name, + publicID: working environment ID + ''' + api = "/fsx-ontap/working-environments/" + api += self.parameters['tenant_id'] + count = 0 + fsx_details = None + if name is None: + name = self.parameters['name'] + response, error, dummy = 
rest_api.get(api, None, header=header) + if error: + return response, "Error: get_aws_fsx_details %s" % error + for each in response: + if each['name'] == name: + count += 1 + fsx_details = each + if self.parameters.get('working_environment_id'): + if each['id'] == self.parameters['working_environment_id']: + return each, None + if count == 1: + return fsx_details, None + elif count > 1: + return response, "More than one AWS FSx found for %s, use working_environment_id for delete" \ + "or use different name for create" % name + return None, None + + def get_aws_fsx_details_by_id(self, rest_api, header=None): + ''' + Use working environment id and tenantID to get working environment details including: + publicID: working environment ID + ''' + api = "/fsx-ontap/working-environments/%s" % self.parameters['tenant_id'] + response, error, dummy = rest_api.get(api, None, header=header) + if error: + return response, "Error: get_aws_fsx_details %s" % error + for each in response: + if self.parameters.get('destination_working_environment_id') and each['id'] == self.parameters['destination_working_environment_id']: + return each, None + return None, None + + def get_aws_fsx_details_by_name(self, rest_api, header=None): + ''' + Use working environment name and tenantID to get working environment details including: + name: working environment name, + ''' + api = "/fsx-ontap/working-environments/%s" % self.parameters['tenant_id'] + count = 0 + fsx_details = None + response, error, dummy = rest_api.get(api, None, header=header) + if error: + return response, "Error: get_aws_fsx_details_by_name %s" % error + for each in response: + if each['name'] == self.parameters['destination_working_environment_name']: + count += 1 + fsx_details = each + if count == 1: + return fsx_details['id'], None + if count > 1: + return response, "More than one AWS FSx found for %s" % self.parameters['name'] + return None, None + + def get_aws_fsx_svm(self, rest_api, id, header=None): + ''' + Use 
working environment id and tenantID to get FSx svm details including: + publicID: working environment ID + ''' + api = "/occm/api/fsx/working-environments/%s/svms" % id + response, error, dummy = rest_api.get(api, None, header=header) + if error: + return response, "Error: get_aws_fsx_svm %s" % error + if len(response) == 0: + return None, "Error: no SVM found for %s" % id + return response[0]['name'], None + + def get_working_environment_detail_for_snapmirror(self, rest_api, headers): + + source_working_env_detail, dest_working_env_detail = {}, {} + if self.parameters.get('source_working_environment_id'): + api = '/occm/api/working-environments' + working_env_details, error, dummy = rest_api.get(api, None, header=headers) + if error: + return None, None, "Error getting WE info: %s: %s" % (error, working_env_details) + for dummy, values in working_env_details.items(): + for each in values: + if each['publicId'] == self.parameters['source_working_environment_id']: + source_working_env_detail = each + break + elif self.parameters.get('source_working_environment_name'): + source_working_env_detail, error = self.get_working_environment_details_by_name(rest_api, headers, + self.parameters['source_working_environment_name']) + if error: + return None, None, error + else: + return None, None, "Cannot find working environment by source_working_environment_id or source_working_environment_name" + + if self.parameters.get('destination_working_environment_id'): + if self.parameters['destination_working_environment_id'].startswith('fs-'): + if self.parameters.get('tenant_id'): + working_env_details, error = self.get_aws_fsx_details_by_id(rest_api, header=headers) + if error: + return None, None, "Error getting WE info for FSx: %s: %s" % (error, working_env_details) + dest_working_env_detail['publicId'] = self.parameters['destination_working_environment_id'] + svm_name, error = self.get_aws_fsx_svm(rest_api, self.parameters['destination_working_environment_id'], header=headers) 
+ if error: + return None, None, "Error getting svm name for FSx: %s" % error + dest_working_env_detail['svmName'] = svm_name + else: + return None, None, "Cannot find FSx WE by destination WE %s, missing tenant_id" % self.parameters['destination_working_environment_id'] + else: + api = '/occm/api/working-environments' + working_env_details, error, dummy = rest_api.get(api, None, header=headers) + if error: + return None, None, "Error getting WE info: %s: %s" % (error, working_env_details) + for dummy, values in working_env_details.items(): + for each in values: + if each['publicId'] == self.parameters['destination_working_environment_id']: + dest_working_env_detail = each + break + elif self.parameters.get('destination_working_environment_name'): + if self.parameters.get('tenant_id'): + fsx_id, error = self.get_aws_fsx_details_by_name(rest_api, header=headers) + if error: + return None, None, "Error getting WE info for FSx: %s" % error + dest_working_env_detail['publicId'] = fsx_id + svm_name, error = self.get_aws_fsx_svm(rest_api, fsx_id, header=headers) + if error: + return None, None, "Error getting svm name for FSx: %s" % error + dest_working_env_detail['svmName'] = svm_name + else: + dest_working_env_detail, error = self.get_working_environment_details_by_name(rest_api, headers, + self.parameters['destination_working_environment_name']) + if error: + return None, None, error + else: + return None, None, "Cannot find working environment by destination_working_environment_id or destination_working_environment_name" + + return source_working_env_detail, dest_working_env_detail, None + + def create_account(self, rest_api): + """ + Create Account + :return: Account ID + """ + # TODO? do we need to create an account? And the code below is broken + return None, 'Error: creating an account is not supported.' 
+ # headers = { + # "X-User-Token": rest_api.token_type + " " + rest_api.token, + # } + + # api = '/tenancy/account/MyAccount' + # account_res, error, dummy = rest_api.post(api, header=headers) + # account_id = None if error is not None else account_res['accountPublicId'] + # return account_id, error + + def get_or_create_account(self, rest_api): + """ + Get Account + :return: Account ID + """ + accounts, error = self.get_account_info(rest_api) + if error is not None: + return None, error + if len(accounts) == 0: + return None, 'Error: account cannot be located - check credentials or provide account_id.' + # TODO? creating an account is not supported + # return self.create_account(rest_api) + + return accounts[0]['accountPublicId'], None + + def get_account_info(self, rest_api, headers=None): + """ + Get Account + :return: Account ID + """ + headers = { + "X-User-Token": rest_api.token_type + " " + rest_api.token, + } + + api = '/tenancy/account' + account_res, error, dummy = rest_api.get(api, header=headers) + if error is not None: + return None, error + return account_res, None + + def get_account_id(self, rest_api): + accounts, error = self.get_account_info(rest_api) + if error: + return None, error + if not accounts: + return None, 'Error: no account found - check credentials or provide account_id.' 
+ return accounts[0]['accountPublicId'], None + + def get_accounts_info(self, rest_api, headers): + ''' + Get all accounts info + ''' + api = "/occm/api/accounts" + response, error, dummy = rest_api.get(api, None, header=headers) + if error is not None: + return None, error + else: + return response, None + + def set_api_root_path(self, working_environment_details, rest_api): + ''' + set API url root path based on the working environment provider + ''' + provider = working_environment_details['cloudProviderName'] if working_environment_details.get('cloudProviderName') else None + api_root_path = None + if self.parameters['working_environment_id'].startswith('fs-'): + api_root_path = "/occm/api/fsx" + elif provider == "Amazon": + api_root_path = "/occm/api/aws/ha" if working_environment_details['isHA'] else "/occm/api/vsa" + elif working_environment_details['isHA']: + api_root_path = "/occm/api/" + provider.lower() + "/ha" + else: + api_root_path = "/occm/api/" + provider.lower() + "/vsa" + rest_api.api_root_path = api_root_path + + def have_required_parameters(self, action): + ''' + Check if all the required parameters in self.params are available or not besides the mandatory parameters + ''' + actions = {'create_aggregate': ['number_of_disks', 'disk_size_size', 'disk_size_unit', 'working_environment_id'], + 'update_aggregate': ['number_of_disks', 'disk_size_size', 'disk_size_unit', 'working_environment_id'], + 'delete_aggregate': ['working_environment_id'], + } + missed_params = [ + parameter + for parameter in actions[action] + if parameter not in self.parameters + ] + + if not missed_params: + return True, None + else: + return False, missed_params + + def get_modified_attributes(self, current, desired, get_list_diff=False): + ''' takes two dicts of attributes and return a dict of attributes that are + not in the current state + It is expected that all attributes of interest are listed in current and + desired. 
+ :param: current: current attributes in ONTAP + :param: desired: attributes from playbook + :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute + :return: dict of attributes to be modified + :rtype: dict + + NOTE: depending on the attribute, the caller may need to do a modify or a + different operation (eg move volume if the modified attribute is an + aggregate name) + ''' + # if the object does not exist, we can't modify it + modified = {} + if current is None: + return modified + + # error out if keys do not match + # self.check_keys(current, desired) + + # collect changed attributes + for key, value in current.items(): + if key in desired and desired[key] is not None: + if isinstance(value, list): + modified_list = self.compare_lists(value, desired[key], get_list_diff) # get modified list from current and desired + if modified_list is not None: + modified[key] = modified_list + elif isinstance(value, dict): + modified_dict = self.get_modified_attributes(value, desired[key]) + if modified_dict: + modified[key] = modified_dict + else: + try: + result = cmp(value, desired[key]) + except TypeError as exc: + raise TypeError("%s, key: %s, value: %s, desired: %s" % (repr(exc), key, repr(value), repr(desired[key]))) + else: + if result != 0: + modified[key] = desired[key] + if modified: + self.changed = True + return modified + + @staticmethod + def compare_lists(current, desired, get_list_diff): + ''' compares two lists and return a list of elements that are either the desired elements or elements that are + modified from the current state depending on the get_list_diff flag + :param: current: current item attribute in ONTAP + :param: desired: attributes from playbook + :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute + :return: list of attributes to be modified + :rtype: list + ''' + current_copy = deepcopy(current) + desired_copy = deepcopy(desired) + + 
# get what in desired and not in current + desired_diff_list = list() + for item in desired: + if item in current_copy: + current_copy.remove(item) + else: + desired_diff_list.append(item) + + # get what in current but not in desired + current_diff_list = [] + for item in current: + if item in desired_copy: + desired_copy.remove(item) + else: + current_diff_list.append(item) + + if desired_diff_list or current_diff_list: + # there are changes + if get_list_diff: + return desired_diff_list + else: + return desired + else: + return None + + @staticmethod + def convert_module_args_to_api(parameters, exclusion=None): + ''' + Convert a list of string module args to API option format. + For example, convert test_option to testOption. + :param parameters: dict of parameters to be converted. + :param exclusion: list of parameters to be ignored. + :return: dict of key value pairs. + ''' + exclude_list = ['api_url', 'token_type', 'refresh_token', 'sa_secret_key', 'sa_client_id'] + if exclusion is not None: + exclude_list += exclusion + api_keys = {} + for k, v in parameters.items(): + if k not in exclude_list: + words = k.split("_") + api_key = "" + for word in words: + if len(api_key) > 0: + word = word.title() + api_key += word + api_keys[api_key] = v + return api_keys + + @staticmethod + def convert_data_to_tabbed_jsonstring(data): + ''' + Convert a dictionary data to json format string + ''' + dump = json.dumps(data, indent=2, separators=(',', ': ')) + return re.sub( + '\n +', + lambda match: '\n' + '\t' * int(len(match.group().strip('\n')) / 2), + dump, + ) + + @staticmethod + def encode_certificates(certificate_file): + ''' + Read certificate file and encode it + ''' + try: + with open(certificate_file, mode='rb') as fh: + cert = fh.read() + except (OSError, IOError) as exc: + return None, str(exc) + if not cert: + return None, "Error: file is empty" + return base64.b64encode(cert).decode('utf-8'), None + + @staticmethod + def get_occm_agents_by_account(rest_api, 
account_id): + """ + Collect a list of agents matching account_id. + :return: list of agents, error + """ + params = {'account_id': account_id} + api = "/agents-mgmt/agent" + headers = { + "X-User-Token": rest_api.token_type + " " + rest_api.token, + } + agents, error, dummy = rest_api.get(api, header=headers, params=params) + return agents, error + + def get_occm_agents_by_name(self, rest_api, account_id, name, provider): + """ + Collect a list of agents matching account_id, name, and provider. + :return: list of agents, error + """ + # I tried to query by name and provider in addition to account_id, but it returned everything + agents, error = self.get_occm_agents_by_account(rest_api, account_id) + if isinstance(agents, dict) and 'agents' in agents: + agents = [agent for agent in agents['agents'] if agent['name'] == name and agent['provider'] == provider] + return agents, error + + def get_agents_info(self, rest_api, headers): + """ + Collect a list of agents matching account_id. + :return: list of agents, error + """ + account_id, error = self.get_account_id(rest_api) + if error: + return None, error + agents, error = self.get_occm_agents_by_account(rest_api, account_id) + return agents, error + + def get_active_agents_info(self, rest_api, headers): + """ + Collect a list of agents matching account_id. 
+ :return: list of agents, error + """ + clients = [] + account_id, error = self.get_account_id(rest_api) + if error: + return None, error + agents, error = self.get_occm_agents_by_account(rest_api, account_id) + if isinstance(agents, dict) and 'agents' in agents: + agents = [agent for agent in agents['agents'] if agent['status'] == 'active'] + clients = [{'name': agent['name'], 'client_id': agent['agentId'], 'provider': agent['provider']} for agent in agents] + return clients, error + + @staticmethod + def get_occm_agent_by_id(rest_api, client_id): + """ + Fetch OCCM agent given its client id + :return: agent details, error + """ + api = "/agents-mgmt/agent/" + rest_api.format_client_id(client_id) + headers = { + "X-User-Token": rest_api.token_type + " " + rest_api.token, + } + response, error, dummy = rest_api.get(api, header=headers) + if isinstance(response, dict) and 'agent' in response: + agent = response['agent'] + return agent, error + return response, error + + @staticmethod + def check_occm_status(rest_api, client_id): + """ + Check OCCM status + :return: status + DEPRECATED - use get_occm_agent_by_id but the retrun value format is different! 
+ """ + + api = "/agents-mgmt/agent/" + rest_api.format_client_id(client_id) + headers = { + "X-User-Token": rest_api.token_type + " " + rest_api.token, + } + occm_status, error, dummy = rest_api.get(api, header=headers) + return occm_status, error + + def register_agent_to_service(self, rest_api, provider, vpc): + ''' + register agent to service + ''' + api = '/agents-mgmt/connector-setup' + + headers = { + "X-User-Token": rest_api.token_type + " " + rest_api.token, + } + body = { + "accountId": self.parameters['account_id'], + "name": self.parameters['name'], + "company": self.parameters['company'], + "placement": { + "provider": provider, + "region": self.parameters['region'], + "network": vpc, + "subnet": self.parameters['subnet_id'], + }, + "extra": { + "proxy": { + "proxyUrl": self.parameters.get('proxy_url'), + "proxyUserName": self.parameters.get('proxy_user_name'), + "proxyPassword": self.parameters.get('proxy_password'), + } + } + } + + if provider == "AWS": + body['placement']['network'] = vpc + + response, error, dummy = rest_api.post(api, body, header=headers) + return response, error + + def delete_occm(self, rest_api, client_id): + ''' + delete occm + ''' + api = '/agents-mgmt/agent/' + rest_api.format_client_id(client_id) + headers = { + "X-User-Token": rest_api.token_type + " " + rest_api.token, + "X-Tenancy-Account-Id": self.parameters['account_id'], + } + + occm_status, error, dummy = rest_api.delete(api, None, header=headers) + return occm_status, error + + def delete_occm_agents(self, rest_api, agents): + ''' + delete a list of occm + ''' + results = [] + for agent in agents: + if 'agentId' in agent: + occm_status, error = self.delete_occm(rest_api, agent['agentId']) + else: + occm_status, error = None, 'unexpected agent contents: %s' % repr(agent) + if error: + results.append((occm_status, error)) + return results + + @staticmethod + def call_parameters(): + return """ + { + "location": { + "value": "string" + }, + "virtualMachineName": { + 
"value": "string" + }, + "virtualMachineSize": { + "value": "string" + }, + "networkSecurityGroupName": { + "value": "string" + }, + "adminUsername": { + "value": "string" + }, + "virtualNetworkId": { + "value": "string" + }, + "adminPassword": { + "value": "string" + }, + "subnetId": { + "value": "string" + }, + "customData": { + "value": "string" + }, + "environment": { + "value": "prod" + }, + "storageAccount": { + "value": "string" + } + } + """ + + @staticmethod + def call_template(): + return """ + { + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "string", + "defaultValue": "eastus" + }, + "virtualMachineName": { + "type": "string" + }, + "virtualMachineSize":{ + "type": "string" + }, + "adminUsername": { + "type": "string" + }, + "virtualNetworkId": { + "type": "string" + }, + "networkSecurityGroupName": { + "type": "string" + }, + "adminPassword": { + "type": "securestring" + }, + "subnetId": { + "type": "string" + }, + "customData": { + "type": "string" + }, + "environment": { + "type": "string", + "defaultValue": "prod" + }, + "storageAccount": { + "type": "string" + } + }, + "variables": { + "vnetId": "[parameters('virtualNetworkId')]", + "subnetRef": "[parameters('subnetId')]", + "networkInterfaceName": "[concat(parameters('virtualMachineName'),'-nic')]", + "diagnosticsStorageAccountName": "[parameters('storageAccount')]", + "diagnosticsStorageAccountId": "[concat('Microsoft.Storage/storageAccounts/', variables('diagnosticsStorageAccountName'))]", + "diagnosticsStorageAccountType": "Standard_LRS", + "publicIpAddressName": "[concat(parameters('virtualMachineName'),'-ip')]", + "publicIpAddressType": "Dynamic", + "publicIpAddressSku": "Basic", + "msiExtensionName": "ManagedIdentityExtensionForLinux", + "occmOffer": "[if(equals(parameters('environment'), 'stage'), 'netapp-oncommand-cloud-manager-staging-preview', 
'netapp-oncommand-cloud-manager')]" + }, + "resources": [ + { + "name": "[parameters('virtualMachineName')]", + "type": "Microsoft.Compute/virtualMachines", + "apiVersion": "2018-04-01", + "location": "[parameters('location')]", + "dependsOn": [ + "[concat('Microsoft.Network/networkInterfaces/', variables('networkInterfaceName'))]", + "[concat('Microsoft.Storage/storageAccounts/', variables('diagnosticsStorageAccountName'))]" + ], + "properties": { + "osProfile": { + "computerName": "[parameters('virtualMachineName')]", + "adminUsername": "[parameters('adminUsername')]", + "adminPassword": "[parameters('adminPassword')]", + "customData": "[base64(parameters('customData'))]" + }, + "hardwareProfile": { + "vmSize": "[parameters('virtualMachineSize')]" + }, + "storageProfile": { + "imageReference": { + "publisher": "netapp", + "offer": "[variables('occmOffer')]", + "sku": "occm-byol", + "version": "latest" + }, + "osDisk": { + "createOption": "fromImage", + "managedDisk": { + "storageAccountType": "Premium_LRS" + } + }, + "dataDisks": [] + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "[resourceId('Microsoft.Network/networkInterfaces', variables('networkInterfaceName'))]" + } + ] + }, + "diagnosticsProfile": { + "bootDiagnostics": { + "enabled": true, + "storageUri": + "[concat('https://', variables('diagnosticsStorageAccountName'), '.blob.core.windows.net/')]" + } + } + }, + "plan": { + "name": "occm-byol", + "publisher": "netapp", + "product": "[variables('occmOffer')]" + }, + "identity": { + "type": "systemAssigned" + } + }, + { + "apiVersion": "2017-12-01", + "type": "Microsoft.Compute/virtualMachines/extensions", + "name": "[concat(parameters('virtualMachineName'),'/', variables('msiExtensionName'))]", + "location": "[parameters('location')]", + "dependsOn": [ + "[concat('Microsoft.Compute/virtualMachines/', parameters('virtualMachineName'))]" + ], + "properties": { + "publisher": "Microsoft.ManagedIdentity", + "type": 
"[variables('msiExtensionName')]", + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": true, + "settings": { + "port": 50342 + } + } + }, + { + "name": "[variables('diagnosticsStorageAccountName')]", + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "2015-06-15", + "location": "[parameters('location')]", + "properties": { + "accountType": "[variables('diagnosticsStorageAccountType')]" + } + }, + { + "name": "[variables('networkInterfaceName')]", + "type": "Microsoft.Network/networkInterfaces", + "apiVersion": "2018-04-01", + "location": "[parameters('location')]", + "dependsOn": [ + "[concat('Microsoft.Network/publicIpAddresses/', variables('publicIpAddressName'))]" + ], + "properties": { + "ipConfigurations": [ + { + "name": "ipconfig1", + "properties": { + "subnet": { + "id": "[variables('subnetRef')]" + }, + "privateIPAllocationMethod": "Dynamic", + "publicIpAddress": { + "id": "[resourceId(resourceGroup().name,'Microsoft.Network/publicIpAddresses', variables('publicIpAddressName'))]" + } + } + } + ], + "networkSecurityGroup": { + "id": "[parameters('networkSecurityGroupName')]" + } + } + }, + { + "name": "[variables('publicIpAddressName')]", + "type": "Microsoft.Network/publicIpAddresses", + "apiVersion": "2017-08-01", + "location": "[parameters('location')]", + "properties": { + "publicIpAllocationMethod": "[variables('publicIpAddressType')]" + }, + "sku": { + "name": "[variables('publicIpAddressSku')]" + } + } + ], + "outputs": { + "publicIpAddressName": { + "type": "string", + "value": "[variables('publicIpAddressName')]" + } + } + } + """ + + def get_tenant(self, rest_api, headers): + """ + Get workspace ID (tenant) + """ + api = '/occm/api/tenants' + response, error, dummy = rest_api.get(api, header=headers) + if error is not None: + return None, 'Error: unexpected response on getting tenant for cvo: %s, %s' % (str(error), str(response)) + + return response[0]['publicId'], None + + def get_nss(self, rest_api, headers): + """ + Get nss 
account + """ + api = '/occm/api/accounts' + response, error, dummy = rest_api.get(api, header=headers) + if error is not None: + return None, 'Error: unexpected response on getting nss for cvo: %s, %s' % (str(error), str(response)) + + if len(response['nssAccounts']) == 0: + return None, "Error: could not find any NSS account" + + return response['nssAccounts'][0]['publicId'], None + + def get_working_environment_property(self, rest_api, headers, fields): + # GET /vsa/working-environments/{workingEnvironmentId}?fields=status,awsProperties,ontapClusterProperties + api = '%s/working-environments/%s' % (rest_api.api_root_path, self.parameters['working_environment_id']) + params = {'fields': ','.join(fields)} + response, error, dummy = rest_api.get(api, params=params, header=headers) + if error: + return None, "Error: get_working_environment_property %s" % error + return response, None + + def user_tag_key_unique(self, tag_list, key_name): + checked_keys = [] + for t in tag_list: + if t[key_name] in checked_keys: + return False, 'Error: %s %s must be unique' % (key_name, t[key_name]) + else: + checked_keys.append(t[key_name]) + return True, None + + def current_label_exist(self, current, desired, is_ha=False): + current_key_set = set(current.keys()) + # Ignore auto generated gcp label in CVO GCP HA + current_key_set.discard('gcp_resource_id') + current_key_set.discard('count-down') + if is_ha: + current_key_set.discard('partner-platform-serial-number') + # python 2.6 doe snot support set comprehension + desired_keys = set([a_dict['label_key'] for a_dict in desired]) + if current_key_set.issubset(desired_keys): + return True, None + else: + return False, 'Error: label_key %s in gcp_label cannot be removed' % str(current_key_set) + + def is_label_value_changed(self, current_tags, desired_tags): + tag_keys = list(current_tags.keys()) + user_tag_keys = [key for key in tag_keys if + key not in ('count-down', 'gcp_resource_id', 'partner-platform-serial-number')] + 
desired_keys = [a_dict['label_key'] for a_dict in desired_tags] + if user_tag_keys == desired_keys: + for tag in desired_tags: + if current_tags[tag['label_key']] != tag['label_value']: + return True + return False + else: + return True + + def compare_gcp_labels(self, current_tags, user_tags, is_ha): + ''' + Update user-tag API behaves differently in GCP CVO. + It only supports adding gcp_labels and modifying the values of gcp_labels. Removing gcp_label is not allowed. + ''' + # check if any current gcp_labels are going to be removed or not + # gcp HA has one extra gcp_label created automatically + resp, error = self.user_tag_key_unique(user_tags, 'label_key') + if error is not None: + return None, error + # check if any current key labels are in the desired key labels + resp, error = self.current_label_exist(current_tags, user_tags, is_ha) + if error is not None: + return None, error + if self.is_label_value_changed(current_tags, user_tags): + return True, None + else: + # no change + return None, None + + def compare_cvo_tags_labels(self, current_tags, user_tags): + ''' + Compare exiting tags/labels and user input tags/labels to see if there is a change + gcp_labels: label_key, label_value + aws_tag/azure_tag: tag_key, tag_label + ''' + # azure has one extra azure_tag DeployedByOccm created automatically and it cannot be modified. 
+ tag_keys = list(current_tags.keys()) + user_tag_keys = [key for key in tag_keys if key != 'DeployedByOccm'] + current_len = len(user_tag_keys) + resp, error = self.user_tag_key_unique(user_tags, 'tag_key') + if error is not None: + return None, error + if len(user_tags) != current_len: + return True, None + # Check if tags/labels of desired configuration in current working environment + for item in user_tags: + if item['tag_key'] in current_tags and item['tag_value'] != current_tags[item['tag_key']]: + return True, None + elif item['tag_key'] not in current_tags: + return True, None + return False, None + + def is_cvo_tags_changed(self, rest_api, headers, parameters, tag_name): + ''' + Since tags/laabels are CVO optional parameters, this function needs to cover with/without tags/labels on both lists + ''' + # get working environment details by working environment ID + current, error = self.get_working_environment_details(rest_api, headers) + if error is not None: + return None, 'Error: Cannot find working environment %s error: %s' % (self.parameters['working_environment_id'], str(error)) + self.set_api_root_path(current, rest_api) + # compare tags + # no tags in current cvo + if 'userTags' not in current or len(current['userTags']) == 0: + return tag_name in parameters, None + + if tag_name == 'gcp_labels': + if tag_name in parameters: + return self.compare_gcp_labels(current['userTags'], parameters[tag_name], current['isHA']) + # if both are empty, no need to update + # Ignore auto generated gcp label in CVO GCP + # 'count-down', 'gcp_resource_id', and 'partner-platform-serial-number'(HA) + tag_keys = list(current['userTags'].keys()) + user_tag_keys = [key for key in tag_keys if key not in ('count-down', 'gcp_resource_id', 'partner-platform-serial-number')] + if not user_tag_keys: + return False, None + else: + return None, 'Error: Cannot remove current gcp_labels' + # no tags in input parameters + if tag_name not in parameters: + return True, None + else: + # 
has tags in input parameters and existing CVO + return self.compare_cvo_tags_labels(current['userTags'], parameters[tag_name]) + + def get_license_type(self, rest_api, headers, provider, region, instance_type, ontap_version, license_name): + # Permutation query example: + # aws: /metadata/permutations?region=us-east-1&instance_type=m5.xlarge&version=ONTAP-9.10.1.T1 + # azure: /metadata/permutations?region=westus&instance_type=Standard_E4s_v3&version=ONTAP-9.10.1.T1.azure + # gcp: /metadata/permutations?region=us-east1&instance_type=n2-standard-4&version=ONTAP-9.10.1.T1.gcp + # The examples of the ontapVersion in ontapClusterProperties response: + # AWS for both single and HA: 9.10.1RC1, 9.8 + # AZURE single: 9.10.1RC1.T1.azure. For HA: 9.10.1RC1.T1.azureha + # GCP for both single and HA: 9.10.1RC1.T1, 9.8.T1 + # To be used in permutation: + # AWS ontap_version format: ONTAP-x.x.x.T1 or ONTAP-x.x.x.T1.ha for Ha + # AZURE ontap_version format: ONTAP-x.x.x.T1.azure or ONTAP-x.x.x.T1.azureha for HA + # GCP ontap_version format: ONTAP-x.x.x.T1.gcp or ONTAP-x.x.x.T1.gcpha for HA + version = 'ONTAP-' + ontap_version + if provider == 'aws': + version += '.T1.ha' if self.parameters['is_ha'] else '.T1' + elif provider == 'gcp': + version += '.T1' if not ontap_version.endswith('T1') else '' + version += '.gcpha' if self.parameters['is_ha'] else '.gcp' + api = '%s/metadata/permutations' % rest_api.api_root_path + params = {'region': region, + 'version': version, + 'instance_type': instance_type + } + response, error, dummy = rest_api.get(api, params=params, header=headers) + if error: + return None, "Error: get_license_type %s %s" % (response, error) + for item in response: + if item['license']['name'] == license_name: + return item['license']['type'], None + + return None, "Error: get_license_type cannot get license type %s" % response + + def get_modify_cvo_params(self, rest_api, headers, desired, provider): + modified = [] + if desired['update_svm_password']: + modified = 
['svm_password'] + # Get current working environment property + properties = ['status', 'ontapClusterProperties.fields(upgradeVersions)'] + # instanceType in aws case is stored in awsProperties['instances'][0]['instanceType'] + if provider == 'aws': + properties.append('awsProperties') + else: + properties.append('providerProperties') + + we, err = self.get_working_environment_property(rest_api, headers, properties) + + if err is not None: + return None, err + + if we['status'] is None or we['status']['status'] != 'ON': + return None, "Error: get_modify_cvo_params working environment %s status is not ON. Operation cannot be performed." % we['publicId'] + + tier_level = None + if we['ontapClusterProperties']['capacityTierInfo'] is not None: + tier_level = we['ontapClusterProperties']['capacityTierInfo']['tierLevel'] + + # collect changed attributes + if tier_level is not None and tier_level != desired['tier_level']: + if provider == 'azure': + if desired['capacity_tier'] == 'Blob': + modified.append('tier_level') + elif provider == 'aws': + if desired['capacity_tier'] == 'S3': + modified.append('tier_level') + elif provider == 'gcp': + if desired['capacity_tier'] == 'cloudStorage': + modified.append('tier_level') + + if 'svm_name' in desired and we['svmName'] != desired['svm_name']: + modified.append('svm_name') + + if 'writing_speed_state' in desired: + if we['ontapClusterProperties']['writingSpeedState'] != desired['writing_speed_state'].upper(): + modified.append('writing_speed_state') + + if provider == 'aws': + current_instance_type = we['awsProperties']['instances'][0]['instanceType'] + region = we['awsProperties']['regionName'] + else: + current_instance_type = we['providerProperties']['instanceType'] + region = we['providerProperties']['regionName'] + + if current_instance_type != desired['instance_type']: + modified.append('instance_type') + + # check if license type is changed + current_license_type, error = self.get_license_type(rest_api, headers, 
provider, region, current_instance_type, + we['ontapClusterProperties']['ontapVersion'], + we['ontapClusterProperties']['licenseType']['name']) + if err is not None: + return None, error + if current_license_type != desired['license_type']: + modified.append('license_type') + + if desired['upgrade_ontap_version'] is True: + if desired['use_latest_version'] or desired['ontap_version'] == 'latest': + return None, "Error: To upgrade ONTAP image, the ontap_version must be a specific version" + current_version = 'ONTAP-' + we['ontapClusterProperties']['ontapVersion'] + if not desired['ontap_version'].startswith(current_version): + if we['ontapClusterProperties']['upgradeVersions'] is not None: + available_versions = [] + for image_info in we['ontapClusterProperties']['upgradeVersions']: + available_versions.append(image_info['imageVersion']) + # AWS ontap_version format: ONTAP-x.x.x.Tx or ONTAP-x.x.x.Tx.ha for Ha + # AZURE ontap_version format: ONTAP-x.x.x.Tx.azure or .azureha for HA + # GCP ontap_version format: ONTAP-x.x.x.Tx.gcp or .gcpha for HA + # Tx is not relevant for ONTAP version. But it is needed for the CVO creation + # upgradeVersion imageVersion format: ONTAP-x.x.x + if desired['ontap_version'].startswith(image_info['imageVersion']): + modified.append('ontap_version') + break + else: + return None, "Error: No ONTAP image available for version %s. Available versions: %s" % (desired['ontap_version'], available_versions) + + tag_name = { + 'aws': 'aws_tag', + 'azure': 'azure_tag', + 'gcp': 'gcp_labels' + } + + need_change, error = self.is_cvo_tags_changed(rest_api, headers, desired, tag_name[provider]) + if error is not None: + return None, error + if need_change: + modified.append(tag_name[provider]) + + # The updates of followings are not supported. Will response failure. 
+ for key, value in desired.items(): + if key == 'project_id' and we['providerProperties']['projectName'] != value: + modified.append('project_id') + if key == 'zone' and we['providerProperties']['zoneName'][0] != value: + modified.append('zone') + if key == 'cidr' and we['providerProperties']['vnetCidr'] != value: + modified.append('cidr') + if key == 'location' and we['providerProperties']['regionName'] != value: + modified.append('location') + if key == 'availability_zone' and we['providerProperties']['availabilityZone'] != value: + modified.append('availability_zone') + + if modified: + self.changed = True + return modified, None + + def is_cvo_update_needed(self, rest_api, headers, parameters, changeable_params, provider): + modify, error = self.get_modify_cvo_params(rest_api, headers, parameters, provider) + if error is not None: + return None, error + unmodifiable = [attr for attr in modify if attr not in changeable_params] + if unmodifiable: + return None, "%s cannot be modified." 
% str(unmodifiable) + + return modify, None + + def wait_cvo_update_complete(self, rest_api, headers): + retry_count = 65 + if self.parameters['is_ha'] is True: + retry_count *= 2 + for count in range(retry_count): + # get CVO status + we, err = self.get_working_environment_property(rest_api, headers, ['status']) + if err is not None: + return False, 'Error: get_working_environment_property failed: %s' % (str(err)) + if we['status']['status'] != "UPDATING": + return True, None + time.sleep(60) + + return False, 'Error: Taking too long for CVO to be active after update or not properly setup' + + def update_cvo_tags(self, api_root, rest_api, headers, tag_name, tag_list): + body = {} + tags = [] + if tag_list is not None: + for tag in tag_list: + atag = { + 'tagKey': tag['label_key'] if tag_name == "gcp_labels" else tag['tag_key'], + 'tagValue': tag['label_value'] if tag_name == "gcp_labels" else tag['tag_value'] + } + tags.append(atag) + body['tags'] = tags + + response, err, dummy = rest_api.put(api_root + "user-tags", body, header=headers) + if err is not None: + return False, 'Error: unexpected response on modifying tags: %s, %s' % (str(err), str(response)) + + return True, None + + def update_svm_password(self, api_root, rest_api, headers, svm_password): + body = {'password': svm_password} + response, err, dummy = rest_api.put(api_root + "set-password", body, header=headers) + if err is not None: + return False, 'Error: unexpected response on modifying svm_password: %s, %s' % (str(err), str(response)) + + return True, None + + def update_svm_name(self, api_root, rest_api, headers, svm_name): + # get current svmName + we, err = self.get_working_environment_property(rest_api, headers, ['ontapClusterProperties.fields(upgradeVersions)']) + if err is not None: + return False, 'Error: get_working_environment_property failed: %s' % (str(err)) + body = {'svmNewName': svm_name, + 'svmName': we['svmName']} + response, err, dummy = rest_api.put(api_root + "svm", body, 
header=headers) + if err is not None: + return False, "update svm_name error" + return True, None + + def update_tier_level(self, api_root, rest_api, headers, tier_level): + body = {'level': tier_level} + response, err, dummy = rest_api.post(api_root + "change-tier-level", body, header=headers) + if err is not None: + return False, 'Error: unexpected response on modify tier_level: %s, %s' % (str(err), str(response)) + + return True, None + + def update_writing_speed_state(self, api_root, rest_api, headers, writing_speed_state): + body = {'writingSpeedState': writing_speed_state.upper()} + response, err, dummy = rest_api.put(api_root + "writing-speed", body, header=headers) + if err is not None: + return False, 'Error: unexpected response on modify writing_speed_state: %s, %s' % (str(err), str(response)) + # check upgrade status + dummy, err = self.wait_cvo_update_complete(rest_api, headers) + return err is None, err + + def update_instance_license_type(self, api_root, rest_api, headers, instance_type, license_type): + body = {'instanceType': instance_type, + 'licenseType': license_type} + response, err, dummy = rest_api.put(api_root + "license-instance-type", body, header=headers) + if err is not None: + return False, 'Error: unexpected response on modify instance_type and license_type: %s, %s' % (str(err), str(response)) + # check upgrade status + dummy, err = self.wait_cvo_update_complete(rest_api, headers) + return err is None, err + + def set_config_flag(self, rest_api, headers): + body = {'value': True, 'valueType': 'BOOLEAN'} + base_url = '/occm/api/occm/config/skip-eligibility-paygo-upgrade' + response, err, dummy = rest_api.put(base_url, body, header=headers) + if err is not None: + return False, "set_config_flag error" + + return True, None + + def do_ontap_image_upgrade(self, rest_api, headers, desired): + # get ONTAP image version + we, err = self.get_working_environment_property(rest_api, headers, ['ontapClusterProperties.fields(upgradeVersions)']) + if 
err is not None: + return False, 'Error: get_working_environment_property failed: %s' % (str(err)) + body = {'updateType': "OCCM_PROVIDED"} + for image_info in we['ontapClusterProperties']['upgradeVersions']: + if image_info['imageVersion'] in desired: + body['updateParameter'] = image_info['imageVersion'] + break + # upgrade + base_url = "%s/working-environments/%s/update-image" % (rest_api.api_root_path, self.parameters['working_environment_id']) + response, err, dummy = rest_api.post(base_url, body, header=headers) + if err is not None: + return False, 'Error: unexpected response on do_ontap_image_upgrade: %s, %s' % (str(err), str(response)) + else: + return True, None + + def wait_ontap_image_upgrade_complete(self, rest_api, headers, desired): + retry_count = 65 + if self.parameters['is_ha'] is True: + retry_count *= 2 + for count in range(retry_count): + # get CVO status + we, err = self.get_working_environment_property(rest_api, headers, ['status', 'ontapClusterProperties']) + if err is not None: + return False, 'Error: get_working_environment_property failed: %s' % (str(err)) + if we['status']['status'] != "UPDATING" and we['ontapClusterProperties']['ontapVersion'] != "": + if we['ontapClusterProperties']['ontapVersion'] in desired: + return True, None + time.sleep(60) + + return False, 'Error: Taking too long for CVO to be active or not properly setup' + + def upgrade_ontap_image(self, rest_api, headers, desired): + # set flag + dummy, err = self.set_config_flag(rest_api, headers) + if err is not None: + return False, err + # upgrade + dummy, err = self.do_ontap_image_upgrade(rest_api, headers, desired) + if err is not None: + return False, err + # check upgrade status + dummy, err = self.wait_ontap_image_upgrade_complete(rest_api, headers, desired) + return err is None, err diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py 
new file mode 100644 index 000000000..9533d5f91 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py @@ -0,0 +1,332 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_aggregate +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_aggregate +short_description: NetApp Cloud Manager Aggregate +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Create, Modify or Delete Aggregate on Cloud Manager. + +options: + state: + description: + - Whether the specified aggregate should exist or not. + choices: ['present', 'absent'] + required: true + type: str + + name: + description: + - The name of the new aggregate. + required: true + type: str + + working_environment_name: + description: + - The working environment name where the aggregate will be created. + type: str + + working_environment_id: + description: + - The public ID of the working environment where the aggregate will be created. + type: str + + client_id: + description: + - The connector ID of the Cloud Manager Connector. + required: true + type: str + + number_of_disks: + description: + - The required number of disks in the new aggregate. + type: int + + disk_size_size: + description: + - The required size of the disks. + type: int + + disk_size_unit: + description: + - The disk size unit ['GB' or 'TB']. The default is 'TB'. + choices: ['GB', 'TB'] + default: 'TB' + type: str + + home_node: + description: + - The home node that the new aggregate should belong to. + type: str + + provider_volume_type: + description: + - The cloud provider volume type. 
+ type: str + + capacity_tier: + description: + - The aggregate's capacity tier for tiering cold data to object storage. + - If the value is NONE, the capacity_tier will not be set on aggregate creation. + choices: [ 'NONE', 'S3', 'Blob', 'cloudStorage'] + type: str + + iops: + description: + - Provisioned IOPS. Needed only when providerVolumeType is "io1". + type: int + + throughput: + description: + - Unit is Mb/s. Valid range 125-1000. + - Required only when provider_volume_type is 'gp3'. + type: int + +notes: +- Support check_mode. +''' + +EXAMPLES = ''' +- name: Create Aggregate + netapp.cloudmanager.na_cloudmanager_aggregate: + state: present + name: AnsibleAggregate + working_environment_name: testAWS + client_id: "{{ client_id }}" + number_of_disks: 2 + refresh_token: xxx + +- name: Delete Volume + netapp.cloudmanager.na_cloudmanager_aggregate: + state: absent + name: AnsibleAggregate + working_environment_name: testAWS + client_id: "{{ client_id }}" + refresh_token: xxx +''' + +RETURN = ''' +msg: + description: Success message. 
+ returned: success + type: str + sample: "Aggregate Created" +''' + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI + + +class NetAppCloudmanagerAggregate(object): + ''' + Contains methods to parse arguments, + derive details of CloudmanagerAggregate objects + and send requests to CloudmanagerAggregate via + the restApi + ''' + + def __init__(self): + ''' + Parse arguments, setup state variables, + check parameters and ensure request module is installed + ''' + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True, type='str'), + working_environment_id=dict(required=False, type='str'), + working_environment_name=dict(required=False, type='str'), + client_id=dict(required=True, type='str'), + number_of_disks=dict(required=False, type='int'), + disk_size_size=dict(required=False, type='int'), + disk_size_unit=dict(required=False, choices=['GB', 'TB'], default='TB'), + home_node=dict(required=False, type='str'), + provider_volume_type=dict(required=False, type='str'), + capacity_tier=dict(required=False, choices=['NONE', 'S3', 'Blob', 'cloudStorage'], type='str'), + iops=dict(required=False, type='int'), + throughput=dict(required=False, type='int'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_one_of=[ + ['refresh_token', 'sa_client_id'], + ['working_environment_name', 'working_environment_id'], + ], + required_together=[['sa_client_id', 'sa_secret_key']], + required_if=[ + ['provider_volume_type', 'gp3', ['iops', 'throughput']], + ['provider_volume_type', 'io1', ['iops']], + ], + 
supports_check_mode=True + ) + + self.na_helper = NetAppModule() + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic rest_api class + self.rest_api = CloudManagerRestAPI(self.module) + self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + self.rest_api.api_root_path = None + self.headers = { + 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id']) + } + + def get_aggregate(self): + ''' + Get aggregate details + ''' + working_environment_detail = None + if 'working_environment_id' in self.parameters: + working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers) + if error is not None: + self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error)) + elif 'working_environment_name' in self.parameters: + working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api, + self.headers, + self.parameters['working_environment_name']) + if error is not None: + self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error)) + else: + self.module.fail_json(msg="Error: Missing working environment information") + if working_environment_detail is not None: + self.parameters['working_environment_id'] = working_environment_detail['publicId'] + self.na_helper.set_api_root_path(working_environment_detail, self.rest_api) + api_root_path = self.rest_api.api_root_path + + if working_environment_detail['cloudProviderName'] != "Amazon": + api = '%s/aggregates/%s' % (api_root_path, working_environment_detail['publicId']) + else: + api = '%s/aggregates?workingEnvironmentId=%s' % (api_root_path, working_environment_detail['publicId']) + response, error, dummy = self.rest_api.get(api, header=self.headers) + if error: + self.module.fail_json(msg="Error: Failed to get aggregate list: %s, %s" % (str(error), str(response))) + for aggr in response: + if 
aggr['name'] == self.parameters['name']: + return aggr + return None + + def create_aggregate(self): + ''' + Create aggregate + ''' + api = '%s/aggregates' % self.rest_api.api_root_path + # check if all the required parameters exist + body = { + 'name': self.parameters['name'], + 'workingEnvironmentId': self.parameters['working_environment_id'], + 'numberOfDisks': self.parameters['number_of_disks'], + 'diskSize': {'size': self.parameters['disk_size_size'], + 'unit': self.parameters['disk_size_unit']}, + } + # optional parameters + if 'home_node' in self.parameters: + body['homeNode'] = self.parameters['home_node'] + if 'provider_volume_type' in self.parameters: + body['providerVolumeType'] = self.parameters['provider_volume_type'] + if 'capacity_tier' in self.parameters and self.parameters['capacity_tier'] != "NONE": + body['capacityTier'] = self.parameters['capacity_tier'] + if 'iops' in self.parameters: + body['iops'] = self.parameters['iops'] + if 'throughput' in self.parameters: + body['throughput'] = self.parameters['throughput'] + response, error, dummy = self.rest_api.post(api, body, header=self.headers) + if error is not None: + self.module.fail_json(msg="Error: unexpected response on aggregate creation: %s, %s" % (str(error), str(response))) + + def update_aggregate(self, add_number_of_disks): + ''' + Update aggregate with aggregate name and the parameters number_of_disks will be added + ''' + api = '%s/aggregates/%s/%s/disks' % (self.rest_api.api_root_path, self.parameters['working_environment_id'], + self.parameters['name']) + body = { + 'aggregateName': self.parameters['name'], + 'workingEnvironmentId': self.parameters['working_environment_id'], + 'numberOfDisks': add_number_of_disks + } + response, error, dummy = self.rest_api.post(api, body, header=self.headers) + if error is not None: + self.module.fail_json(msg="Error: unexpected response on aggregate adding disks: %s, %s" % (str(error), str(response))) + + def delete_aggregate(self): + ''' + Delete 
aggregate with aggregate name + ''' + api = '%s/aggregates/%s/%s' % (self.rest_api.api_root_path, self.parameters['working_environment_id'], + self.parameters['name']) + body = { + 'aggregateName': self.parameters['name'], + 'workingEnvironmentId': self.parameters['working_environment_id'], + } + response, error, dummy = self.rest_api.delete(api, body, header=self.headers) + if error is not None: + self.module.fail_json(msg="Error: unexpected response on aggregate deletion: %s, %s" % (str(error), str(response))) + + def apply(self): + ''' + Check, process and initiate aggregate operation + ''' + # check if aggregate exists + current = self.get_aggregate() + # check the action + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed: + action = cd_action + "_aggregate" + have_all_required, missed_params = self.na_helper.have_required_parameters(action) + if not have_all_required: + self.module.fail_json(msg="Error: Missing required parameters (%s) on %s" % (str(missed_params), action)) + add_disks = 0 + if current and self.parameters['state'] != 'absent': + have_all_required, missed_params = self.na_helper.have_required_parameters("update_aggregate") + if not have_all_required: + self.module.fail_json(msg="Error: Missing required parameters (%s) on update_aggregate" % str(missed_params)) + if len(current['disks']) < self.parameters['number_of_disks']: + add_disks = self.parameters['number_of_disks'] - len(current['disks']) + self.na_helper.changed = True + elif len(current['disks']) > self.parameters['number_of_disks']: + self.module.fail_json(msg="Error: Only add disk support. 
number_of_disks cannot be reduced") + + result_message = "" + if self.na_helper.changed and not self.module.check_mode: + if cd_action == "create": + self.create_aggregate() + result_message = "Aggregate Created" + elif cd_action == "delete": + self.delete_aggregate() + result_message = "Aggregate Deleted" + else: # modify + self.update_aggregate(add_disks) + result_message = "Aggregate Updated" + self.module.exit_json(changed=self.na_helper.changed, msg=result_message) + + +def main(): + ''' + Create NetAppCloudmanagerAggregate class instance and invoke apply + :return: None + ''' + na_cloudmanager_aggregate = NetAppCloudmanagerAggregate() + na_cloudmanager_aggregate.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py new file mode 100644 index 000000000..8e757b989 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py @@ -0,0 +1,458 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_aws_fsx +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_aws_fsx +short_description: Cloud ONTAP file system(FSx) in AWS +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.13.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Create or delete CVO/Working Environment for AWS FSx. + +options: + + state: + description: + - Whether the specified FSx in AWS should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + name: + required: true + description: + - The name of the CVO/Working Environment for AWS FSx to manage. 
+ type: str + + region: + description: + - The region where the working environment will be created. + type: str + + aws_credentials_name: + description: + - The name of the AWS Credentials account name. + type: str + + workspace_id: + description: + - The ID of the Cloud Manager workspace of working environment. + type: str + + tenant_id: + required: true + description: + - The NetApp account ID that the File System will be associated with. + type: str + + working_environment_id: + description: + - The ID of the AWS FSx working environment used for delete. + type: str + + storage_capacity_size: + description: + - volume size for the first data aggregate. + - For GB, the value can be [100 or 500]. + - For TB, the value can be [1,2,4,8,16]. + type: int + + storage_capacity_size_unit: + description: + - The unit for volume size. + choices: ['GiB', 'TiB'] + type: str + + fsx_admin_password: + description: + - The admin password for Cloud Volumes ONTAP fsxadmin user. + type: str + + throughput_capacity: + description: + - The capacity of the throughput. + choices: [512, 1024, 2048] + type: int + + security_group_ids: + description: + - The IDs of the security groups for the working environment, multiple security groups can be provided separated by ','. + type: list + elements: str + + kms_key_id: + description: + - AWS encryption parameters. It is required if using aws encryption. + type: str + + tags: + description: + - Additional tags for the FSx AWS working environment. + type: list + elements: dict + suboptions: + tag_key: + description: The key of the tag. + type: str + tag_value: + description: The tag value. + type: str + + primary_subnet_id: + description: + - The subnet ID of the first node. + type: str + + secondary_subnet_id: + description: + - The subnet ID of the second node. + type: str + + route_table_ids: + description: + - The list of route table IDs that will be updated with the floating IPs. 
+ type: list + elements: str + + minimum_ssd_iops: + description: + - Provisioned SSD IOPS. + type: int + + endpoint_ip_address_range: + description: + - The endpoint IP address range. + type: str + + import_file_system: + description: + - bool option to existing import AWS file system to CloudManager. + type: bool + default: false + version_added: 21.17.0 + + file_system_id: + description: + - The AWS file system ID to import to CloudManager. Required when import_file_system is 'True' + type: str + version_added: 21.17.0 + +notes: +- Support check_mode. +''' + +EXAMPLES = """ +- name: Create NetApp AWS FSx + netapp.cloudmanager.na_cloudmanager_aws_fsx: + state: present + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + name: fsxAnsible + region: us-east-2 + workspace_id: workspace-xxxxx + tenant_id: account-xxxxx + storage_capacity_size: 1024 + storage_capacity_size_unit: TiB + aws_credentials_name: xxxxxxx + primary_subnet_id: subnet-xxxxxx + secondary_subnet_id: subnet-xxxxx + throughput_capacity: 512 + fsx_admin_password: xxxxxxx + tags: [ + {tag_key: abcd, + tag_value: ABCD}] + +- name: Import AWS FSX + na_cloudmanager_aws_fsx: + state: present + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + name: fsxAnsible + region: us-west-2 + workspace_id: workspace-xxxxx + import_file_system: True + file_system_id: "{{ xxxxxxxxxxxxxxx }}" + tenant_id: account-xxxxx + aws_credentials_name: xxxxxxx + +- name: Delete NetApp AWS FSx + netapp.cloudmanager.na_cloudmanager_aws_fsx: + state: absent + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + working_environment_id: fs-xxxxxx + name: fsxAnsible + tenant_id: account-xxxxx +""" + +RETURN = ''' +working_environment_id: + description: Newly created AWS FSx working_environment_id. 
+ type: str + returned: success +''' + +import time + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI + + +class NetAppCloudManagerAWSFSX: + ''' object initialize and class methods ''' + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=True, type='str'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + region=dict(required=False, type='str'), + aws_credentials_name=dict(required=False, type='str'), + workspace_id=dict(required=False, type='str'), + tenant_id=dict(required=True, type='str'), + working_environment_id=dict(required=False, type='str'), + storage_capacity_size=dict(required=False, type='int'), + storage_capacity_size_unit=dict(required=False, type='str', choices=['GiB', 'TiB']), + fsx_admin_password=dict(required=False, type='str', no_log=True), + throughput_capacity=dict(required=False, type='int', choices=[512, 1024, 2048]), + security_group_ids=dict(required=False, type='list', elements='str'), + kms_key_id=dict(required=False, type='str', no_log=True), + tags=dict(required=False, type='list', elements='dict', options=dict( + tag_key=dict(type='str', no_log=False), + tag_value=dict(type='str') + )), + primary_subnet_id=dict(required=False, type='str'), + secondary_subnet_id=dict(required=False, type='str'), + route_table_ids=dict(required=False, type='list', elements='str'), + minimum_ssd_iops=dict(required=False, type='int'), + endpoint_ip_address_range=dict(required=False, type='str'), + import_file_system=dict(required=False, type='bool', default=False), + file_system_id=dict(required=False, 
type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ['state', 'present', ['region', 'aws_credentials_name', 'workspace_id', 'fsx_admin_password', 'throughput_capacity', + 'primary_subnet_id', 'secondary_subnet_id', 'storage_capacity_size', 'storage_capacity_size_unit']], + ['import_file_system', True, ['file_system_id']] + ], + required_one_of=[['refresh_token', 'sa_client_id']], + required_together=[['sa_client_id', 'sa_secret_key'], ['storage_capacity_size', 'storage_capacity_size_unit']], + supports_check_mode=True, + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = CloudManagerRestAPI(self.module) + self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + self.headers = None + if self.rest_api.simulator: + self.headers = { + 'x-simulator': 'true' + } + if self.parameters['state'] == 'present': + self.aws_credentials_id, error = self.get_aws_credentials_id() + if error is not None: + self.module.fail_json(msg=str(error)) + + def get_aws_credentials_id(self): + """ + Get aws_credentials_id + :return: AWS Credentials ID + """ + api = "/fsx-ontap/aws-credentials/" + api += self.parameters['tenant_id'] + response, error, dummy = self.rest_api.get(api, None, header=self.headers) + if error: + return response, "Error: getting aws_credentials_id %s" % error + for each in response: + if each['name'] == self.parameters['aws_credentials_name']: + return each['id'], None + return None, "Error: aws_credentials_name not found" + + def discover_aws_fsx(self): + """ + discover aws_fsx + """ + api = "/fsx-ontap/working-environments/%s/discover?credentials-id=%s&workspace-id=%s®ion=%s"\ + % (self.parameters['tenant_id'], self.aws_credentials_id, self.parameters['workspace_id'], self.parameters['region']) + response, error, dummy = self.rest_api.get(api, None, header=self.headers) + if error: + return "Error: discovering aws_fsx %s" 
% error + id_found = False + for each in response: + if each['id'] == self.parameters['file_system_id']: + id_found = True + break + if not id_found: + return "Error: file_system_id provided could not be found" + + def recover_aws_fsx(self): + """ + recover aws_fsx + """ + json = {"name": self.parameters['name'], + "region": self.parameters['region'], + "workspaceId": self.parameters['workspace_id'], + "credentialsId": self.aws_credentials_id, + "fileSystemId": self.parameters['file_system_id'], + } + api_url = "/fsx-ontap/working-environments/%s/recover" % self.parameters['tenant_id'] + response, error, dummy = self.rest_api.post(api_url, json, header=self.headers) + if error is not None: + self.module.fail_json( + msg="Error: unexpected response on recovering AWS FSx: %s, %s" % (error, response)) + + def create_aws_fsx(self): + """ Create AWS FSx """ + json = {"name": self.parameters['name'], + "region": self.parameters['region'], + "workspaceId": self.parameters['workspace_id'], + "credentialsId": self.aws_credentials_id, + "throughputCapacity": self.parameters['throughput_capacity'], + "storageCapacity": { + "size": self.parameters['storage_capacity_size'], + "unit": self.parameters['storage_capacity_size_unit']}, + "fsxAdminPassword": self.parameters['fsx_admin_password'], + "primarySubnetId": self.parameters['primary_subnet_id'], + "secondarySubnetId": self.parameters['secondary_subnet_id'], + } + + if self.parameters.get('tags') is not None: + tags = [] + for each_tag in self.parameters['tags']: + tag = { + 'key': each_tag['tag_key'], + 'value': each_tag['tag_value'] + } + + tags.append(tag) + json.update({"tags": tags}) + + if self.parameters.get('security_group_ids'): + json.update({"securityGroupIds": self.parameters['security_group_ids']}) + + if self.parameters.get('route_table_ids'): + json.update({"routeTableIds": self.parameters['route_table_ids']}) + + if self.parameters.get('kms_key_id'): + json.update({"kmsKeyId": self.parameters['kms_key_id']}) + 
+ if self.parameters.get('minimum_ssd_iops'): + json.update({"minimumSsdIops": self.parameters['minimum_ssd_iops']}) + + if self.parameters.get('endpoint_ip_address_range'): + json.update({"endpointIpAddressRange": self.parameters['endpoint_ip_address_range']}) + + api_url = '/fsx-ontap/working-environments/%s' % self.parameters['tenant_id'] + response, error, dummy = self.rest_api.post(api_url, json, header=self.headers) + if error is not None: + self.module.fail_json( + msg="Error: unexpected response on creating AWS FSx: %s, %s" % (str(error), str(response))) + working_environment_id = response['id'] + creation_wait_time = 30 + creation_retry_count = 30 + wait_on_completion_api_url = '/fsx-ontap/working-environments/%s/%s?provider-details=true' % (self.parameters['tenant_id'], working_environment_id) + + err = self.wait_on_completion_for_fsx(wait_on_completion_api_url, "AWS_FSX", "create", creation_retry_count, creation_wait_time) + + if err is not None: + self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating AWS FSX: %s" % str(err)) + + return working_environment_id + + def wait_on_completion_for_fsx(self, api_url, action_name, task, retries, wait_interval): + while True: + fsx_status, error = self.check_task_status_for_fsx(api_url) + if error is not None: + return error + if fsx_status['status']['status'] == "ON" and fsx_status['status']['lifecycle'] == "AVAILABLE": + return None + elif fsx_status['status']['status'] == "FAILED": + return 'Failed to %s %s' % (task, action_name) + if retries == 0: + return 'Taking too long for %s to %s or not properly setup' % (action_name, task) + time.sleep(wait_interval) + retries = retries - 1 + + def check_task_status_for_fsx(self, api_url): + + network_retries = 3 + exponential_retry_time = 1 + while True: + result, error, dummy = self.rest_api.get(api_url, None, header=self.headers) + if error is not None: + if network_retries > 0: + time.sleep(exponential_retry_time) + 
exponential_retry_time *= 2 + network_retries = network_retries - 1 + else: + return 0, error + else: + response = result + break + return response['providerDetails'], None + + def delete_aws_fsx(self, id, tenant_id): + """ + Delete AWS FSx + """ + api_url = '/fsx-ontap/working-environments/%s/%s' % (tenant_id, id) + response, error, dummy = self.rest_api.delete(api_url, None, header=self.headers) + if error is not None: + self.module.fail_json(msg="Error: unexpected response on deleting AWS FSx: %s, %s" % (str(error), str(response))) + + def apply(self): + """ + Apply action to the AWS FSx working Environment + :return: None + """ + working_environment_id = None + current, error = self.na_helper.get_aws_fsx_details(self.rest_api, header=self.headers) + if error is not None: + self.module.fail_json(msg="Error: unexpected response on fetching AWS FSx: %s" % str(error)) + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.parameters['import_file_system'] and cd_action == "create": + error = self.discover_aws_fsx() + if error is not None: + self.module.fail_json(msg="Error: unexpected response on discovering AWS FSx: %s" % str(error)) + cd_action = "import" + self.na_helper.changed = True + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == "import": + self.recover_aws_fsx() + working_environment_id = self.parameters['file_system_id'] + elif cd_action == "create": + working_environment_id = self.create_aws_fsx() + elif cd_action == "delete": + self.delete_aws_fsx(current['id'], self.parameters['tenant_id']) + + self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id) + + +def main(): + """ + Create AWS FSx class instance and invoke apply + :return: None + """ + obj_store = NetAppCloudManagerAWSFSX() + obj_store.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py 
b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py new file mode 100644 index 000000000..89e10a81b --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py @@ -0,0 +1,265 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_cifs_server +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_cifs_server +short_description: NetApp Cloud Manager cifs server +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Create or Delete a CIFS server on the Cloud Volume ONTAP system to support CIFS volumes, based on an Active Directory or Workgroup. + +options: + state: + description: + - Whether the specified cifs server should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + working_environment_name: + description: + - The working environment name where the cifs server will be created. + type: str + + working_environment_id: + description: + - The public ID of the working environment where the cifs server will be created. + type: str + + client_id: + description: + - The connector ID of the Cloud Manager Connector. + required: true + type: str + + domain: + description: + - The active directory domain name. For CIFS AD only. + type: str + + dns_domain: + description: + - The DNS domain name. For CIFS AD only. + type: str + + username: + description: + - The active directory admin user name. For CIFS AD only. + type: str + + password: + description: + - The active directory admin password. For CIFS AD only. + type: str + + ip_addresses: + description: + - The DNS server IP addresses. For CIFS AD only. 
class NetAppCloudmanagerCifsServer:
    """Create or delete a CIFS server (Active Directory or workgroup) on a CVO system."""

    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            working_environment_id=dict(required=False, type='str'),
            working_environment_name=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
            domain=dict(required=False, type='str'),
            dns_domain=dict(required=False, type='str'),
            username=dict(required=False, type='str'),
            password=dict(required=False, type='str', no_log=True),
            ip_addresses=dict(required=False, type='list', elements='str'),
            netbios=dict(required=False, type='str'),
            organizational_unit=dict(required=False, type='str'),
            is_workgroup=dict(required=False, type='bool'),
            server_name=dict(required=False, type='str'),
            workgroup_name=dict(required=False, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[
                ['refresh_token', 'sa_client_id'],
                ['working_environment_name', 'working_environment_id'],
            ],
            required_together=[['sa_client_id', 'sa_secret_key']],
            # AD-only options are mutually exclusive with workgroup-only options.
            mutually_exclusive=[
                ('domain', 'server_name'),
                ('dns_domain', 'server_name'),
                ('username', 'server_name'),
                ('password', 'server_name'),
                ('ip_addresses', 'server_name'),
                ('netbios', 'server_name'),
                ('organizational_unit', 'server_name'),
                ('domain', 'workgroup_name'),
                ('dns_domain', 'workgroup_name'),
                ('username', 'workgroup_name'),
                ('password', 'workgroup_name'),
                ('ip_addresses', 'workgroup_name'),
                ('netbios', 'workgroup_name'),
                ('organizational_unit', 'workgroup_name'),
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
        self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }
        # Resolve the working environment either by id or by name.
        if self.parameters.get('working_environment_id'):
            working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
        else:
            working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
                                                                                                       self.headers,
                                                                                                       self.parameters['working_environment_name'])
        if working_environment_detail is not None:
            self.parameters['working_environment_id'] = working_environment_detail['publicId']
        else:
            self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
        self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)

    def get_cifs_server(self):
        """Return the existing CIFS server as a module-keyed dict, or None if absent."""
        response, err, dummy = self.rest_api.send_request("GET", "%s/working-environments/%s/cifs" % (
            self.rest_api.api_root_path, self.parameters['working_environment_id']), None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error on get_cifs_server: %s, %s" % (str(err), str(response)))
        current_cifs = dict()
        if response is None or len(response) == 0:
            return None
        # only one cifs server exists per working environment.
        for server in response:
            if server.get('activeDirectoryDomain'):
                current_cifs['domain'] = server['activeDirectoryDomain']
            if server.get('dnsDomain'):
                current_cifs['dns_domain'] = server['dnsDomain']
            if server.get('ipAddresses'):
                current_cifs['ip_addresses'] = server['ipAddresses']
            if server.get('organizationalUnit'):
                current_cifs['organizational_unit'] = server['organizationalUnit']
            if server.get('netBIOS'):
                current_cifs['netbios'] = server['netBIOS']
        return current_cifs

    def create_cifs_server(self):
        """Create an AD or workgroup CIFS server on the working environment."""
        # These keys need manual mapping to API names, so keep them out of the
        # generic snake_case -> camelCase conversion.
        exclude_list = ['client_id', 'domain', 'netbios', 'username', 'password']
        server = self.na_helper.convert_module_args_to_api(self.parameters, exclude_list)
        if self.parameters.get('domain'):
            server['activeDirectoryDomain'] = self.parameters['domain']
        if self.parameters.get('netbios'):
            server['netBIOS'] = self.parameters['netbios']
        if self.parameters.get('username'):
            server['activeDirectoryUsername'] = self.parameters['username']
        if self.parameters.get('password'):
            server['activeDirectoryPassword'] = self.parameters['password']
        url = "%s/working-environments/%s/cifs" % (self.rest_api.api_root_path,
                                                   self.parameters['working_environment_id'])
        if self.parameters.get('is_workgroup'):
            url = url + "-workgroup"

        response, err, dummy = self.rest_api.send_request("POST", url, None, server, header=self.headers)
        if err is not None:
            # BUG FIX: message previously read "Error on create_cifs_server failed: ..."
            # (double verb); reworded to match the sibling error messages.
            self.module.fail_json(changed=False, msg="Error on create_cifs_server: %s, %s" % (str(err), str(response)))

    def delete_cifs_server(self):
        """Delete the CIFS server from the working environment."""
        response, err, dummy = self.rest_api.send_request("POST", "%s/working-environments/%s/delete-cifs" % (
            self.rest_api.api_root_path, self.parameters['working_environment_id']), None, {}, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error on delete_cifs_server: %s, %s" % (str(err), str(response)))

    def apply(self):
        """Determine and perform the create/delete action; honors check_mode."""
        current = self.get_cifs_server()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_cifs_server()
            elif cd_action == 'delete':
                self.delete_cifs_server()
        self.module.exit_json(changed=self.na_helper.changed)


def main():
    '''Main Function'''
    server = NetAppCloudmanagerCifsServer()
    server.apply()


if __name__ == '__main__':
    main()
netapp.cloudmanager.netapp.cloudmanager +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) + +description: + - Create or delete Cloud Manager connector for AWS. + - This module requires to be authenticated with AWS. This can be done with C(aws configure). + +options: + + state: + description: + - Whether the specified Cloud Manager connector for AWS should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + name: + required: true + description: + - The name of the Cloud Manager connector for AWS to manage. + type: str + + instance_type: + description: + - The type of instance (for example, t3.xlarge). At least 4 CPU and 16 GB of memory are required. + type: str + default: t3.xlarge + + key_name: + description: + - The name of the key pair to use for the Connector instance. + type: str + + subnet_id: + description: + - The ID of the subnet for the instance. + type: str + + region: + required: true + description: + - The region where the Cloud Manager Connector will be created. + type: str + + instance_id: + description: + - The ID of the EC2 instance used for delete. + type: str + + client_id: + description: + - The unique client ID of the Connector. + - The connector ID. + type: str + + ami: + description: + - The image ID. + type: str + + company: + description: + - The name of the company of the user. + type: str + + security_group_ids: + description: + - The IDs of the security groups for the instance, multiple security groups can be provided separated by ','. + type: list + elements: str + + iam_instance_profile_name: + description: + - The name of the instance profile for the Connector. + type: str + + enable_termination_protection: + description: + - Indicates whether to enable termination protection on the instance. + type: bool + default: false + + associate_public_ip_address: + description: + - Indicates whether to associate a public IP address to the instance. 
If not provided, the association will be done based on the subnet's configuration. + type: bool + default: true + + account_id: + description: + - The NetApp tenancy account ID. + type: str + + proxy_url: + description: + - The proxy URL, if using a proxy to connect to the internet. + type: str + + proxy_user_name: + description: + - The proxy user name, if using a proxy to connect to the internet. + type: str + + proxy_password: + description: + - The proxy password, if using a proxy to connect to the internet. + type: str + + proxy_certificates: + description: + - The proxy certificates, a list of certificate file names. + type: list + elements: str + version_added: 21.5.0 + + aws_tag: + description: + - Additional tags for the AWS EC2 instance. + type: list + elements: dict + suboptions: + tag_key: + description: The key of the tag. + type: str + tag_value: + description: The tag value. + type: str + +notes: +- Support check_mode. +''' + +EXAMPLES = """ +- name: Create NetApp Cloud Manager connector for AWS + netapp.cloudmanager.na_cloudmanager_connector_aws: + state: present + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + name: bsuhas_ansible_occm + region: us-west-1 + key_name: dev_automation + subnet_id: subnet-xxxxx + security_group_ids: [sg-xxxxxxxxxxx] + iam_instance_profile_name: OCCM_AUTOMATION + account_id: "{{ account-xxxxxxx }}" + company: NetApp + proxy_url: abc.com + proxy_user_name: xyz + proxy_password: abcxyz + proxy_certificates: [abc.crt.txt, xyz.crt.txt] + aws_tag: [ + {tag_key: abc, + tag_value: a123}] + +- name: Delete NetApp Cloud Manager connector for AWS + netapp.cloudmanager.na_cloudmanager_connector_aws: + state: absent + name: ansible + region: us-west-1 + account_id: "{{ account-xxxxxxx }}" + instance_id: i-xxxxxxxxxxxxx + client_id: xxxxxxxxxxxxxxxxxxx +""" + +RETURN = """ +ids: + description: Newly created AWS client ID in cloud manager, instance ID and account ID. 
class NetAppCloudManagerConnectorAWS(object):
    ''' object initialize and class methods '''

    def __init__(self):
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            instance_type=dict(required=False, type='str', default='t3.xlarge'),
            key_name=dict(required=False, type='str'),
            subnet_id=dict(required=False, type='str'),
            region=dict(required=True, type='str'),
            instance_id=dict(required=False, type='str'),
            client_id=dict(required=False, type='str'),
            ami=dict(required=False, type='str'),
            company=dict(required=False, type='str'),
            security_group_ids=dict(required=False, type='list', elements='str'),
            iam_instance_profile_name=dict(required=False, type='str'),
            enable_termination_protection=dict(required=False, type='bool', default=False),
            associate_public_ip_address=dict(required=False, type='bool', default=True),
            account_id=dict(required=False, type='str'),
            proxy_url=dict(required=False, type='str'),
            proxy_user_name=dict(required=False, type='str'),
            proxy_password=dict(required=False, type='str', no_log=True),
            proxy_certificates=dict(required=False, type='list', elements='str'),
            aws_tag=dict(required=False, type='list', elements='dict', options=dict(
                tag_key=dict(type='str', no_log=False),
                tag_value=dict(type='str')
            )),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ['state', 'present', ['company', 'iam_instance_profile_name', 'key_name', 'security_group_ids', 'subnet_id']],
            ],
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True
        )

        if HAS_AWS_LIB is False:
            # BUG FIX: the two literals were joined without a space, producing
            # "...pip install boto3.Import error: ..." in the failure message.
            self.module.fail_json(msg="the python AWS packages boto3 and botocore are required. Command is pip install boto3. "
                                      "Import error: %s" % str(IMPORT_EXCEPTION))

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = CloudManagerRestAPI(self.module)

    def get_instance(self):
        """
        Get Cloud Manager connector for AWS
        :return:
            Dictionary of current details if Cloud Manager connector for AWS
            None if Cloud Manager connector for AWS is not found
        """
        response = None
        client = boto3.client('ec2', region_name=self.parameters['region'])
        filters = [{'Name': 'tag:Name', 'Values': [self.parameters['name']]},
                   {'Name': 'tag:OCCMInstance', 'Values': ['true']}]

        # Prefer a direct instance-id lookup when one was provided.
        kwargs = {'Filters': filters} if self.parameters.get('instance_id') is None else {'InstanceIds': [self.parameters['instance_id']]}

        try:
            response = client.describe_instances(**kwargs)
        except ClientError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        if len(response['Reservations']) == 0:
            return None

        # Terminated instances may linger in describe_instances output; ignore them.
        actives = [instance for reservation in response['Reservations'] for instance in reservation['Instances'] if instance['State']['Name'] != 'terminated']
        if len(actives) == 1:
            return actives[0]
        if not actives:
            return None
        self.module.fail_json(msg="Error: found multiple instances for name=%s: %s" % (self.parameters['name'], str(actives)))

    def get_ami(self):
        """
        Get AWS EC2 Image
        :return:
            Latest AMI
        """
        instance_ami = None
        client = boto3.client('ec2', region_name=self.parameters['region'])

        try:
            instance_ami = client.describe_images(
                Filters=[
                    {
                        'Name': 'name',
                        'Values': [
                            self.rest_api.environment_data['AMI_FILTER'],
                        ]
                    },
                ],
                Owners=[
                    self.rest_api.environment_data['AWS_ACCOUNT'],
                ],
            )
        except ClientError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        # Pick the most recently created matching image.
        latest_date = instance_ami['Images'][0]['CreationDate']
        latest_ami = instance_ami['Images'][0]['ImageId']
        for image in instance_ami['Images']:
            if image['CreationDate'] > latest_date:
                latest_date = image['CreationDate']
                latest_ami = image['ImageId']

        return latest_ami

    def create_instance(self):
        """
        Create Cloud Manager connector for AWS
        :return: client_id, instance_id
        """
        if self.parameters.get('ami') is None:
            self.parameters['ami'] = self.get_ami()

        user_data, client_id = self.register_agent_to_service()

        ec2 = boto3.client('ec2', region_name=self.parameters['region'])

        tags = [
            {'Key': 'Name', 'Value': self.parameters['name']},
            {'Key': 'OCCMInstance', 'Value': 'true'},
        ]
        if self.parameters.get('aws_tag') is not None:
            for each_tag in self.parameters['aws_tag']:
                tags.append({'Key': each_tag['tag_key'], 'Value': each_tag['tag_value']})

        instance_input = {
            'BlockDeviceMappings': [
                {
                    'DeviceName': '/dev/sda1',
                    'Ebs': {
                        'Encrypted': True,
                        'VolumeSize': 100,
                        'VolumeType': 'gp2',
                    },
                },
            ],
            'ImageId': self.parameters['ami'],
            'MinCount': 1,
            'MaxCount': 1,
            'KeyName': self.parameters['key_name'],
            'InstanceType': self.parameters['instance_type'],
            'DisableApiTermination': self.parameters['enable_termination_protection'],
            'TagSpecifications': [
                {
                    'ResourceType': 'instance',
                    'Tags': tags
                },
            ],
            'IamInstanceProfile': {
                'Name': self.parameters['iam_instance_profile_name']
            },
            'UserData': user_data
        }

        # A public IP requires the subnet/groups to be set on the network interface,
        # not at top level (the two forms are mutually exclusive in the EC2 API).
        if self.parameters.get('associate_public_ip_address') is True:
            instance_input['NetworkInterfaces'] = [
                {
                    'AssociatePublicIpAddress': self.parameters['associate_public_ip_address'],
                    'DeviceIndex': 0,
                    'SubnetId': self.parameters['subnet_id'],
                    'Groups': self.parameters['security_group_ids']
                }
            ]
        else:
            instance_input['SubnetId'] = self.parameters['subnet_id']
            instance_input['SecurityGroupIds'] = self.parameters['security_group_ids']

        try:
            result = ec2.run_instances(**instance_input)
        except ClientError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        # Sleep for 2 minutes before polling, then poll every 30s (up to 16 times).
        time.sleep(120)
        retries = 16
        while retries > 0:
            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, client_id)
            if error is not None:
                self.module.fail_json(
                    msg="Error: not able to get occm status: %s, %s" % (str(error), str(agent)),
                    client_id=client_id, instance_id=result['Instances'][0]['InstanceId'])
            if agent['status'] == "active":
                break
            else:
                time.sleep(30)
            retries -= 1
        if retries == 0:
            # Taking too long for status to be active
            return self.module.fail_json(msg="Error: taking too long for OCCM agent to be active or not properly setup")

        return client_id, result['Instances'][0]['InstanceId']

    def get_vpc(self):
        """
        Get vpc
        :return: vpc ID
        """
        vpc_result = None
        ec2 = boto3.client('ec2', region_name=self.parameters['region'])

        vpc_input = {'SubnetIds': [self.parameters['subnet_id']]}

        try:
            vpc_result = ec2.describe_subnets(**vpc_input)
        except ClientError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        return vpc_result['Subnets'][0]['VpcId']

    def set_account_id(self):
        """Populate parameters['account_id'] if unset; return an error string or None."""
        if self.parameters.get('account_id') is None:
            response, error = self.na_helper.get_or_create_account(self.rest_api)
            if error is not None:
                return error
            self.parameters['account_id'] = response
        return None

    def register_agent_to_service(self):
        """
        Register agent to service and collect userdata by setting up connector
        :return: UserData, ClientID
        """
        vpc = self.get_vpc()

        if self.parameters.get('account_id') is None:
            error = self.set_account_id()
            if error is not None:
                self.module.fail_json(msg="Error: failed to get account: %s." % str(error))

        headers = {
            "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
            "X-Service-Request-Id": "111"
        }
        body = {
            "accountId": self.parameters['account_id'],
            "name": self.parameters['name'],
            "company": self.parameters['company'],
            "placement": {
                "provider": "AWS",
                "region": self.parameters['region'],
                "network": vpc,
                "subnet": self.parameters['subnet_id'],
            },
            "extra": {
                "proxy": {
                    "proxyUrl": self.parameters.get('proxy_url'),
                    "proxyUserName": self.parameters.get('proxy_user_name'),
                    "proxyPassword": self.parameters.get('proxy_password')
                }
            }
        }

        register_api = '/agents-mgmt/connector-setup'
        response, error, dummy = self.rest_api.post(register_api, body, header=headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on connector setup: %s, %s" % (str(error), str(response)))
        client_id = response['clientId']
        client_secret = response['clientSecret']

        u_data = {
            'instanceName': self.parameters['name'],
            'company': self.parameters['company'],
            'clientId': client_id,
            'clientSecret': client_secret,
            'systemId': UUID,
            'tenancyAccountId': self.parameters['account_id'],
            'proxySettings': {'proxyPassword': self.parameters.get('proxy_password'),
                              'proxyUserName': self.parameters.get('proxy_user_name'),
                              'proxyUrl': self.parameters.get('proxy_url'),
                              },
            'localAgent': True
        }

        if self.parameters.get('proxy_certificates') is not None:
            proxy_certificates = []
            for certificate_file in self.parameters['proxy_certificates']:
                encoded_certificate, error = self.na_helper.encode_certificates(certificate_file)
                if error:
                    self.module.fail_json(msg="Error: could not open/read file '%s' of proxy_certificates: %s" % (certificate_file, error))
                proxy_certificates.append(encoded_certificate)

            if proxy_certificates:
                u_data['proxySettings']['proxyCertificates'] = proxy_certificates

        user_data = self.na_helper.convert_data_to_tabbed_jsonstring(u_data)

        return user_data, client_id

    def delete_instance(self):
        """
        Delete OCCM instance
        :return:
            None on success, or an error string on failure/timeout.
        """
        ec2 = boto3.client('ec2', region_name=self.parameters['region'])
        try:
            ec2.terminate_instances(
                InstanceIds=[
                    self.parameters['instance_id'],
                ],
            )
        except ClientError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        if 'client_id' not in self.parameters:
            return None

        # Wait (up to 30 x 10s) for the OCCM agent to leave the 'active' state.
        retries = 30
        while retries > 0:
            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
            if error is not None:
                return "Error: not able to get occm agent status after deleting instance: %s, %s." % (str(error), str(agent))
            if agent['status'] != "active":
                break
            else:
                time.sleep(10)
            retries -= 1
        if retries == 0:
            # Taking too long for terminating OCCM
            return "Error: taking too long for instance to finish terminating."
        return None

    def get_occm_agents(self):
        """Collect OCCM agents by client_id (preferred) or by account+name lookup."""
        if 'client_id' in self.parameters:
            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
            if str(error) == '403' and 'Action not allowed for user' in str(agent):
                # assume the agent does not exist anymore
                agents, error = [], None
                self.module.warn('Client Id %s was not found for this account.' % self.parameters['client_id'])
            else:
                agents = [agent]
        else:
            # NOTE(review): set_account_id()'s error return is intentionally ignored
            # here; the 'account_id' presence check below handles the failure case.
            self.set_account_id()
            if 'account_id' in self.parameters:
                agents, error = self.na_helper.get_occm_agents_by_name(self.rest_api, self.parameters['account_id'],
                                                                       self.parameters['name'], 'AWS')
            else:
                self.module.warn('Without account_id, some agents may still exist.')
                agents, error = [], None
        if error:
            self.module.fail_json(
                msg="Error: getting OCCM agents: %s, %s" % (str(error), str(agents)))
        return agents

    def set_client_id(self):
        """Resolve client_id from the agent list when not provided; returns (client_id, agents)."""
        agents = self.get_occm_agents()
        client_id = self.parameters.get('client_id')
        if client_id is None:
            active_client_ids = [agent['agentId'] for agent in agents if 'agentId' in agent and agent['status'] == 'active']
            # Only adopt an id when it is unambiguous.
            if len(active_client_ids) == 1:
                client_id = active_client_ids[0]
                self.parameters['client_id'] = client_id
        return client_id, agents

    def delete_occm_agents(self, agents):
        """Delete the given OCCM agents; return an error string or None."""
        error = self.na_helper.delete_occm_agents(self.rest_api, agents)
        if error:
            return "Error: deleting OCCM agent(s): %s" % error
        return None

    def apply(self):
        """
        Apply action to the Cloud Manager connector for AWS
        :return: None
        """
        results = {
            'account_id': None,
            'client_id': None,
            'instance_id': None
        }
        agents = None
        current = self.get_instance()
        if current or self.parameters['state'] == 'absent':
            if self.parameters.get('instance_id') is None and current:
                self.parameters['instance_id'] = current['InstanceId']
            results['instance_id'] = self.parameters.get('instance_id')
            results['client_id'], agents = self.set_client_id()
            if current is None and agents:
                # it's possible the VM instance does not exist, but the clients are still present.
                current = agents

        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            results['modify'] = 'Note: modifying an existing connector is not supported at this time.'

        if not self.module.check_mode and self.na_helper.changed:
            if cd_action == 'create':
                results['client_id'], results['instance_id'] = self.create_instance()
            elif cd_action == 'delete':
                errors = []
                if self.parameters.get('instance_id'):
                    errors.append(self.delete_instance())
                if agents:
                    errors.append(self.delete_occm_agents(agents))
                errors = [error for error in errors if error]
                if errors:
                    self.module.fail_json(msg='Errors deleting instance or client: %s' % ', '.join(errors))

        results['account_id'] = self.parameters.get('account_id')
        results['changed'] = self.na_helper.changed
        self.module.exit_json(**results)


def main():
    """
    Create Cloud Manager connector for AWS class instance and invoke apply
    :return: None
    """
    obj_store = NetAppCloudManagerConnectorAWS()
    obj_store.apply()


if __name__ == '__main__':
    main()
+ choices: ['present', 'absent'] + default: 'present' + type: str + + name: + required: true + description: + - The name of the Cloud Manager connector for Azure to manage. + type: str + + virtual_machine_size: + description: + - The virtual machine type. (for example, Standard_DS3_v2). + - At least 4 CPU and 16 GB of memory are required. + type: str + default: Standard_DS3_v2 + + resource_group: + required: true + description: + - The resource group in Azure where the resources will be created. + type: str + + subnet_name: + required: true + description: + - The name of the subnet for the virtual machine. + - For example, in /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/virtualNetworks/xxx/subnets/default, + only default is needed. + aliases: + - subnet_id + type: str + version_added: '21.7.0' + + location: + required: true + description: + - The location where the Cloud Manager Connector will be created. + type: str + + client_id: + description: + - The unique client ID of the Connector. + - The connector ID. + type: str + + subscription_id: + required: true + description: + - The ID of the Azure subscription. + type: str + + company: + required: true + description: + - The name of the company of the user. + type: str + + vnet_name: + required: true + description: + - The name of the virtual network. + - For example, in /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/virtualNetworks/default, + only default is needed. + aliases: + - vnet_id + type: str + version_added: '21.7.0' + + vnet_resource_group: + description: + - The resource group in Azure associated with the virtual network. + - If not provided, it's assumed that the VNet is within the previously specified resource group. + type: str + + network_security_resource_group: + description: + - The resource group in Azure associated with the security group. + - If not provided, it's assumed that the security group is within the previously specified resource group. 
+ type: str + + network_security_group_name: + required: true + description: + - The name of the security group for the deployment. + type: str + + proxy_certificates: + description: + - The proxy certificates, a list of certificate file names. + type: list + elements: str + + associate_public_ip_address: + description: + - Indicates whether to associate the public IP address to the virtual machine. + type: bool + default: true + + account_id: + required: true + description: + - The NetApp tenancy account ID. + type: str + + proxy_url: + description: + - The proxy URL, if using a proxy to connect to the internet. + type: str + + proxy_user_name: + description: + - The proxy user name, if using a proxy to connect to the internet. + type: str + + proxy_password: + description: + - The proxy password, if using a proxy to connect to the internet. + type: str + + admin_username: + required: true + description: + - The user name for the Connector. + type: str + + admin_password: + required: true + description: + - The password for the Connector. + type: str + + storage_account: + description: + - The storage account can be created automatically. + - When C(storage_account) is not set, the name is constructed by appending 'sa' to the connector C(name). + - Storage account name must be between 3 and 24 characters in length and use numbers and lower-case letters only. + type: str + version_added: '21.17.0' +''' + +EXAMPLES = """ +- name: Create NetApp Cloud Manager connector for Azure. 
+ netapp.cloudmanager.na_cloudmanager_connector_azure: + state: present + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + name: bsuhas_ansible_occm + location: westus + resource_group: occm_group_westus + subnet_name: subnetxxxxx + vnet_name: Vnetxxxxx + subscription_id: "{{ xxxxxxxxxxxxxxxxx }}" + account_id: "{{ account-xxxxxxx }}" + company: NetApp + admin_password: Netapp123456 + admin_username: bsuhas + network_security_group_name: OCCM_SG + proxy_url: abc.com + proxy_user_name: xyz + proxy_password: abcxyz + proxy_certificates: [abc.crt.txt, xyz.crt.txt] + +- name: Delete NetApp Cloud Manager connector for Azure. + netapp.cloudmanager.na_cloudmanager_connector_azure: + state: absent + name: ansible + location: westus + resource_group: occm_group_westus + network_security_group_name: OCCM_SG + subnet_name: subnetxxxxx + company: NetApp + admin_password: Netapp123456 + admin_username: bsuhas + vnet_name: Vnetxxxxx + subscription_id: "{{ xxxxxxxxxxxxxxxxx }}" + account_id: "{{ account-xxxxxxx }}" + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + client_id: xxxxxxxxxxxxxxxxxxx +""" + +RETURN = """ +msg: + description: Newly created Azure connector id in cloud manager. 
+ type: str + returned: success + sample: 'xxxxxxxxxxxxxxxx' +""" + +import traceback +import time +import base64 +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI + +IMPORT_EXCEPTION = None + +try: + from azure.mgmt.resource import ResourceManagementClient + from azure.mgmt.compute import ComputeManagementClient + from azure.mgmt.network import NetworkManagementClient + from azure.mgmt.storage import StorageManagementClient + from azure.mgmt.resource.resources.models import Deployment + from azure.common.client_factory import get_client_from_cli_profile + from msrestazure.azure_exceptions import CloudError + HAS_AZURE_LIB = True +except ImportError as exc: + HAS_AZURE_LIB = False + IMPORT_EXCEPTION = exc + + +class NetAppCloudManagerConnectorAzure(object): + ''' object initialize and class methods ''' + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=True, type='str'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + virtual_machine_size=dict(required=False, type='str', default='Standard_DS3_v2'), + resource_group=dict(required=True, type='str'), + subscription_id=dict(required=True, type='str'), + subnet_name=dict(required=True, type='str', aliases=['subnet_id']), + vnet_name=dict(required=True, type='str', aliases=['vnet_id']), + vnet_resource_group=dict(required=False, type='str'), + location=dict(required=True, type='str'), + network_security_resource_group=dict(required=False, type='str'), + network_security_group_name=dict(required=True, 
type='str'), + client_id=dict(required=False, type='str'), + company=dict(required=True, type='str'), + proxy_certificates=dict(required=False, type='list', elements='str'), + associate_public_ip_address=dict(required=False, type='bool', default=True), + account_id=dict(required=True, type='str'), + proxy_url=dict(required=False, type='str'), + proxy_user_name=dict(required=False, type='str'), + proxy_password=dict(required=False, type='str', no_log=True), + admin_username=dict(required=True, type='str'), + admin_password=dict(required=True, type='str', no_log=True), + storage_account=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ['state', 'absent', ['client_id']] + ], + required_one_of=[['refresh_token', 'sa_client_id']], + required_together=[['sa_client_id', 'sa_secret_key']], + supports_check_mode=True + ) + + if HAS_AZURE_LIB is False: + self.module.fail_json(msg="the python AZURE library azure.mgmt and azure.common is required. Command is pip install azure-mgmt, azure-common." 
+ " Import error: %s" % str(IMPORT_EXCEPTION)) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + if 'storage_account' not in self.parameters or self.parameters['storage_account'] == "": + self.parameters['storage_account'] = self.parameters['name'].lower() + 'sa' + self.rest_api = CloudManagerRestAPI(self.module) + + def get_deploy_azure_vm(self): + """ + Get Cloud Manager connector for AZURE + :return: + Dictionary of current details if Cloud Manager connector for AZURE + None if Cloud Manager connector for AZURE is not found + """ + + exists = False + + resource_client = get_client_from_cli_profile(ResourceManagementClient) + try: + exists = resource_client.deployments.check_existence(self.parameters['resource_group'], self.parameters['name']) + + except CloudError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + + if not exists: + return None + + return exists + + def deploy_azure(self): + """ + Create Cloud Manager connector for Azure + :return: client_id + """ + + user_data, client_id = self.register_agent_to_service() + template = json.loads(self.na_helper.call_template()) + params = json.loads(self.na_helper.call_parameters()) + params['adminUsername']['value'] = self.parameters['admin_username'] + params['adminPassword']['value'] = self.parameters['admin_password'] + params['customData']['value'] = json.dumps(user_data) + params['location']['value'] = self.parameters['location'] + params['virtualMachineName']['value'] = self.parameters['name'] + params['storageAccount']['value'] = self.parameters['storage_account'] + if self.rest_api.environment == 'stage': + params['environment']['value'] = self.rest_api.environment + if '/subscriptions' in self.parameters['vnet_name']: + network = self.parameters['vnet_name'] + else: + if self.parameters.get('vnet_resource_group') is not None: + network = 
'/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % ( + self.parameters['subscription_id'], self.parameters['vnet_resource_group'], self.parameters['vnet_name']) + else: + network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % ( + self.parameters['subscription_id'], self.parameters['resource_group'], self.parameters['vnet_name']) + + if '/subscriptions' in self.parameters['subnet_name']: + subnet = self.parameters['subnet_name'] + else: + subnet = '%s/subnets/%s' % (network, self.parameters['subnet_name']) + + if self.parameters.get('network_security_resource_group') is not None: + network_security_group_name = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s' % ( + self.parameters['subscription_id'], self.parameters['network_security_resource_group'], self.parameters['network_security_group_name']) + else: + network_security_group_name = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s' % ( + self.parameters['subscription_id'], self.parameters['resource_group'], self.parameters['network_security_group_name']) + + params['virtualNetworkId']['value'] = network + params['networkSecurityGroupName']['value'] = network_security_group_name + params['virtualMachineSize']['value'] = self.parameters['virtual_machine_size'] + params['subnetId']['value'] = subnet + + try: + resource_client = get_client_from_cli_profile(ResourceManagementClient) + + resource_client.resource_groups.create_or_update( + self.parameters['resource_group'], + {"location": self.parameters['location']}) + + deployment_properties = { + 'mode': 'Incremental', + 'template': template, + 'parameters': params + } + resource_client.deployments.begin_create_or_update( + self.parameters['resource_group'], + self.parameters['name'], + Deployment(properties=deployment_properties) + ) + + except CloudError as error: + self.module.fail_json(msg="Error in 
deploy_azure: %s" % to_native(error), exception=traceback.format_exc()) + + # Sleep for 2 minutes + time.sleep(120) + retries = 30 + while retries > 0: + occm_resp, error = self.na_helper.check_occm_status(self.rest_api, client_id) + if error is not None: + self.module.fail_json( + msg="Error: Not able to get occm status: %s, %s" % (str(error), str(occm_resp))) + if occm_resp['agent']['status'] == "active": + break + else: + time.sleep(30) + retries -= 1 + if retries == 0: + # Taking too long for status to be active + return self.module.fail_json(msg="Taking too long for OCCM agent to be active or not properly setup") + + try: + compute_client = get_client_from_cli_profile(ComputeManagementClient) + vm = compute_client.virtual_machines.get(self.parameters['resource_group'], self.parameters['name']) + except CloudError as error: + return self.module.fail_json(msg="Error in deploy_azure (get identity): %s" % to_native(error), exception=traceback.format_exc()) + + principal_id = vm.identity.principal_id + return client_id, principal_id + + def register_agent_to_service(self): + """ + Register agent to service and collect userdata by setting up connector + :return: UserData, ClientID + """ + + if '/subscriptions' in self.parameters['vnet_name']: + network = self.parameters['vnet_name'] + else: + if self.parameters.get('vnet_resource_group') is not None: + network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % ( + self.parameters['subscription_id'], self.parameters['vnet_resource_group'], self.parameters['vnet_name']) + else: + network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % ( + self.parameters['subscription_id'], self.parameters['resource_group'], self.parameters['vnet_name']) + + if '/subscriptions' in self.parameters['subnet_name']: + subnet = self.parameters['subnet_name'] + else: + subnet = '%s/subnets/%s' % (network, self.parameters['subnet_name']) + + if 
self.parameters.get('account_id') is None: + response, error = self.na_helper.get_or_create_account(self.rest_api) + if error is not None: + self.module.fail_json( + msg="Error: unexpected response on getting account: %s, %s" % (str(error), str(response))) + self.parameters['account_id'] = response + + headers = { + "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token, + } + body = { + "accountId": self.parameters['account_id'], + "name": self.parameters['name'], + "company": self.parameters['company'], + "placement": { + "provider": "AZURE", + "region": self.parameters['location'], + "network": network, + "subnet": subnet, + }, + "extra": { + "proxy": { + "proxyUrl": self.parameters.get('proxy_url'), + "proxyUserName": self.parameters.get('proxy_user_name'), + "proxyPassword": self.parameters.get('proxy_password') + } + } + } + + register_url = "%s/agents-mgmt/connector-setup" % self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + response, error, dummy = self.rest_api.post(register_url, body, header=headers) + if error is not None: + self.module.fail_json(msg="Error: unexpected response on getting userdata for connector setup: %s, %s" % (str(error), str(response))) + client_id = response['clientId'] + + proxy_certificates = [] + if self.parameters.get('proxy_certificates') is not None: + for each in self.parameters['proxy_certificates']: + try: + data = open(each, "r").read() + except OSError: + self.module.fail_json(msg="Error: Could not open/read file of proxy_certificates: %s" % str(each)) + + encoded_certificate = base64.b64encode(data) + proxy_certificates.append(encoded_certificate) + + if proxy_certificates: + response['proxySettings']['proxyCertificates'] = proxy_certificates + + return response, client_id + + def delete_azure_occm(self): + """ + Delete OCCM + :return: + None + """ + # delete vm deploy + try: + compute_client = get_client_from_cli_profile(ComputeManagementClient) + vm_delete = 
compute_client.virtual_machines.begin_delete( + self.parameters['resource_group'], + self.parameters['name']) + while not vm_delete.done(): + vm_delete.wait(2) + except CloudError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + + # delete interfaces deploy + try: + network_client = get_client_from_cli_profile(NetworkManagementClient) + interface_delete = network_client.network_interfaces.begin_delete( + self.parameters['resource_group'], + self.parameters['name'] + '-nic') + while not interface_delete.done(): + interface_delete.wait(2) + except CloudError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + + # delete storage account deploy + try: + storage_client = get_client_from_cli_profile(StorageManagementClient) + storage_client.storage_accounts.delete( + self.parameters['resource_group'], + self.parameters['storage_account']) + except CloudError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + + # delete storage account deploy + try: + network_client = get_client_from_cli_profile(NetworkManagementClient) + public_ip_addresses_delete = network_client.public_ip_addresses.begin_delete( + self.parameters['resource_group'], + self.parameters['name'] + '-ip') + while not public_ip_addresses_delete.done(): + public_ip_addresses_delete.wait(2) + except CloudError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + + # delete deployment + try: + resource_client = get_client_from_cli_profile(ResourceManagementClient) + deployments_delete = resource_client.deployments.begin_delete( + self.parameters['resource_group'], + self.parameters['name'] + '-ip') + while not deployments_delete.done(): + deployments_delete.wait(5) + except CloudError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + + retries = 16 + while retries > 0: + occm_resp, error = 
self.na_helper.check_occm_status(self.rest_api, + self.parameters['client_id']) + if error is not None: + self.module.fail_json( + msg="Error: Not able to get occm status: %s, %s" % (str(error), str(occm_resp))) + if occm_resp['agent']['status'] != "active": + break + else: + time.sleep(10) + retries -= 1 + if retries == 0: + # Taking too long for terminating OCCM + return self.module.fail_json(msg="Taking too long for instance to finish terminating") + client = self.rest_api.format_client_id(self.parameters['client_id']) + error = self.na_helper.delete_occm_agents(self.rest_api, [{'agentId': client}]) + if error: + self.module.fail_json(msg="Error: unexpected response on deleting OCCM: %s" % (str(error))) + + def apply(self): + """ + Apply action to the Cloud Manager connector for AZURE + :return: None + """ + client_id = None + principal_id = None + if not self.module.check_mode: + if self.parameters['state'] == 'present': + client_id, principal_id = self.deploy_azure() + self.na_helper.changed = True + elif self.parameters['state'] == 'absent': + get_deploy = self.get_deploy_azure_vm() + if get_deploy: + self.delete_azure_occm() + self.na_helper.changed = True + + self.module.exit_json(changed=self.na_helper.changed, msg={'client_id': client_id, 'principal_id': principal_id}) + + +def main(): + """ + Create Cloud Manager connector for AZURE class instance and invoke apply + :return: None + """ + obj_store = NetAppCloudManagerConnectorAzure() + obj_store.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py new file mode 100644 index 000000000..bea686f4c --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py @@ -0,0 +1,644 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_connector_gcp +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_connector_gcp +short_description: NetApp Cloud Manager connector for GCP. +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.4.0' +author: NetApp Ansible Team (@carchi8py) + +description: + - Create or delete Cloud Manager connector for GCP. + +options: + state: + description: + - Whether the specified Cloud Manager connector for GCP should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + name: + required: true + description: + - The name of the Cloud Manager connector for GCP to manage. + type: str + + project_id: + description: + - The GCP project_id where the connector will be created. + required: true + type: str + + zone: + description: + - The GCP zone where the Connector will be created. + required: true + type: str + + gcp_service_account_email: + description: + - The email of the service_account for the connector instance. This service account is used to allow the Connector to create Cloud Volume ONTAP. + required: true + type: str + aliases: ['service_account_email'] + version_added: 21.7.0 + + company: + description: + - The name of the company of the user. + required: true + type: str + + gcp_service_account_path: + description: + - The local path of the service_account JSON file for GCP authorization purposes. This service account is used to create the Connector in GCP. + type: str + aliases: ['service_account_path'] + version_added: 21.7.0 + + subnet_id: + description: + - The name of the subnet for the virtual machine. + type: str + default: default + + network_project_id: + description: + - The project id in GCP associated with the Subnet. If not provided, it is assumed that the Subnet is within the previously specified project id. 
+ type: str + + machine_type: + description: + - The machine_type for the Connector VM. + type: str + default: n2-standard-4 + + firewall_tags: + description: + - Indicates whether to add firewall_tags to the connector VM (HTTP and HTTP). + type: bool + default: true + + associate_public_ip: + description: + - Indicates whether to associate a public IP address to the virtual machine. + type: bool + default: true + + proxy_url: + description: + - The proxy URL, if using a proxy to connect to the internet. + type: str + + proxy_user_name: + description: + - The proxy user name, if using a proxy to connect to the internet. + type: str + + proxy_password: + description: + - The proxy password, if using a proxy to connect to the internet. + type: str + + proxy_certificates: + description: + - The proxy certificates. A list of certificate file names. + type: list + elements: str + + account_id: + description: + - The NetApp account ID that the Connector will be associated with. + - If not provided, Cloud Manager uses the first account. If no account exists, Cloud Manager creates a new account. + - You can find the account ID in the account tab of Cloud Manager at [https://cloudmanager.netapp.com](https://cloudmanager.netapp.com). + type: str + + client_id: + description: + - The client ID of the Cloud Manager Connector. + - The connector ID. + - If state is absent, the client id is used to identify the agent and delete it. + - If state is absent and this parameter is not set, all agents associated with C(name) are deleted. + - Ignored when state is present. 
+ type: str + +''' + +EXAMPLES = """ +- name: Create NetApp Cloud Manager connector for GCP + netapp.cloudmanager.na_cloudmanager_connector_gcp: + state: present + name: ansible-occm-gcp + project_id: xxxxxxx-support + zone: us-east4-b + company: NetApp + gcp_service_account_email: xxxxxxxx@xxxxxxx-support.iam.gserviceaccount.com + gcp_service_account_path: gcp_creds.json + proxy_user_name: test + proxy_password: test + proxy_url: http://abcdefg.com + proxy_certificates: ["D-TRUST_Root_Class_3_CA_2_2009.crt", "DigiCertGlobalRootCA.crt", "DigiCertGlobalRootG2.crt"] + account_id: account-xxxxXXXX + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + +- name: Delete NetApp Cloud Manager connector for GCP + netapp.cloudmanager.na_cloudmanager_connector_gcp: + state: absent + name: ansible-occm-gcp + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + client_id: "{{ wwwwwwwwww }}" + project_id: xxxxxxx-support + zone: us-east4-b + company: NetApp + gcp_service_account_email: xxxxxxxx@xxxxxxx-support.iam.gserviceaccount.com + gcp_service_account_path: gcp_creds.json + account_id: account-xxxxXXXX +""" + +RETURN = """ +client_id: + description: Newly created GCP connector id on cloud manager. + type: str + returned: success + sample: 'FDQE8SwrbjVS6mqUgZoOHQmu2DvBNRRW' +client_ids: + description: + - a list of client ids matching the name and provider if the connector already exists. + - ideally the list should be empty, or contain a single element matching client_id. 
+ type: list + elements: str + returned: success + sample: ['FDQE8SwrbjVS6mqUgZoOHQmu2DvBNRRW'] +""" +import uuid +import time +import base64 +import json + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI + +IMPORT_ERRORS = [] +HAS_GCP_COLLECTION = False + +try: + import google.auth + from google.auth.transport import requests + from google.oauth2 import service_account + import yaml + HAS_GCP_COLLECTION = True +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +GCP_DEPLOYMENT_MANAGER = "www.googleapis.com" +UUID = str(uuid.uuid4()) + + +class NetAppCloudManagerConnectorGCP(object): + ''' object initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=True, type='str'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + project_id=dict(required=True, type='str'), + zone=dict(required=True, type='str'), + company=dict(required=True, type='str'), + gcp_service_account_email=dict(required=True, type='str', aliases=['service_account_email']), + gcp_service_account_path=dict(required=False, type='str', aliases=['service_account_path']), + subnet_id=dict(required=False, type='str', default='default'), + network_project_id=dict(required=False, type='str'), + machine_type=dict(required=False, type='str', default='n2-standard-4'), + firewall_tags=dict(required=False, type='bool', default=True), + associate_public_ip=dict(required=False, type='bool', default=True), + proxy_url=dict(required=False, type='str'), + proxy_user_name=dict(required=False, type='str'), + proxy_password=dict(required=False, type='str', 
no_log=True), + proxy_certificates=dict(required=False, type='list', elements='str'), + account_id=dict(required=False, type='str'), + client_id=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_one_of=[['refresh_token', 'sa_client_id']], + required_together=[['sa_client_id', 'sa_secret_key']], + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = CloudManagerRestAPI(self.module) + self.gcp_common_suffix_name = "-vm-boot-deployment" + self.fail_when_import_errors(IMPORT_ERRORS, HAS_GCP_COLLECTION) + super(NetAppCloudManagerConnectorGCP, self).__init__() + + self.rest_api.gcp_token, error = self.get_gcp_token() + if error: + self.module.fail_json(msg='Error getting gcp token: %s' % repr(error)) + + def get_gcp_token(self): + ''' + get gcp token from gcp service account credential json file + ''' + scopes = ["https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/ndev.cloudman", + "https://www.googleapis.com/auth/ndev.cloudman.readonly", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write"] + if 'gcp_service_account_path' in self.parameters: + try: + fh = open(self.parameters['gcp_service_account_path']) + except (OSError, IOError) as error: + return None, "opening %s: got: %s" % (self.parameters['gcp_service_account_path'], repr(error)) + with fh: + key_bytes = json.load(fh) + if key_bytes is None: + return None, "Error: gcp_service_account_path file is empty" + credentials = service_account.Credentials.from_service_account_file(self.parameters['gcp_service_account_path'], scopes=scopes) + else: + credentials, project = google.auth.default(scopes=scopes) + + credentials.refresh(requests.Request()) + + return 
credentials.token, None + + def fail_when_import_errors(self, import_errors, has_gcp_collection=True): + if has_gcp_collection and not import_errors: + return + msg = '' + if not has_gcp_collection: + msg = 'The python google-auth package is required. ' + msg += 'Import errors: %s' % str(import_errors) + self.module.fail_json(msg=msg) + + def get_deploy_vm(self): + ''' + Get Cloud Manager connector for GCP + :return: + Dictionary of current details if Cloud Manager connector for GCP + None if Cloud Manager connector for GCP is not found + ''' + api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments/%s%s' % ( + self.parameters['project_id'], self.parameters['name'], self.gcp_common_suffix_name) + headers = { + "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token, + 'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token, + } + + occm_status, error, dummy = self.rest_api.get(api_url, header=headers) + if error is not None: + if error == '404' and b'is not found' in occm_status: + return None + self.module.fail_json( + msg="Error: unexpected response on getting occm: %s, %s" % (str(error), str(occm_status))) + + return occm_status + + def get_custom_data_for_gcp(self, proxy_certificates): + ''' + get custom data for GCP + ''' + # get account ID + if 'account_id' not in self.parameters: + # get account ID + response, error = self.na_helper.get_or_create_account(self.rest_api) + if error is not None: + self.module.fail_json( + msg="Error: unexpected response on getting account: %s, %s" % (str(error), str(response))) + self.parameters['account_id'] = response + # registerAgentTOServiceForGCP + response, error = self.na_helper.register_agent_to_service(self.rest_api, "GCP", "") + if error is not None: + self.module.fail_json( + msg="Error: register agent to service for gcp failed: %s, %s" % (str(error), str(response))) + # add proxy_certificates as part of json data + client_id = response['clientId'] + 
client_secret = response['clientSecret'] + u_data = { + 'instanceName': self.parameters['name'], + 'company': self.parameters['company'], + 'clientId': client_id, + 'clientSecret': client_secret, + 'systemId': UUID, + 'tenancyAccountId': self.parameters['account_id'], + 'proxySettings': {'proxyPassword': self.parameters.get('proxy_password'), + 'proxyUserName': self.parameters.get('proxy_user_name'), + 'proxyUrl': self.parameters.get('proxy_url'), + 'proxyCertificates': proxy_certificates, + }, + } + # convert response to json format + user_data = json.dumps(u_data) + return user_data, client_id, None + + def deploy_gcp_vm(self, proxy_certificates): + ''' + deploy GCP VM + ''' + # getCustomDataForGCP + response, client_id, error = self.get_custom_data_for_gcp(proxy_certificates) + if error is not None: + self.module.fail_json( + msg="Error: Not able to get user data for GCP: %s, %s" % (str(error), str(response))) + # compose + user_data = response + gcp_custom_data = base64.b64encode(user_data.encode()) + gcp_sa_scopes = ["https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/ndev.cloudman", + "https://www.googleapis.com/auth/ndev.cloudman.readonly"] + + tags = [] + if self.parameters['firewall_tags'] is True: + tags = {'items': ['firewall-tag-bvsu', 'http-server', 'https-server']} + + # first resource + device_name = self.parameters['name'] + '-vm-disk-boot' + t = { + 'name': self.parameters['name'] + '-vm', + 'properties': { + 'disks': [ + {'autoDelete': True, + 'boot': True, + 'deviceName': device_name, + 'name': device_name, + 'source': "\\\"$(ref.%s.selfLink)\\\"" % device_name, + 'type': "PERSISTENT", + }, + ], + 'machineType': "zones/%s/machineTypes/%s" % (self.parameters['zone'], self.parameters['machine_type']), + 'metadata': { + 'items': [ + {'key': 'serial-port-enable', + 'value': 1}, + {'key': 'customData', + 'value': 
gcp_custom_data} + ] + }, + 'serviceAccounts': [{'email': self.parameters['gcp_service_account_email'], + 'scopes': gcp_sa_scopes, }], + 'tags': tags, + 'zone': self.parameters['zone'] + }, + 'metadata': {'dependsOn': [device_name]}, + 'type': 'compute.v1.instance', + } + + access_configs = [] + if self.parameters['associate_public_ip'] is True: + access_configs = [{'kind': 'compute#accessConfig', + 'name': 'External NAT', + 'type': 'ONE_TO_ONE_NAT', + 'networkTier': 'PREMIUM' + }] + project_id = self.parameters['project_id'] + if self.parameters.get('network_project_id'): + project_id = self.parameters['network_project_id'] + + t['properties']['networkInterfaces'] = [ + {'accessConfigs': access_configs, + 'kind': 'compute#networkInterface', + 'subnetwork': 'projects/%s/regions/%s/subnetworks/%s' % ( + project_id, self.parameters['region'], self.parameters['subnet_id']) + }] + + td = { + 'name': device_name, + 'properties': {'name': device_name, + 'sizeGb': 100, + 'sourceImage': 'projects/%s/global/images/family/%s' % (self.rest_api.environment_data['GCP_IMAGE_PROJECT'], + self.rest_api.environment_data['GCP_IMAGE_FAMILY']), + 'type': 'zones/%s/diskTypes/pd-ssd' % (self.parameters['zone']), + 'zone': self.parameters['zone'] + }, + 'type': 'compute.v1.disks', + } + content = { + 'resources': [t, td] + } + my_data = str(yaml.dump(content)) + # The template must be in this format: + # { + # "name": "ansible-cycc-vm-boot-deployment", + # "target": { + # "config": { + # "content": "resources: + # - name: xxxx + # properties: + # ... 
+ # " + # } + # } + # } + gcp_deployment_template = '{\n "name": "%s%s",\n "target": {\n "config": {\n "content": "%s"\n }\n}\n}' % ( + self.parameters['name'], '-vm-boot-deployment', my_data) + + # post + api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments' % ( + self.parameters['project_id']) + + headers = { + 'X-User-Token': self.rest_api.token_type + " " + self.rest_api.gcp_token, + 'X-Tenancy-Account-Id': self.parameters['account_id'], + 'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token, + 'Content-type': "application/json", + 'Referer': "Ansible_NetApp", + 'X-Agent-Id': self.rest_api.format_client_id(client_id) + } + + response, error, dummy = self.rest_api.post(api_url, data=gcp_deployment_template, header=headers, + gcp_type=True) + if error is not None: + return response, client_id, error + + # check occm status + # Sleep for 1 minutes + time.sleep(60) + retries = 16 + while retries > 0: + agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, client_id) + if error is not None: + self.module.fail_json( + msg="Error: Not able to get occm status: %s, %s" % (str(error), str(agent)), + client_id=client_id, changed=True) + if agent['status'] == "active": + break + else: + time.sleep(30) + retries -= 1 + if retries == 0: + # Taking too long for status to be active + msg = "Connector VM is created and registered. Taking too long for OCCM agent to be active or not properly setup." 
+ msg += ' Latest status: %s' % agent + self.module.fail_json(msg=msg, client_id=client_id, changed=True) + + return response, client_id, error + + def create_occm_gcp(self): + ''' + Create Cloud Manager connector for GCP + ''' + # check proxy configuration + if 'proxy_user_name' in self.parameters and 'proxy_url' not in self.parameters: + self.module.fail_json(msg="Error: missing proxy_url") + if 'proxy_password' in self.parameters and 'proxy_url' not in self.parameters: + self.module.fail_json(msg="Error: missing proxy_url") + + proxy_certificates = [] + if 'proxy_certificates' in self.parameters: + for c_file in self.parameters['proxy_certificates']: + proxy_certificate, error = self.na_helper.encode_certificates(c_file) + # add to proxy_certificates list + if error is not None: + self.module.fail_json(msg="Error: not able to read certificate file %s" % c_file) + proxy_certificates.append(proxy_certificate) + # region is the super class of zone. For example, zone us-east4-b is one of the zone in region us-east4 + self.parameters['region'] = self.parameters['zone'][:-2] + # deploy GCP VM + response, client_id, error = self.deploy_gcp_vm(proxy_certificates) + if error is not None: + self.module.fail_json( + msg="Error: create_occm_gcp: %s, %s" % (str(error), str(response))) + return client_id + + def delete_occm_gcp(self): + ''' + Delete Cloud Manager connector for GCP + ''' + api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments/%s%s' % ( + self.parameters['project_id'], + self.parameters['name'], + self.gcp_common_suffix_name) + headers = { + "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token, + 'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token, + 'X-Tenancy-Account-Id': self.parameters['account_id'], + 'Content-type': "application/json", + 'Referer': "Ansible_NetApp", + } + + response, error, dummy = self.rest_api.delete(api_url, None, header=headers) + if error is not None: + return 
"Error: unexpected response on deleting VM: %s, %s" % (str(error), str(response)) + # sleep for 30 sec + time.sleep(30) + if 'client_id' not in self.parameters: + return None + # check occm status + retries = 30 + while retries > 0: + agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id']) + if error is not None: + return "Error: Not able to get occm status after deleting VM: %s, %s" % (str(error), str(agent)) + if agent['status'] != ["active", "pending"]: + break + else: + time.sleep(10) + retries -= 1 if agent['status'] == "active" else 5 + if retries == 0 and agent['status'] == "active": + # Taking too long for terminating OCCM + return "Taking too long for instance to finish terminating. Latest status: %s" % str(agent) + return None + + def delete_occm_agents(self, agents): + error = self.na_helper.delete_occm_agents(self.rest_api, agents) + if error: + return "Error: deleting OCCM agent(s): %s" % error + return None + + def get_occm_agents(self): + if 'client_id' in self.parameters and self.parameters['state'] == 'absent': + agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id']) + if error == '403' and b'Action not allowed for user' in agent: + # assume the agent does not exist anymore + agents, error = [], None + self.module.warn('Client Id %s was not found for this account.' 
% self.parameters['client_id']) + else: + agents = [agent] + else: + agents, error = self.na_helper.get_occm_agents_by_name(self.rest_api, self.parameters['account_id'], + self.parameters['name'], 'GCP') + if error: + self.module.fail_json( + msg="Error: getting OCCM agents: %s, %s" % (str(error), str(agents))) + return agents + + def set_client_id(self, agents): + client_id = "" + client_ids = [agent['agentId'] for agent in agents if 'agentId' in agent] + if len(client_ids) == 1: + client_id = client_ids[0] + self.parameters['client_id'] = client_ids[0] + elif 'client_id' in self.parameters and self.parameters['client_id'] in client_ids: + client_id = self.parameters['client_id'] + return client_id, client_ids + + def apply(self): + """ + Apply action to the Cloud Manager connector for GCP + :return: None + """ + client_id = "" + agents, client_ids = [], [] + current_vm = self.get_deploy_vm() + if current_vm and current_vm['operation']['status'] == 'terminated': + current_vm = None + current = current_vm + if self.parameters['state'] == 'absent' or current: + agents = self.get_occm_agents() + client_id, client_ids = self.set_client_id(agents) + if agents and current is None: + current = {} + if agents: + current['agents'] = agents + + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + client_id = self.create_occm_gcp() + elif cd_action == 'delete': + errors = [] + if current_vm: + error = self.delete_occm_gcp() + if error: + errors.append(error) + if agents: + error = self.delete_occm_agents(agents) + if error: + errors.append(error) + if errors: + self.module.fail_json(msg='. 
'.join(errors)) + + self.module.exit_json(changed=self.na_helper.changed, client_id=client_id, client_ids=client_ids) + + +def main(): + """ + Create Cloud Manager connector for GCP class instance and invoke apply + :return: None + """ + obj_store = NetAppCloudManagerConnectorGCP() + obj_store.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py new file mode 100644 index 000000000..3de1ebc53 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py @@ -0,0 +1,855 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_cvo_aws +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_cvo_aws +short_description: NetApp Cloud Manager CVO for AWS +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Create, delete, or manage Cloud Manager CVO for AWS. + +options: + + state: + description: + - Whether the specified Cloud Manager CVO for AWS should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + name: + required: true + description: + - The name of the Cloud Manager CVO for AWS to manage. + type: str + + instance_type: + description: + - The instance type to use, which depends on the license type. + - Explore ['m5.xlarge']. + - Standard ['m5.2xlarge','r5.xlarge']. + - Premium ['m5.4xlarge','r5.2xlarge','c4.8xlarge']. + - For more supported instance types, refer to Cloud Volumes ONTAP Release Notes. + type: str + default: m5.2xlarge + + license_type: + description: + - The type of license to use. 
+ - For single node by Capacity ['capacity-paygo'] + - For single node by Node paygo ['cot-explore-paygo', 'cot-standard-paygo', 'cot-premium-paygo']. + - For single node by Node boyl ['cot-premium-byol']. + - For HA by Capacity ['ha-capacity-paygo'] + - For HA by Node paygo ['ha-cot-explore-paygo','ha-cot-standard-paygo','ha-cot-premium-paygo']. + - For HA by Node boyl ['ha-cot-premium-byol']. + choices: ['capacity-paygo', 'cot-standard-paygo', 'cot-premium-paygo', 'cot-explore-paygo', 'cot-premium-byol', \ + 'ha-cot-standard-paygo', 'ha-cot-premium-paygo', 'ha-cot-premium-byol', 'ha-cot-explore-paygo', \ + 'ha-capacity-paygo'] + default: capacity-paygo + type: str + + provided_license: + description: + - Using a NLF license file for BYOL deployment. + type: str + + capacity_package_name: + description: + - Capacity package name is required when selecting a capacity based license. + - Essential only available with Bring Your Own License Capacity-Based. + - Professional available as an annual contract from AWS marketplace or Bring Your Own License Capacity-Based. + choices: ['Professional', 'Essential', 'Freemium'] + default: 'Essential' + type: str + version_added: 21.12.0 + + workspace_id: + description: + - The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP. + - If not provided, Cloud Manager uses the first workspace. + - You can find the ID from the Workspace tab on U(https://cloudmanager.netapp.com). + type: str + + subnet_id: + description: + - The subnet id where the working environment will be created. Required when single node only. + type: str + + vpc_id: + description: + - The VPC ID where the working environment will be created. + - If this argument is not provided, the VPC will be calculated by using the provided subnet ID. + type: str + + region: + required: true + description: + - The region where the working environment will be created. 
+ type: str + + data_encryption_type: + description: + - The type of encryption to use for the working environment. + choices: ['AWS', 'NONE'] + default: 'AWS' + type: str + + client_id: + required: true + description: + - The connector ID of the Cloud Manager Connector. + - You can find the ID from the Connector tab on U(https://cloudmanager.netapp.com). + type: str + + ebs_volume_size: + description: + - EBS volume size for the first data aggregate. + - For GB, the value can be [100 or 500]. + - For TB, the value can be [1,2,4,8,16]. + default: 1 + type: int + + ebs_volume_size_unit: + description: + - The unit for ebs volume size. + choices: ['GB', 'TB'] + default: 'TB' + type: str + + ebs_volume_type: + description: + - The EBS volume type for the first data aggregate. + choices: ['gp3', 'gp2', 'io1', 'sc1', 'st1'] + default: 'gp2' + type: str + + security_group_id: + description: + - The ID of the security group for the working environment. If not provided, Cloud Manager creates the security group. + type: str + + instance_profile_name: + description: + - The instance profile name for the working environment. If not provided, Cloud Manager creates the instance profile. + type: str + + svm_password: + required: true + description: + - The admin password for Cloud Volumes ONTAP. + - It will be updated on each run. + type: str + + svm_name: + description: + - The name of the SVM. + type: str + version_added: 21.22.0 + + ontap_version: + description: + - The required ONTAP version. Ignored if 'use_latest_version' is set to true. + type: str + default: 'latest' + + use_latest_version: + description: + - Indicates whether to use the latest available ONTAP version. + type: bool + default: true + + platform_serial_number: + description: + - The serial number for the cluster. This is required when using 'cot-premium-byol'. + type: str + + tier_level: + description: + - The tiering level when 'capacity_tier' is set to 'S3'. 
+ choices: ['normal', 'ia', 'ia-single', 'intelligent'] + default: 'normal' + type: str + + cluster_key_pair_name: + description: + - SSH authentication key pair name + type: str + version_added: 21.20.0 + + nss_account: + description: + - The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system. + - If the license type is BYOL and an NSS account is not provided, Cloud Manager tries to use the first existing NSS account. + type: str + + writing_speed_state: + description: + - The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH']. + - This argument is not relevant for HA pairs. + type: str + + iops: + description: + - Provisioned IOPS. Required only when provider_volume_type is 'io1' or 'gp3'. + type: int + + throughput: + description: + - Unit is Mb/s. Valid range 125-1000. + - Required only when provider_volume_type is 'gp3'. + type: int + + capacity_tier: + description: + - Whether to enable data tiering for the first data aggregate. + choices: ['S3', 'NONE'] + default: 'S3' + type: str + + instance_tenancy: + description: + - The EC2 instance tenancy. + choices: ['default', 'dedicated'] + default: 'default' + type: str + + cloud_provider_account: + description: + - The cloud provider credentials id to use when deploying the Cloud Volumes ONTAP system. + - You can find the ID in Cloud Manager from the Settings > Credentials page. + - If not specified, Cloud Manager uses the instance profile of the Connector. + type: str + + backup_volumes_to_cbs: + description: + - Automatically enable back up of all volumes to S3. + default: false + type: bool + + enable_compliance: + description: + - Enable the Cloud Compliance service on the working environment. + default: false + type: bool + + enable_monitoring: + description: + - Enable the Monitoring service on the working environment. + default: false + type: bool + + optimized_network_utilization: + description: + - Use optimized network utilization. 
+ default: true + type: bool + + kms_key_id: + description: + - Aws Encryption parameters. It is required if using aws encryption. Only one of KMS key id or KMS arn should be specified. + type: str + + kms_key_arn: + description: + - AWS encryption parameters. It is required if using aws encryption. Only one of KMS key id or KMS arn should be specified. + type: str + version_added: 21.10.0 + + aws_tag: + description: + - Additional tags for the AWS CVO working environment. + type: list + elements: dict + suboptions: + tag_key: + description: The key of the tag. + type: str + tag_value: + description: The tag value. + type: str + + is_ha: + description: + - Indicate whether the working environment is an HA pair or not. + type: bool + default: false + + platform_serial_number_node1: + description: + - For HA BYOL, the serial number for the first node. This is required when using 'ha-cot-premium-byol'. + type: str + + platform_serial_number_node2: + description: + - For HA BYOL, the serial number for the second node. This is required when using 'ha-cot-premium-byol'. + type: str + + node1_subnet_id: + description: + - For HA, the subnet ID of the first node. + type: str + + node2_subnet_id: + description: + - For HA, the subnet ID of the second node. + type: str + + mediator_subnet_id: + description: + - For HA, the subnet ID of the mediator. + type: str + + failover_mode: + description: + - For HA, the failover mode for the HA pair. 'PrivateIP' is for a single availability zone and 'FloatingIP' is for multiple availability zones. + type: str + choices: ['PrivateIP', 'FloatingIP'] + + mediator_assign_public_ip: + description: + - Boolean option to assign public IP. + type: bool + default: true + + mediator_key_pair_name: + description: + - For HA, the key pair name for the mediator instance. + type: str + + cluster_floating_ip: + description: + - For HA FloatingIP, the cluster management floating IP address. 
+ type: str + + data_floating_ip: + description: + - For HA FloatingIP, the data floating IP address. + type: str + + data_floating_ip2: + description: + - For HA FloatingIP, the data floating IP address. + type: str + + svm_floating_ip: + description: + - For HA FloatingIP, the SVM management floating IP address. + type: str + + route_table_ids: + description: + - For HA FloatingIP, the list of route table IDs that will be updated with the floating IPs. + type: list + elements: str + + upgrade_ontap_version: + description: + - Indicates whether to upgrade ONTAP image on the CVO. + - If the current version already matches the desired version, no action is taken. + type: bool + default: false + version_added: 21.13.0 + + update_svm_password: + description: + - Indicates whether to update svm_password on the CVO. + - When set to true, the module is not idempotent, as we cannot read the current password. + type: bool + default: false + version_added: 21.13.0 + +notes: +- Support check_mode. +''' + +EXAMPLES = """ +- name: Create NetApp Cloud Manager CVO for AWS single + netapp.cloudmanager.na_cloudmanager_cvo_aws: + state: present + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + name: AnsibleCVO + region: us-west-1 + subnet_id: subnet-xxxxxxx + vpc_id: vpc-xxxxxxxx + svm_password: P@assword! + client_id: "{{ xxxxxxxxxxxxxxx }}" + writing_speed_state: NORMAL + aws_tag: [ + {tag_key: abc, + tag_value: a123}] + +- name: Create NetApp Cloud Manager CVO for AWS HA + netapp.cloudmanager.na_cloudmanager_cvo_aws: + state: present + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + name: AnsibleCVO + region: us-west-1 + subnet_id: subnet-xxxxxxx + vpc_id: vpc-xxxxxxxx + svm_password: P@assword! 
+ client_id: "{{ xxxxxxxxxxxxxxx }}" + writing_speed_state: NORMAL + aws_tag: [ + {tag_key: abc, + tag_value: a123}] + is_ha: true + failover_mode: FloatingIP + node1_subnet_id: subnet-1 + node2_subnet_id: subnet-1 + mediator_subnet_id: subnet-1 + mediator_key_pair_name: key1 + cluster_floating_ip: 2.1.1.1 + data_floating_ip: 2.1.1.2 + data_floating_ip2: 2.1.1.3 + svm_floating_ip: 2.1.1.4 + route_table_ids: [rt-1,rt-2] + +- name: Delete NetApp Cloud Manager cvo for AWS + netapp.cloudmanager.na_cloudmanager_cvo_aws: + state: absent + name: ansible + region: us-west-1 + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + subnet_id: subnet-xxxxxxx + vpc_id: vpc-xxxxxxxx + svm_password: P@assword! + client_id: "{{ xxxxxxxxxxxxxxx }}" +""" + +RETURN = ''' +working_environment_id: + description: Newly created AWS CVO working_environment_id. + type: str + returned: success +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI +IMPORT_EXCEPTION = None + +try: + import boto3 + from botocore.exceptions import ClientError + HAS_AWS_LIB = True +except ImportError as exc: + HAS_AWS_LIB = False + IMPORT_EXCEPTION = exc + +AWS_License_Types = ['cot-standard-paygo', 'cot-premium-paygo', 'cot-explore-paygo', 'cot-premium-byol', 'ha-cot-standard-paygo', + 'ha-cot-premium-paygo', 'ha-cot-premium-byol', 'ha-cot-explore-paygo', 'capacity-paygo', 'ha-capacity-paygo'] + + +class NetAppCloudManagerCVOAWS: + ''' object initialize and class methods ''' + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=True, 
type='str'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + instance_type=dict(required=False, type='str', default='m5.2xlarge'), + license_type=dict(required=False, type='str', choices=AWS_License_Types, default='capacity-paygo'), + workspace_id=dict(required=False, type='str'), + subnet_id=dict(required=False, type='str'), + vpc_id=dict(required=False, type='str'), + region=dict(required=True, type='str'), + data_encryption_type=dict(required=False, type='str', choices=['AWS', 'NONE'], default='AWS'), + ebs_volume_size=dict(required=False, type='int', default='1'), + ebs_volume_size_unit=dict(required=False, type='str', choices=['GB', 'TB'], default='TB'), + ebs_volume_type=dict(required=False, type='str', choices=['gp3', 'gp2', 'io1', 'sc1', 'st1'], default='gp2'), + svm_password=dict(required=True, type='str', no_log=True), + svm_name=dict(required=False, type='str'), + ontap_version=dict(required=False, type='str', default='latest'), + use_latest_version=dict(required=False, type='bool', default=True), + platform_serial_number=dict(required=False, type='str'), + capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'), + provided_license=dict(required=False, type='str'), + tier_level=dict(required=False, type='str', choices=['normal', 'ia', 'ia-single', 'intelligent'], default='normal'), + cluster_key_pair_name=dict(required=False, type='str'), + nss_account=dict(required=False, type='str'), + writing_speed_state=dict(required=False, type='str'), + iops=dict(required=False, type='int'), + throughput=dict(required=False, type='int'), + capacity_tier=dict(required=False, type='str', choices=['S3', 'NONE'], default='S3'), + instance_tenancy=dict(required=False, type='str', choices=['default', 'dedicated'], default='default'), + instance_profile_name=dict(required=False, type='str'), + security_group_id=dict(required=False, type='str'), + 
cloud_provider_account=dict(required=False, type='str'), + backup_volumes_to_cbs=dict(required=False, type='bool', default=False), + enable_compliance=dict(required=False, type='bool', default=False), + enable_monitoring=dict(required=False, type='bool', default=False), + optimized_network_utilization=dict(required=False, type='bool', default=True), + kms_key_id=dict(required=False, type='str', no_log=True), + kms_key_arn=dict(required=False, type='str', no_log=True), + client_id=dict(required=True, type='str'), + aws_tag=dict(required=False, type='list', elements='dict', options=dict( + tag_key=dict(type='str', no_log=False), + tag_value=dict(type='str') + )), + is_ha=dict(required=False, type='bool', default=False), + platform_serial_number_node1=dict(required=False, type='str'), + platform_serial_number_node2=dict(required=False, type='str'), + failover_mode=dict(required=False, type='str', choices=['PrivateIP', 'FloatingIP']), + mediator_assign_public_ip=dict(required=False, type='bool', default=True), + node1_subnet_id=dict(required=False, type='str'), + node2_subnet_id=dict(required=False, type='str'), + mediator_subnet_id=dict(required=False, type='str'), + mediator_key_pair_name=dict(required=False, type='str'), + cluster_floating_ip=dict(required=False, type='str'), + data_floating_ip=dict(required=False, type='str'), + data_floating_ip2=dict(required=False, type='str'), + svm_floating_ip=dict(required=False, type='str'), + route_table_ids=dict(required=False, type='list', elements='str'), + upgrade_ontap_version=dict(required=False, type='bool', default=False), + update_svm_password=dict(required=False, type='bool', default=False), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ['ebs_volume_type', 'gp3', ['iops', 'throughput']], + ['ebs_volume_type', 'io1', ['iops']], + ['license_type', 'cot-premium-byol', ['platform_serial_number']], + ['license_type', 'ha-cot-premium-byol', ['platform_serial_number_node1', 
'platform_serial_number_node2']], + ['license_type', 'capacity-paygo', ['capacity_package_name']], + ['license_type', 'ha-capacity-paygo', ['capacity_package_name']], + ], + required_one_of=[['refresh_token', 'sa_client_id']], + mutually_exclusive=[['kms_key_id', 'kms_key_arn']], + required_together=[['sa_client_id', 'sa_secret_key']], + supports_check_mode=True, + ) + + if HAS_AWS_LIB is False: + self.module.fail_json(msg="the python AWS library boto3 and botocore is required. Command is pip install boto3." + "Import error: %s" % str(IMPORT_EXCEPTION)) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.changeable_params = ['aws_tag', 'svm_password', 'svm_name', 'tier_level', 'ontap_version', 'instance_type', 'license_type', 'writing_speed_state'] + self.rest_api = CloudManagerRestAPI(self.module) + self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + self.rest_api.api_root_path = '/occm/api/%s' % ('aws/ha' if self.parameters['is_ha'] else 'vsa') + self.headers = { + 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id']) + } + + def get_vpc(self): + """ + Get vpc + :return: vpc ID + """ + vpc_result = None + ec2 = boto3.client('ec2', region_name=self.parameters['region']) + + vpc_input = {'SubnetIds': [self.parameters['subnet_id']]} + + try: + vpc_result = ec2.describe_subnets(**vpc_input) + except ClientError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + + return vpc_result['Subnets'][0]['VpcId'] + + def create_cvo_aws(self): + """ Create AWS CVO """ + if self.parameters.get('workspace_id') is None: + response, msg = self.na_helper.get_tenant(self.rest_api, self.headers) + if response is None: + self.module.fail_json(msg) + self.parameters['workspace_id'] = response + + if self.parameters.get('vpc_id') is None and self.parameters['is_ha'] is False: + self.parameters['vpc_id'] = self.get_vpc() + + if 
self.parameters.get('nss_account') is None: + if self.parameters.get('platform_serial_number') is not None: + if not self.parameters['platform_serial_number'].startswith('Eval-') and self.parameters['license_type'] == 'cot-premium-byol': + response, msg = self.na_helper.get_nss(self.rest_api, self.headers) + if response is None: + self.module.fail_json(msg) + self.parameters['nss_account'] = response + elif self.parameters.get('platform_serial_number_node1') is not None and self.parameters.get('platform_serial_number_node2') is not None: + if not self.parameters['platform_serial_number_node1'].startswith('Eval-')\ + and not self.parameters['platform_serial_number_node2'].startswith('Eval-')\ + and self.parameters['license_type'] == 'ha-cot-premium-byol': + response, msg = self.na_helper.get_nss(self.rest_api, self.headers) + if response is None: + self.module.fail_json(msg) + self.parameters['nss_account'] = response + + json = {"name": self.parameters['name'], + "region": self.parameters['region'], + "tenantId": self.parameters['workspace_id'], + "vpcId": self.parameters['vpc_id'], + "dataEncryptionType": self.parameters['data_encryption_type'], + "ebsVolumeSize": { + "size": self.parameters['ebs_volume_size'], + "unit": self.parameters['ebs_volume_size_unit']}, + "ebsVolumeType": self.parameters['ebs_volume_type'], + "svmPassword": self.parameters['svm_password'], + "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'], + "enableCompliance": self.parameters['enable_compliance'], + "enableMonitoring": self.parameters['enable_monitoring'], + "optimizedNetworkUtilization": self.parameters['optimized_network_utilization'], + "vsaMetadata": { + "ontapVersion": self.parameters['ontap_version'], + "licenseType": self.parameters['license_type'], + "useLatestVersion": self.parameters['use_latest_version'], + "instanceType": self.parameters['instance_type']}, + } + + if self.parameters['capacity_tier'] == "S3": + json.update({"capacityTier": 
self.parameters['capacity_tier'], + "tierLevel": self.parameters['tier_level']}) + + # clean default value if it is not by Capacity license + if not self.parameters['license_type'].endswith('capacity-paygo'): + json['vsaMetadata'].update({"capacityPackageName": ''}) + + if self.parameters.get('platform_serial_number') is not None: + json['vsaMetadata'].update({"platformSerialNumber": self.parameters['platform_serial_number']}) + + if self.parameters.get('provided_license') is not None: + json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']}) + + if self.parameters.get('capacity_package_name') is not None: + json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']}) + + if self.parameters.get('writing_speed_state') is not None: + json.update({"writingSpeedState": self.parameters['writing_speed_state'].upper()}) + + if self.parameters.get('iops') is not None: + json.update({"iops": self.parameters['iops']}) + + if self.parameters.get('throughput') is not None: + json.update({"throughput": self.parameters['throughput']}) + + if self.parameters.get('cluster_key_pair_name') is not None: + json.update({"clusterKeyPairName": self.parameters['cluster_key_pair_name']}) + + if self.parameters.get('instance_tenancy') is not None: + json.update({"instanceTenancy": self.parameters['instance_tenancy']}) + + if self.parameters.get('instance_profile_name') is not None: + json.update({"instanceProfileName": self.parameters['instance_profile_name']}) + + if self.parameters.get('security_group_id') is not None: + json.update({"securityGroupId": self.parameters['security_group_id']}) + + if self.parameters.get('cloud_provider_account') is not None: + json.update({"cloudProviderAccount": self.parameters['cloud_provider_account']}) + + if self.parameters.get('backup_volumes_to_cbs') is not None: + json.update({"backupVolumesToCbs": self.parameters['backup_volumes_to_cbs']}) + + if self.parameters.get('svm_name') is not 
None: + json.update({"svmName": self.parameters['svm_name']}) + + if self.parameters['data_encryption_type'] == "AWS": + if self.parameters.get('kms_key_id') is not None: + json.update({"awsEncryptionParameters": {"kmsKeyId": self.parameters['kms_key_id']}}) + if self.parameters.get('kms_key_arn') is not None: + json.update({"awsEncryptionParameters": {"kmsKeyArn": self.parameters['kms_key_arn']}}) + + if self.parameters.get('aws_tag') is not None: + tags = [] + for each_tag in self.parameters['aws_tag']: + tag = { + 'tagKey': each_tag['tag_key'], + 'tagValue': each_tag['tag_value'] + } + + tags.append(tag) + json.update({"awsTags": tags}) + + if self.parameters['is_ha'] is True: + ha_params = dict({ + "mediatorAssignPublicIP": self.parameters['mediator_assign_public_ip'] + }) + + if self.parameters.get('failover_mode'): + ha_params["failoverMode"] = self.parameters['failover_mode'] + + if self.parameters.get('node1_subnet_id'): + ha_params["node1SubnetId"] = self.parameters['node1_subnet_id'] + + if self.parameters.get('node2_subnet_id'): + ha_params["node2SubnetId"] = self.parameters['node2_subnet_id'] + + if self.parameters.get('mediator_subnet_id'): + ha_params["mediatorSubnetId"] = self.parameters['mediator_subnet_id'] + + if self.parameters.get('mediator_key_pair_name'): + ha_params["mediatorKeyPairName"] = self.parameters['mediator_key_pair_name'] + + if self.parameters.get('cluster_floating_ip'): + ha_params["clusterFloatingIP"] = self.parameters['cluster_floating_ip'] + + if self.parameters.get('data_floating_ip'): + ha_params["dataFloatingIP"] = self.parameters['data_floating_ip'] + + if self.parameters.get('data_floating_ip2'): + ha_params["dataFloatingIP2"] = self.parameters['data_floating_ip2'] + + if self.parameters.get('svm_floating_ip'): + ha_params["svmFloatingIP"] = self.parameters['svm_floating_ip'] + + if self.parameters.get('route_table_ids'): + ha_params["routeTableIds"] = self.parameters['route_table_ids'] + + if 
self.parameters.get('platform_serial_number_node1'): + ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1'] + + if self.parameters.get('platform_serial_number_node2'): + ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2'] + + json["haParams"] = ha_params + + else: + json["subnetId"] = self.parameters['subnet_id'] + + api_url = '%s/working-environments' % self.rest_api.api_root_path + response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers) + if error is not None: + self.module.fail_json( + msg="Error: unexpected response on creating cvo aws: %s, %s" % (str(error), str(response))) + working_environment_id = response['publicId'] + wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id) + + err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60) + + if err is not None: + self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO AWS: %s" % str(err)) + + return working_environment_id + + def update_cvo_aws(self, working_environment_id, modify): + base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id) + for item in modify: + if item == 'svm_password': + response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'svm_name': + response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'aws_tag': + tag_list = None + if 'aws_tag' in self.parameters: + tag_list = self.parameters['aws_tag'] + response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'aws_tag', tag_list) + if error is not None: + 
self.module.fail_json(changed=False, msg=error) + if item == 'tier_level': + response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'writing_speed_state': + response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'ontap_version': + response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'instance_type' or item == 'license_type': + response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers, + self.parameters['instance_type'], + self.parameters['license_type']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + + def delete_cvo_aws(self, we_id): + """ + Delete AWS CVO + """ + api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id) + response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers) + if error is not None: + self.module.fail_json(msg="Error: unexpected response on deleting cvo aws: %s, %s" % (str(error), str(response))) + + wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id) + err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60) + + if err is not None: + self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting CVO AWS: %s" % str(err)) + + def validate_cvo_params(self): + if self.parameters['use_latest_version'] is True and self.parameters['ontap_version'] != "latest": + self.module.fail_json(msg="ontap_version parameter not required when having use_latest_version as true") 
+ + if self.parameters['is_ha'] is True and self.parameters['license_type'] == "ha-cot-premium-byol": + if self.parameters.get('platform_serial_number_node1') is None or self.parameters.get('platform_serial_number_node2') is None: + self.module.fail_json(msg="both platform_serial_number_node1 and platform_serial_number_node2 parameters are required" + "when having ha type as true and license_type as ha-cot-premium-byol") + + if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo': + self.parameters['license_type'] = 'ha-capacity-paygo' + + def apply(self): + """ + Apply action to the Cloud Manager CVO for AWS + :return: None + """ + working_environment_id = None + modify = None + current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers, + self.parameters['name'], "aws") + if current: + self.parameters['working_environment_id'] = current['publicId'] + # check the action + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if current and self.parameters['state'] != 'absent': + # Check mandatory parameters + self.validate_cvo_params() + working_environment_id = current['publicId'] + modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'aws') + if error is not None: + self.module.fail_json(changed=False, msg=error) + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == "create": + self.validate_cvo_params() + working_environment_id = self.create_cvo_aws() + elif cd_action == "delete": + self.delete_cvo_aws(current['publicId']) + else: + self.update_cvo_aws(current['publicId'], modify) + + self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id) + + +def main(): + """ + Create Cloud Manager CVO for AWS class instance and invoke apply + :return: None + """ + obj_store = NetAppCloudManagerCVOAWS() + obj_store.apply() + + +if __name__ == 
'__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py new file mode 100644 index 000000000..3212323e0 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py @@ -0,0 +1,746 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_cvo_azure +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_cvo_azure +short_description: NetApp Cloud Manager CVO/working environment in single or HA mode for Azure. +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.4.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Create, delete, or manage Cloud Manager CVO/working environment in single or HA mode for Azure. + +options: + + state: + description: + - Whether the specified Cloud Manager CVO for AZURE should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + name: + required: true + description: + - The name of the Cloud Manager CVO for AZURE to manage. + type: str + + subscription_id: + required: true + description: + - The ID of the Azure subscription. + type: str + + instance_type: + description: + - The type of instance to use, which depends on the license type you chose. + - Explore ['Standard_DS3_v2']. + - Standard ['Standard_DS4_v2, Standard_DS13_v2, Standard_L8s_v2']. + - Premium ['Standard_DS5_v2', 'Standard_DS14_v2']. + - For more supported instance types, refer to Cloud Volumes ONTAP Release Notes. + type: str + default: Standard_DS4_v2 + + license_type: + description: + - The type of license to use. + - For single node by Capacity ['capacity-paygo']. 
+ - For single node by Node paygo ['azure-cot-explore-paygo', 'azure-cot-standard-paygo', 'azure-cot-premium-paygo']. + - For single node by Node byol ['azure-cot-premium-byol']. + - For HA by Capacity ['ha-capacity-paygo']. + - For HA by Node paygo ['azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo']. + - For HA by Node byol ['azure-ha-cot-premium-byol']. + choices: ['azure-cot-standard-paygo', 'azure-cot-premium-paygo', 'azure-cot-premium-byol', \ + 'azure-cot-explore-paygo', 'azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo', \ + 'azure-ha-cot-premium-byol', 'capacity-paygo', 'ha-capacity-paygo'] + default: 'capacity-paygo' + type: str + + provided_license: + description: + - Using a NLF license file for BYOL deployment. + type: str + + capacity_package_name: + description: + - Capacity package name is required when selecting a capacity based license. + - Essential only available with Bring Your Own License Capacity-Based. + - Professional available as an annual contract from a cloud provider or Bring Your Own License Capacity-Based. + choices: ['Professional', 'Essential', 'Freemium'] + default: 'Essential' + type: str + version_added: 21.12.0 + + workspace_id: + description: + - The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP. + - If not provided, Cloud Manager uses the first workspace. + - You can find the ID from the Workspace tab on [https://cloudmanager.netapp.com]. + type: str + + subnet_id: + required: true + description: + - The name of the subnet for the Cloud Volumes ONTAP system. + type: str + + vnet_id: + required: true + description: + - The name of the virtual network. + type: str + + vnet_resource_group: + description: + - The resource group in Azure associated to the virtual network. + type: str + + resource_group: + description: + - The resource_group where Cloud Volumes ONTAP will be created. 
+ - If not provided, Cloud Manager generates the resource group name (name of the working environment/CVO with suffix '-rg'). + - If the resource group does not exist, it is created. + type: str + + allow_deploy_in_existing_rg: + description: + - Indicates if to allow creation in existing resource group. + type: bool + default: false + + cidr: + required: true + description: + - The CIDR of the VNET. If not provided, resource needs az login to authorize and fetch the cidr details from Azure. + type: str + + location: + required: true + description: + - The location where the working environment will be created. + type: str + + data_encryption_type: + description: + - The type of encryption to use for the working environment. + choices: ['AZURE', 'NONE'] + default: 'AZURE' + type: str + + azure_encryption_parameters: + description: + - AZURE encryption parameters. It is required if using AZURE encryption. + type: str + version_added: 21.10.0 + + storage_type: + description: + - The type of storage for the first data aggregate. + choices: ['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'Premium_ZRS'] + default: 'Premium_LRS' + type: str + + client_id: + required: true + description: + - The connector ID of the Cloud Manager Connector. + - You can find the ID from the Connector tab on [https://cloudmanager.netapp.com]. + type: str + + disk_size: + description: + - Azure volume size for the first data aggregate. + - For GB, the value can be [100, 500]. + - For TB, the value can be [1,2,4,8,16]. + default: 1 + type: int + + disk_size_unit: + description: + - The unit for disk size. + choices: ['GB', 'TB'] + default: 'TB' + type: str + + security_group_id: + description: + - The ID of the security group for the working environment. If not provided, Cloud Manager creates the security group. + type: str + + svm_password: + required: true + description: + - The admin password for Cloud Volumes ONTAP. + - It will be updated on each run. 
+ type: str + + svm_name: + description: + - The name of the SVM. + type: str + version_added: 21.22.0 + + ontap_version: + description: + - The required ONTAP version. Ignored if 'use_latest_version' is set to true. + type: str + default: 'latest' + + use_latest_version: + description: + - Indicates whether to use the latest available ONTAP version. + type: bool + default: true + + serial_number: + description: + - The serial number for the cluster. + - Required when using one of these, 'azure-cot-premium-byol' or 'azure-ha-cot-premium-byol'. + type: str + + tier_level: + description: + - If capacity_tier is Blob, this argument indicates the tiering level. + choices: ['normal', 'cool'] + default: 'normal' + type: str + + nss_account: + description: + - The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system. + - If the license type is BYOL and an NSS account isn't provided, Cloud Manager tries to use the first existing NSS account. + type: str + + writing_speed_state: + description: + - The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH']. + - This argument is not relevant for HA pairs. + type: str + + capacity_tier: + description: + - Whether to enable data tiering for the first data aggregate. + choices: ['Blob', 'NONE'] + default: 'Blob' + type: str + + cloud_provider_account: + description: + - The cloud provider credentials id to use when deploying the Cloud Volumes ONTAP system. + - You can find the ID in Cloud Manager from the Settings > Credentials page. + - If not specified, Cloud Manager uses the instance profile of the Connector. + type: str + + backup_volumes_to_cbs: + description: + - Automatically enable back up of all volumes to S3. + default: false + type: bool + + enable_compliance: + description: + - Enable the Cloud Compliance service on the working environment. + default: false + type: bool + + enable_monitoring: + description: + - Enable the Monitoring service on the working environment. 
+ default: false + type: bool + + azure_tag: + description: + - Additional tags for the AZURE CVO working environment. + type: list + elements: dict + suboptions: + tag_key: + description: The key of the tag. + type: str + tag_value: + description: The tag value. + type: str + is_ha: + description: + - Indicate whether the working environment is an HA pair or not. + type: bool + default: false + + platform_serial_number_node1: + description: + - For HA BYOL, the serial number for the first node. + type: str + + platform_serial_number_node2: + description: + - For HA BYOL, the serial number for the second node. + type: str + + ha_enable_https: + description: + - For HA, enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false. + type: bool + version_added: 21.10.0 + + upgrade_ontap_version: + description: + - Indicates whether to upgrade ONTAP image on the CVO. + - If the current version already matches the desired version, no action is taken. + type: bool + default: false + version_added: 21.13.0 + + update_svm_password: + description: + - Indicates whether to update svm_password on the CVO. + - When set to true, the module is not idempotent, as we cannot read the current password. + type: bool + default: false + version_added: 21.13.0 + + availability_zone: + description: + - The availability zone on the location configuration. + type: int + version_added: 21.20.0 + + availability_zone_node1: + description: + - The node1 availability zone on the location configuration for HA. + type: int + version_added: 21.21.0 + + availability_zone_node2: + description: + - The node2 availability zone on the location configuration for HA. 
+ type: int + version_added: 21.21.0 +''' + +EXAMPLES = """ +- name: create NetApp Cloud Manager CVO for Azure single + netapp.cloudmanager.na_cloudmanager_cvo_azure: + state: present + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + name: AnsibleCVO + location: westus + subnet_id: subnet-xxxxxxx + vnet_id: vnetxxxxxxxx + svm_password: P@assword! + client_id: "{{ xxxxxxxxxxxxxxx }}" + writing_speed_state: NORMAL + azure_tag: [ + {tag_key: abc, + tag_value: a123}] + +- name: create NetApp Cloud Manager CVO for Azure HA + netapp.cloudmanager.na_cloudmanager_cvo_azure: + state: present + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + name: AnsibleCVO + location: westus + subnet_id: subnet-xxxxxxx + vnet_id: vnetxxxxxxxx + svm_password: P@assword! + client_id: "{{ xxxxxxxxxxxxxxx }}" + writing_speed_state: NORMAL + azure_tag: [ + {tag_key: abc, + tag_value: a123}] + is_ha: true + +- name: delete NetApp Cloud Manager cvo for Azure + netapp.cloudmanager.na_cloudmanager_cvo_azure: + state: absent + name: ansible + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + location: westus + subnet_id: subnet-xxxxxxx + vnet_id: vnetxxxxxxxx + svm_password: P@assword! + client_id: "{{ xxxxxxxxxxxxxxx }}" +""" + +RETURN = ''' +working_environment_id: + description: Newly created AZURE CVO working_environment_id. 
+ type: str + returned: success +''' + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI + + +AZURE_License_Types = ['azure-cot-standard-paygo', 'azure-cot-premium-paygo', 'azure-cot-premium-byol', 'azure-cot-explore-paygo', + 'azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo', 'azure-ha-cot-premium-byol', 'capacity-paygo', 'ha-capacity-paygo'] + + +class NetAppCloudManagerCVOAZURE: + """ object initialize and class methods """ + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=True, type='str'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + instance_type=dict(required=False, type='str', default='Standard_DS4_v2'), + license_type=dict(required=False, type='str', choices=AZURE_License_Types, default='capacity-paygo'), + workspace_id=dict(required=False, type='str'), + capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'), + provided_license=dict(required=False, type='str'), + subnet_id=dict(required=True, type='str'), + vnet_id=dict(required=True, type='str'), + vnet_resource_group=dict(required=False, type='str'), + resource_group=dict(required=False, type='str'), + cidr=dict(required=True, type='str'), + location=dict(required=True, type='str'), + subscription_id=dict(required=True, type='str'), + data_encryption_type=dict(required=False, type='str', choices=['AZURE', 'NONE'], default='AZURE'), + azure_encryption_parameters=dict(required=False, type='str', no_log=True), + storage_type=dict(required=False, type='str', 
choices=['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'Premium_ZRS'], default='Premium_LRS'), + disk_size=dict(required=False, type='int', default=1), + disk_size_unit=dict(required=False, type='str', choices=['GB', 'TB'], default='TB'), + svm_password=dict(required=True, type='str', no_log=True), + svm_name=dict(required=False, type='str'), + ontap_version=dict(required=False, type='str', default='latest'), + use_latest_version=dict(required=False, type='bool', default=True), + tier_level=dict(required=False, type='str', choices=['normal', 'cool'], default='normal'), + nss_account=dict(required=False, type='str'), + writing_speed_state=dict(required=False, type='str'), + capacity_tier=dict(required=False, type='str', choices=['Blob', 'NONE'], default='Blob'), + security_group_id=dict(required=False, type='str'), + cloud_provider_account=dict(required=False, type='str'), + backup_volumes_to_cbs=dict(required=False, type='bool', default=False), + enable_compliance=dict(required=False, type='bool', default=False), + enable_monitoring=dict(required=False, type='bool', default=False), + allow_deploy_in_existing_rg=dict(required=False, type='bool', default=False), + client_id=dict(required=True, type='str'), + azure_tag=dict(required=False, type='list', elements='dict', options=dict( + tag_key=dict(type='str', no_log=False), + tag_value=dict(type='str') + )), + serial_number=dict(required=False, type='str'), + is_ha=dict(required=False, type='bool', default=False), + platform_serial_number_node1=dict(required=False, type='str'), + platform_serial_number_node2=dict(required=False, type='str'), + ha_enable_https=dict(required=False, type='bool'), + upgrade_ontap_version=dict(required=False, type='bool', default=False), + update_svm_password=dict(required=False, type='bool', default=False), + availability_zone=dict(required=False, type='int'), + availability_zone_node1=dict(required=False, type='int'), + availability_zone_node2=dict(required=False, type='int'), + )) + 
+ self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_one_of=[['refresh_token', 'sa_client_id']], + required_together=[['sa_client_id', 'sa_secret_key']], + required_if=[ + ['license_type', 'capacity-paygo', ['capacity_package_name']], + ['license_type', 'ha-capacity-paygo', ['capacity_package_name']], + ['license_type', 'azure-cot-premium-byol', ['serial_number']], + ['license_type', 'azure-ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']], + ], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.changeable_params = ['svm_password', 'svm_name', 'azure_tag', 'tier_level', 'ontap_version', + 'instance_type', 'license_type', 'writing_speed_state'] + self.rest_api = CloudManagerRestAPI(self.module) + self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + self.rest_api.api_root_path = '/occm/api/azure/%s' % ('ha' if self.parameters['is_ha'] else 'vsa') + self.headers = { + 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id']) + } + + def create_cvo_azure(self): + """ + Create AZURE CVO + """ + if self.parameters.get('workspace_id') is None: + response, msg = self.na_helper.get_tenant(self.rest_api, self.headers) + if response is None: + self.module.fail_json(msg) + self.parameters['workspace_id'] = response + + if self.parameters.get('nss_account') is None: + if self.parameters.get('serial_number') is not None: + if not self.parameters['serial_number'].startswith('Eval-') and self.parameters['license_type'] == 'azure-cot-premium-byol': + response, msg = self.na_helper.get_nss(self.rest_api, self.headers) + if response is None: + self.module.fail_json(msg) + self.parameters['nss_account'] = response + elif self.parameters.get('platform_serial_number_node1') is not None and self.parameters.get('platform_serial_number_node2') is not None: + if not 
self.parameters['platform_serial_number_node1'].startswith('Eval-')\ + and not self.parameters['platform_serial_number_node2'].startswith('Eval-')\ + and self.parameters['license_type'] == 'azure-ha-cot-premium-byol': + response, msg = self.na_helper.get_nss(self.rest_api, self.headers) + if response is None: + self.module.fail_json(msg) + self.parameters['nss_account'] = response + + json = {"name": self.parameters['name'], + "region": self.parameters['location'], + "subscriptionId": self.parameters['subscription_id'], + "tenantId": self.parameters['workspace_id'], + "storageType": self.parameters['storage_type'], + "dataEncryptionType": self.parameters['data_encryption_type'], + "optimizedNetworkUtilization": True, + "diskSize": { + "size": self.parameters['disk_size'], + "unit": self.parameters['disk_size_unit']}, + "svmPassword": self.parameters['svm_password'], + "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'], + "enableCompliance": self.parameters['enable_compliance'], + "enableMonitoring": self.parameters['enable_monitoring'], + "vsaMetadata": { + "ontapVersion": self.parameters['ontap_version'], + "licenseType": self.parameters['license_type'], + "useLatestVersion": self.parameters['use_latest_version'], + "instanceType": self.parameters['instance_type']} + } + + if self.parameters['capacity_tier'] == "Blob": + json.update({"capacityTier": self.parameters['capacity_tier'], + "tierLevel": self.parameters['tier_level']}) + + if self.parameters.get('provided_license') is not None: + json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']}) + + # clean default value if it is not by Capacity license + if not self.parameters['license_type'].endswith('capacity-paygo'): + json['vsaMetadata'].update({"capacityPackageName": ''}) + + if self.parameters.get('capacity_package_name') is not None: + json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']}) + + if self.parameters.get('cidr') is 
not None: + json.update({"cidr": self.parameters['cidr']}) + + if self.parameters.get('writing_speed_state') is not None: + json.update({"writingSpeedState": self.parameters['writing_speed_state'].upper()}) + + if self.parameters.get('resource_group') is not None: + json.update({"resourceGroup": self.parameters['resource_group'], + "allowDeployInExistingRg": self.parameters['allow_deploy_in_existing_rg']}) + else: + json.update({"resourceGroup": (self.parameters['name'] + '-rg')}) + + if self.parameters.get('serial_number') is not None: + json.update({"serialNumber": self.parameters['serial_number']}) + + if self.parameters.get('security_group_id') is not None: + json.update({"securityGroupId": self.parameters['security_group_id']}) + + if self.parameters.get('cloud_provider_account') is not None: + json.update({"cloudProviderAccount": self.parameters['cloud_provider_account']}) + + if self.parameters.get('backup_volumes_to_cbs') is not None: + json.update({"backupVolumesToCbs": self.parameters['backup_volumes_to_cbs']}) + + if self.parameters.get('nss_account') is not None: + json.update({"nssAccount": self.parameters['nss_account']}) + + if self.parameters.get('availability_zone') is not None: + json.update({"availabilityZone": self.parameters['availability_zone']}) + + if self.parameters['data_encryption_type'] == "AZURE": + if self.parameters.get('azure_encryption_parameters') is not None: + json.update({"azureEncryptionParameters": {"key": self.parameters['azure_encryption_parameters']}}) + + if self.parameters.get('svm_name') is not None: + json.update({"svmName": self.parameters['svm_name']}) + + if self.parameters.get('azure_tag') is not None: + tags = [] + for each_tag in self.parameters['azure_tag']: + tag = { + 'tagKey': each_tag['tag_key'], + 'tagValue': each_tag['tag_value'] + } + + tags.append(tag) + json.update({"azureTags": tags}) + + if self.parameters['is_ha']: + ha_params = dict() + + if self.parameters.get('platform_serial_number_node1'): + 
ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1'] + + if self.parameters.get('platform_serial_number_node2'): + ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2'] + + if self.parameters.get('availability_zone_node1'): + ha_params["availabilityZoneNode1"] = self.parameters['availability_zone_node1'] + + if self.parameters.get('availability_zone_node2'): + ha_params["availabilityZoneNode2"] = self.parameters['availability_zone_node2'] + + if self.parameters.get('ha_enable_https') is not None: + ha_params['enableHttps'] = self.parameters['ha_enable_https'] + + json["haParams"] = ha_params + + resource_group = self.parameters['vnet_resource_group'] if self.parameters.get( + 'vnet_resource_group') is not None else self.parameters['resource_group'] + + resource_group_path = 'subscriptions/%s/resourceGroups/%s' % (self.parameters['subscription_id'], resource_group) + vnet_format = '%s/%s' if self.rest_api.simulator else '/%s/providers/Microsoft.Network/virtualNetworks/%s' + vnet = vnet_format % (resource_group_path, self.parameters['vnet_id']) + json.update({"vnetId": vnet}) + json.update({"subnetId": '%s/subnets/%s' % (vnet, self.parameters['subnet_id'])}) + + api_url = '%s/working-environments' % self.rest_api.api_root_path + response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers) + if error is not None: + self.module.fail_json( + msg="Error: unexpected response on creating cvo azure: %s, %s" % (str(error), str(response))) + working_environment_id = response['publicId'] + wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id) + err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60) + + if err is not None: + self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO AZURE: %s" % str(err)) + + return working_environment_id + + def get_extra_azure_tags(self, 
rest_api, headers): + # Get extra azure tag from current working environment + # It is created automatically not from the user input + we, err = self.na_helper.get_working_environment_details(rest_api, headers) + if err is not None: + self.module.fail_json(msg="Error: unexpected response to get CVO AZURE details: %s" % str(err)) + return [{'tag_key': 'DeployedByOccm', 'tag_value': we['userTags']['DeployedByOccm']}] if 'DeployedByOccm' in \ + we['userTags'] else [] + + def update_cvo_azure(self, working_environment_id, modify): + base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id) + for item in modify: + if item == 'svm_password': + response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'svm_name': + response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'azure_tag': + # default azure tag + tag_list = self.get_extra_azure_tags(self.rest_api, self.headers) + if 'azure_tag' in self.parameters: + tag_list.extend(self.parameters['azure_tag']) + response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'azure_tag', tag_list) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'tier_level': + response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'writing_speed_state': + response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'ontap_version': + 
response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'instance_type' or item == 'license_type': + response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers, + self.parameters['instance_type'], + self.parameters['license_type']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + + def delete_cvo_azure(self, we_id): + """ + Delete AZURE CVO + """ + + api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id) + response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers) + if error is not None: + self.module.fail_json(msg="Error: unexpected response on deleting cvo azure: %s, %s" % (str(error), str(response))) + + wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id) + err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60) + + if err is not None: + self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting CVO AZURE: %s" % str(err)) + + def validate_cvo_params(self): + if self.parameters['use_latest_version'] is True and self.parameters['ontap_version'] != "latest": + self.module.fail_json(msg="ontap_version parameter not required when having use_latest_version as true") + + if self.parameters.get('serial_number') is None and self.parameters['license_type'] == "azure-cot-premium-byol": + self.module.fail_json(msg="serial_number parameter required when having license_type as azure-cot-premium-byol") + + if self.parameters['is_ha'] and self.parameters['license_type'] == "azure-ha-cot-premium-byol": + if self.parameters.get('platform_serial_number_node1') is None or self.parameters.get('platform_serial_number_node2') is None: + self.module.fail_json(msg="both platform_serial_number_node1 and 
platform_serial_number_node2 parameters are required" + "when having ha type as true and license_type as azure-ha-cot-premium-byol") + if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo': + self.parameters['license_type'] == 'ha-capacity-paygo' + + def apply(self): + """ + Apply action to the Cloud Manager CVO for AZURE + :return: None + """ + working_environment_id = None + modify = None + current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers, + self.parameters['name'], "azure") + if current: + self.parameters['working_environment_id'] = current['publicId'] + # check the action whether to create, delete, or not + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if current and self.parameters['state'] != 'absent': + working_environment_id = current['publicId'] + modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'azure') + if error is not None: + self.module.fail_json(changed=False, msg=error) + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == "create": + self.validate_cvo_params() + working_environment_id = self.create_cvo_azure() + elif cd_action == "delete": + self.delete_cvo_azure(current['publicId']) + else: + self.update_cvo_azure(current['publicId'], modify) + + self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id) + + +def main(): + """ + Create Cloud Manager CVO for AZURE class instance and invoke apply + :return: None + """ + obj_store = NetAppCloudManagerCVOAZURE() + obj_store.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py new file mode 100644 index 000000000..7abbca823 --- /dev/null +++ 
b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py @@ -0,0 +1,858 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_cvo_gcp +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_cvo_gcp +short_description: NetApp Cloud Manager CVO for GCP +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.4.0' +author: NetApp Ansible Team (@carchi8py) + +description: + - Create, delete, or manage Cloud Manager CVO for GCP. + +options: + + backup_volumes_to_cbs: + description: + - Automatically backup all volumes to cloud. + default: false + type: bool + + capacity_tier: + description: + - Whether to enable data tiering for the first data aggregate. + choices: ['cloudStorage'] + type: str + + client_id: + required: true + description: + - The connector ID of the Cloud Manager Connector. + - You can find the ID from the Connector tab on U(https://cloudmanager.netapp.com). + type: str + + data_encryption_type: + description: + - Type of encryption to use for this working environment. + choices: ['GCP'] + type: str + + gcp_encryption_parameters: + description: + - The GCP encryption parameters. + type: str + version_added: 21.10.0 + + enable_compliance: + description: + - Enable the Cloud Compliance service on the working environment. + default: false + type: bool + + firewall_rule: + description: + - Firewall name for a single node cluster. + type: str + + gcp_labels: + description: + - Optionally provide up to four key-value pairs with which to all GCP entities created by Cloud Manager. + type: list + elements: dict + suboptions: + label_key: + description: The key of the label. + type: str + label_value: + description: The label value. 
+ type: str + + gcp_service_account: + description: + - The gcp_service_account email in order to enable tiering of cold data to Google Cloud Storage. + required: true + type: str + + gcp_volume_size: + description: + - GCP volume size. + type: int + + gcp_volume_size_unit: + description: + - GCP volume size unit. + choices: ['GB', 'TB'] + type: str + + gcp_volume_type: + description: + - GCP volume type. + choices: ['pd-balanced', 'pd-standard', 'pd-ssd'] + type: str + + instance_type: + description: + - The type of instance to use, which depends on the license type you choose. + - Explore ['custom-4-16384']. + - Standard ['n1-standard-8']. + - Premium ['n1-standard-32']. + - BYOL all instance types defined for PayGo. + - For more supported instance types, refer to Cloud Volumes ONTAP Release Notes. + default: 'n1-standard-8' + type: str + + is_ha: + description: + - Indicate whether the working environment is an HA pair or not. + type: bool + default: false + + license_type: + description: + - The type of license to use. + - For single node by Capacity ['capacity-paygo']. + - For single node by Node paygo ['gcp-cot-explore-paygo', 'gcp-cot-standard-paygo', 'gcp-cot-premium-paygo']. + - For single node by Node byol ['gcp-cot-premium-byol']. + - For HA by Capacity ['ha-capacity-paygo']. + - For HA by Node paygo ['gcp-ha-cot-explore-paygo', 'gcp-ha-cot-standard-paygo', 'gcp-ha-cot-premium-paygo']. + - For HA by Node byol ['gcp-cot-premium-byol']. + choices: ['gcp-cot-standard-paygo', 'gcp-cot-explore-paygo', 'gcp-cot-premium-paygo', 'gcp-cot-premium-byol', \ + 'gcp-ha-cot-standard-paygo', 'gcp-ha-cot-premium-paygo', 'gcp-ha-cot-explore-paygo', 'gcp-ha-cot-premium-byol', \ + 'capacity-paygo', 'ha-capacity-paygo'] + type: str + default: 'capacity-paygo' + + provided_license: + description: + - Using a NLF license file for BYOL deployment + type: str + + capacity_package_name: + description: + - Capacity package name is required when selecting a capacity based license. 
+ choices: ['Professional', 'Essential', 'Freemium'] + default: 'Essential' + type: str + version_added: 21.12.0 + + mediator_zone: + description: + - The zone for mediator. + - Option for HA pair only. + type: str + + name: + description: + - The name of the Cloud Manager CVO for GCP to manage. + required: true + type: str + + network_project_id: + description: + - The project id in GCP associated with the Subnet. + - If not provided, it is assumed that the Subnet is within the previously specified project id. + type: str + + node1_zone: + description: + - Zone for node 1. + - Option for HA pair only. + type: str + + node2_zone: + description: + - Zone for node 2. + - Option for HA pair only. + type: str + + nss_account: + description: + - The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system. + - If the license type is BYOL and an NSS account isn't provided, Cloud Manager tries to use the first existing NSS account. + type: str + + ontap_version: + description: + - The required ONTAP version. Ignored if 'use_latest_version' is set to true. + type: str + default: 'latest' + + platform_serial_number_node1: + description: + - For HA BYOL, the serial number for the first node. + - Option for HA pair only. + type: str + + platform_serial_number_node2: + description: + - For HA BYOL, the serial number for the second node. + - Option for HA pair only. + type: str + + project_id: + description: + - The ID of the GCP project. + required: true + type: str + + platform_serial_number: + description: + - The serial number for the system. Required when using 'gcp-cot-premium-byol'. + type: str + + state: + description: + - Whether the specified Cloud Manager CVO for GCP should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + subnet_id: + description: + - The name of the subnet for Cloud Volumes ONTAP. 
+ type: str + + subnet0_node_and_data_connectivity: + description: + - Subnet path for nic1, required for node and data connectivity. + - If using shared VPC, network_project_id must be provided. + - Option for HA pair only. + type: str + + subnet1_cluster_connectivity: + description: + - Subnet path for nic2, required for cluster connectivity. + - Option for HA pair only. + type: str + + subnet2_ha_connectivity: + description: + - Subnet path for nic3, required for HA connectivity. + - Option for HA pair only. + type: str + + subnet3_data_replication: + description: + - Subnet path for nic4, required for HA connectivity. + - Option for HA pair only. + type: str + + svm_password: + description: + - The admin password for Cloud Volumes ONTAP. + - It will be updated on each run. + type: str + + svm_name: + description: + - The name of the SVM. + type: str + version_added: 21.22.0 + + tier_level: + description: + - The tiering level when 'capacity_tier' is set to 'cloudStorage'. + choices: ['standard', 'nearline', 'coldline'] + default: 'standard' + type: str + + use_latest_version: + description: + - Indicates whether to use the latest available ONTAP version. + type: bool + default: true + + vpc_id: + required: true + description: + - The name of the VPC. + type: str + + vpc0_firewall_rule_name: + description: + - Firewall rule name for vpc1. + - Option for HA pair only. + type: str + + vpc0_node_and_data_connectivity: + description: + - VPC path for nic1, required for node and data connectivity. + - If using shared VPC, network_project_id must be provided. + - Option for HA pair only. + type: str + + vpc1_cluster_connectivity: + description: + - VPC path for nic2, required for cluster connectivity. + - Option for HA pair only. + type: str + + vpc1_firewall_rule_name: + description: + - Firewall rule name for vpc2. + - Option for HA pair only. + type: str + + vpc2_ha_connectivity: + description: + - VPC path for nic3, required for HA connectivity. 
+ - Option for HA pair only. + type: str + + vpc2_firewall_rule_name: + description: + - Firewall rule name for vpc3. + - Option for HA pair only. + type: str + + vpc3_data_replication: + description: + - VPC path for nic4, required for data replication. + - Option for HA pair only. + type: str + + vpc3_firewall_rule_name: + description: + - Firewall rule name for vpc4. + - Option for HA pair only. + type: str + + workspace_id: + description: + - The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP. + - If not provided, Cloud Manager uses the first workspace. + - You can find the ID from the Workspace tab on [https://cloudmanager.netapp.com]. + type: str + + writing_speed_state: + description: + - The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH']. + - Default value is 'NORMAL' for non-HA GCP CVO + - This argument is not relevant for HA pairs. + type: str + + zone: + description: + - The zone of the region where the working environment will be created. + required: true + type: str + + upgrade_ontap_version: + description: + - Indicates whether to upgrade ONTAP image on the CVO. + - If the current version already matches the desired version, no action is taken. + type: bool + default: false + version_added: 21.13.0 + + update_svm_password: + description: + - Indicates whether to update svm_password on the CVO. + - When set to true, the module is not idempotent, as we cannot read the current password. + type: bool + default: false + version_added: 21.13.0 + + subnet_path: + description: + - Subnet path for a single node cluster. + type: str + version_added: 21.20.0 + +notes: +- Support check_mode. 
+''' + +EXAMPLES = """ + +- name: Create NetApp Cloud Manager cvo for GCP + netapp.cloudmanager.na_cloudmanager_cvo_gcp: + state: present + name: ansiblecvogcp + project_id: default-project + zone: us-east4-b + subnet_path: projects//regions//subnetworks/ + subnet_id: projects//regions//subnetworks/ + gcp_volume_type: pd-ssd + gcp_volume_size: 500 + gcp_volume_size_unit: GB + gcp_service_account: "{{ xxxxxxxxxxxxxxx }}" + data_encryption_type: GCP + svm_password: "{{ xxxxxxxxxxxxxxx }}" + ontap_version: latest + use_latest_version: true + license_type: capacity-paygo + instance_type: n1-standard-8 + client_id: "{{ xxxxxxxxxxxxxxx }}" + workspace_id: "{{ xxxxxxxxxxxxxxx }}" + capacity_tier: cloudStorage + writing_speed_state: NORMAL + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + vpc_id: default + gcp_labels: + - label_key: key1 + label_value: value1 + - label_key: key2 + label_value: value2 + +- name: Create NetApp Cloud Manager cvo ha for GCP + netapp.cloudmanager.na_cloudmanager_cvo_gcp: + state: present + name: ansiblecvogcpha + project_id: "default-project" + zone: us-east1-b + gcp_volume_type: pd-ssd + gcp_volume_size: 500 + gcp_volume_size_unit: GB + gcp_service_account: "{{ xxxxxxxxxxxxxxx }}" + data_encryption_type: GCP + svm_password: "{{ xxxxxxxxxxxxxxx }}" + ontap_version: ONTAP-9.9.0.T1.gcpha + use_latest_version: false + license_type: ha-capacity-paygo + instance_type: custom-4-16384 + client_id: "{{ xxxxxxxxxxxxxxx }}" + workspace_id: "{{ xxxxxxxxxxxxxxx }}" + capacity_tier: cloudStorage + writing_speed_state: NORMAL + refresh_token: "{{ xxxxxxxxxxxxxxx }}" + is_ha: true + mediator_zone: us-east1-b + node1_zone: us-east1-b + node2_zone: us-east1-b + subnet0_node_and_data_connectivity: default + subnet1_cluster_connectivity: subnet2 + subnet2_ha_connectivity: subnet3 + subnet3_data_replication: subnet1 + vpc0_node_and_data_connectivity: default + vpc1_cluster_connectivity: vpc2 + vpc2_ha_connectivity: vpc3 + vpc3_data_replication: vpc1 + vpc_id: default + 
subnet_id: default + +""" + +RETURN = ''' +working_environment_id: + description: Newly created GCP CVO working_environment_id. + type: str + returned: success +''' + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI + + +GCP_LICENSE_TYPES = ["gcp-cot-standard-paygo", "gcp-cot-explore-paygo", "gcp-cot-premium-paygo", "gcp-cot-premium-byol", + "gcp-ha-cot-standard-paygo", "gcp-ha-cot-premium-paygo", "gcp-ha-cot-explore-paygo", + "gcp-ha-cot-premium-byol", "capacity-paygo", "ha-capacity-paygo"] +GOOGLE_API_URL = "https://www.googleapis.com/compute/v1/projects" + + +class NetAppCloudManagerCVOGCP: + ''' object initialize and class methods ''' + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + backup_volumes_to_cbs=dict(required=False, type='bool', default=False), + capacity_tier=dict(required=False, type='str', choices=['cloudStorage']), + client_id=dict(required=True, type='str'), + data_encryption_type=dict(required=False, choices=['GCP'], type='str'), + gcp_encryption_parameters=dict(required=False, type='str', no_log=True), + enable_compliance=dict(required=False, type='bool', default=False), + firewall_rule=dict(required=False, type='str'), + gcp_labels=dict(required=False, type='list', elements='dict', options=dict( + label_key=dict(type='str', no_log=False), + label_value=dict(type='str') + )), + gcp_service_account=dict(required=True, type='str'), + gcp_volume_size=dict(required=False, type='int'), + gcp_volume_size_unit=dict(required=False, choices=['GB', 'TB'], type='str'), + gcp_volume_type=dict(required=False, choices=['pd-balanced', 'pd-standard', 'pd-ssd'], 
type='str'), + instance_type=dict(required=False, type='str', default='n1-standard-8'), + is_ha=dict(required=False, type='bool', default=False), + license_type=dict(required=False, type='str', choices=GCP_LICENSE_TYPES, default='capacity-paygo'), + mediator_zone=dict(required=False, type='str'), + name=dict(required=True, type='str'), + network_project_id=dict(required=False, type='str'), + node1_zone=dict(required=False, type='str'), + node2_zone=dict(required=False, type='str'), + nss_account=dict(required=False, type='str'), + ontap_version=dict(required=False, type='str', default='latest'), + platform_serial_number=dict(required=False, type='str'), + platform_serial_number_node1=dict(required=False, type='str'), + platform_serial_number_node2=dict(required=False, type='str'), + project_id=dict(required=True, type='str'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + subnet_id=dict(required=False, type='str'), + subnet0_node_and_data_connectivity=dict(required=False, type='str'), + subnet1_cluster_connectivity=dict(required=False, type='str'), + subnet2_ha_connectivity=dict(required=False, type='str'), + subnet3_data_replication=dict(required=False, type='str'), + svm_password=dict(required=False, type='str', no_log=True), + svm_name=dict(required=False, type='str'), + tier_level=dict(required=False, type='str', choices=['standard', 'nearline', 'coldline'], + default='standard'), + use_latest_version=dict(required=False, type='bool', default=True), + capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'), + provided_license=dict(required=False, type='str'), + vpc_id=dict(required=True, type='str'), + vpc0_firewall_rule_name=dict(required=False, type='str'), + vpc0_node_and_data_connectivity=dict(required=False, type='str'), + vpc1_cluster_connectivity=dict(required=False, type='str'), + vpc1_firewall_rule_name=dict(required=False, type='str'), + 
vpc2_firewall_rule_name=dict(required=False, type='str'), + vpc2_ha_connectivity=dict(required=False, type='str'), + vpc3_data_replication=dict(required=False, type='str'), + vpc3_firewall_rule_name=dict(required=False, type='str'), + workspace_id=dict(required=False, type='str'), + writing_speed_state=dict(required=False, type='str'), + zone=dict(required=True, type='str'), + upgrade_ontap_version=dict(required=False, type='bool', default=False), + update_svm_password=dict(required=False, type='bool', default=False), + subnet_path=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_one_of=[['refresh_token', 'sa_client_id']], + required_together=[['sa_client_id', 'sa_secret_key']], + required_if=[ + ['license_type', 'capacity-paygo', ['capacity_package_name']], + ['license_type', 'ha-capacity-paygo', ['capacity_package_name']], + ['license_type', 'gcp-cot-premium-byol', ['platform_serial_number']], + ['license_type', 'gcp-ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']], + ], + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.changeable_params = ['svm_password', 'svm_name', 'tier_level', 'gcp_labels', 'ontap_version', + 'instance_type', 'license_type', 'writing_speed_state'] + self.rest_api = CloudManagerRestAPI(self.module) + self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + self.rest_api.api_root_path = '/occm/api/gcp/%s' % ('ha' if self.parameters['is_ha'] else 'vsa') + self.headers = { + 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id']) + } + + @staticmethod + def has_self_link(param): + return param.startswith(("https://www.googleapis.com/compute/", "projects/")) + + def create_cvo_gcp(self): + + if self.parameters.get('workspace_id') is None: + response, msg = self.na_helper.get_tenant(self.rest_api, self.headers) + 
if response is None: + self.module.fail_json(msg) + self.parameters['workspace_id'] = response + + if self.parameters.get('nss_account') is None: + if self.parameters.get('platform_serial_number') is not None: + if not self.parameters['platform_serial_number'].startswith('Eval-'): + if self.parameters['license_type'] == 'gcp-cot-premium-byol' or self.parameters['license_type'] == 'gcp-ha-cot-premium-byol': + response, msg = self.na_helper.get_nss(self.rest_api, self.headers) + if response is None: + self.module.fail_json(msg) + self.parameters['nss_account'] = response + + if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo': + self.parameters['license_type'] == 'ha-capacity-paygo' + + json = {"name": self.parameters['name'], + "region": self.parameters['zone'], + "tenantId": self.parameters['workspace_id'], + "vpcId": self.parameters['vpc_id'], + "gcpServiceAccount": self.parameters['gcp_service_account'], + "gcpVolumeSize": { + "size": self.parameters['gcp_volume_size'], + "unit": self.parameters['gcp_volume_size_unit']}, + "gcpVolumeType": self.parameters['gcp_volume_type'], + "svmPassword": self.parameters['svm_password'], + "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'], + "enableCompliance": self.parameters['enable_compliance'], + "vsaMetadata": { + "ontapVersion": self.parameters['ontap_version'], + "licenseType": self.parameters['license_type'], + "useLatestVersion": self.parameters['use_latest_version'], + "instanceType": self.parameters['instance_type']} + } + + if self.parameters['is_ha'] is False: + if self.parameters.get('writing_speed_state') is None: + self.parameters['writing_speed_state'] = 'NORMAL' + json.update({'writingSpeedState': self.parameters['writing_speed_state'].upper()}) + + if self.parameters.get('data_encryption_type') is not None and self.parameters['data_encryption_type'] == "GCP": + json.update({'dataEncryptionType': self.parameters['data_encryption_type']}) + if 
self.parameters.get('gcp_encryption_parameters') is not None: + json.update({"gcpEncryptionParameters": {"key": self.parameters['gcp_encryption_parameters']}}) + + if self.parameters.get('provided_license') is not None: + json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']}) + + # clean default value if it is not by Capacity license + if not self.parameters['license_type'].endswith('capacity-paygo'): + json['vsaMetadata'].update({"capacityPackageName": ''}) + + if self.parameters.get('capacity_package_name') is not None: + json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']}) + + if self.parameters.get('project_id'): + json.update({'project': self.parameters['project_id']}) + + if self.parameters.get('nss_account'): + json.update({'nssAccount': self.parameters['nss_account']}) + + if self.parameters.get('subnet_id'): + json.update({'subnetId': self.parameters['subnet_id']}) + + if self.parameters.get('subnet_path'): + json.update({'subnetPath': self.parameters['subnet_path']}) + + if self.parameters.get('platform_serial_number') is not None: + json.update({"serialNumber": self.parameters['platform_serial_number']}) + + if self.parameters.get('capacity_tier') is not None and self.parameters['capacity_tier'] == "cloudStorage": + json.update({"capacityTier": self.parameters['capacity_tier'], + "tierLevel": self.parameters['tier_level']}) + + if self.parameters.get('svm_name') is not None: + json.update({"svmName": self.parameters['svm_name']}) + + if self.parameters.get('gcp_labels') is not None: + labels = [] + for each_label in self.parameters['gcp_labels']: + label = { + 'labelKey': each_label['label_key'], + 'labelValue': each_label['label_value'] + } + + labels.append(label) + json.update({"gcpLabels": labels}) + + if self.parameters.get('firewall_rule'): + json.update({'firewallRule': self.parameters['firewall_rule']}) + + if self.parameters['is_ha'] is True: + ha_params = dict() + + if 
self.parameters.get('network_project_id') is not None: + network_project_id = self.parameters.get('network_project_id') + else: + network_project_id = self.parameters['project_id'] + + if not self.has_self_link(self.parameters['subnet_id']): + json.update({'subnetId': 'projects/%s/regions/%s/subnetworks/%s' % (network_project_id, + self.parameters['zone'][:-2], + self.parameters['subnet_id'])}) + + if self.parameters.get('platform_serial_number_node1'): + ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1'] + + if self.parameters.get('platform_serial_number_node2'): + ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2'] + + if self.parameters.get('node1_zone'): + ha_params["node1Zone"] = self.parameters['node1_zone'] + + if self.parameters.get('node2_zone'): + ha_params["node2Zone"] = self.parameters['node2_zone'] + + if self.parameters.get('mediator_zone'): + ha_params["mediatorZone"] = self.parameters['mediator_zone'] + + if self.parameters.get('vpc0_node_and_data_connectivity'): + if self.has_self_link(self.parameters['vpc0_node_and_data_connectivity']): + ha_params["vpc0NodeAndDataConnectivity"] = self.parameters['vpc0_node_and_data_connectivity'] + else: + ha_params["vpc0NodeAndDataConnectivity"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format( + network_project_id, self.parameters['vpc0_node_and_data_connectivity']) + + if self.parameters.get('vpc1_cluster_connectivity'): + if self.has_self_link(self.parameters['vpc1_cluster_connectivity']): + ha_params["vpc1ClusterConnectivity"] = self.parameters['vpc1_cluster_connectivity'] + else: + ha_params["vpc1ClusterConnectivity"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format( + network_project_id, self.parameters['vpc1_cluster_connectivity']) + + if self.parameters.get('vpc2_ha_connectivity'): + if self.has_self_link(self.parameters['vpc2_ha_connectivity']): + ha_params["vpc2HAConnectivity"] = self.parameters['vpc2_ha_connectivity'] + 
else: + ha_params["vpc2HAConnectivity"] = "https://www.googleapis.com/compute/v1/projects/{0}/global/networks" \ + "/{1}".format(network_project_id, self.parameters['vpc2_ha_connectivity']) + + if self.parameters.get('vpc3_data_replication'): + if self.has_self_link(self.parameters['vpc3_data_replication']): + ha_params["vpc3DataReplication"] = self.parameters['vpc3_data_replication'] + else: + ha_params["vpc3DataReplication"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format( + network_project_id, self.parameters['vpc3_data_replication']) + + if self.parameters.get('subnet0_node_and_data_connectivity'): + if self.has_self_link(self.parameters['subnet0_node_and_data_connectivity']): + ha_params["subnet0NodeAndDataConnectivity"] = self.parameters['subnet0_node_and_data_connectivity'] + else: + ha_params["subnet0NodeAndDataConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".\ + format(network_project_id, self.parameters['zone'][:-2], self.parameters['subnet0_node_and_data_connectivity']) + + if self.parameters.get('subnet1_cluster_connectivity'): + if self.has_self_link(self.parameters['subnet1_cluster_connectivity']): + ha_params["subnet1ClusterConnectivity"] = self.parameters['subnet1_cluster_connectivity'] + else: + ha_params["subnet1ClusterConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".format( + network_project_id, self.parameters['zone'][:-2], + self.parameters['subnet1_cluster_connectivity']) + + if self.parameters.get('subnet2_ha_connectivity'): + if self.has_self_link(self.parameters['subnet2_ha_connectivity']): + ha_params["subnet2HAConnectivity"] = self.parameters['subnet2_ha_connectivity'] + else: + ha_params["subnet2HAConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".format( + network_project_id, self.parameters['zone'][:-2], + self.parameters['subnet2_ha_connectivity']) + + if self.parameters.get('subnet3_data_replication'): + if self.has_self_link(self.parameters['subnet3_data_replication']): 
+ ha_params["subnet3DataReplication"] = self.parameters['subnet3_data_replication'] + else: + ha_params["subnet3DataReplication"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}". \ + format(network_project_id, self.parameters['zone'][:-2], + self.parameters['subnet3_data_replication']) + + if self.parameters.get('vpc0_firewall_rule_name'): + ha_params["vpc0FirewallRuleName"] = self.parameters['vpc0_firewall_ruleName'] + + if self.parameters.get('vpc1_firewall_rule_name'): + ha_params["vpc1FirewallRuleName"] = self.parameters['vpc1_firewall_rule_name'] + + if self.parameters.get('vpc2_firewall_rule_name'): + ha_params["vpc2FirewallRuleName"] = self.parameters['vpc2_firewall_rule_name'] + + if self.parameters.get('vpc3_firewall_rule_name'): + ha_params["vpc3FirewallRuleName"] = self.parameters['vpc3_firewall_rule_name'] + + json["haParams"] = ha_params + + api_url = '%s/working-environments' % self.rest_api.api_root_path + response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers) + if error is not None: + self.module.fail_json( + msg="Error: unexpected response on creating cvo gcp: %s, %s" % (str(error), str(response))) + working_environment_id = response['publicId'] + wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id) + err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60) + + if err is not None: + self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO GCP: %s" % str(err)) + return working_environment_id + + def update_cvo_gcp(self, working_environment_id, modify): + base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id) + for item in modify: + if item == 'svm_password': + response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if 
item == 'svm_name': + response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'gcp_labels': + tag_list = None + if 'gcp_labels' in self.parameters: + tag_list = self.parameters['gcp_labels'] + response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'gcp_labels', tag_list) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'tier_level': + response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'writing_speed_state': + response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'ontap_version': + response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + if item == 'instance_type' or item == 'license_type': + response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers, + self.parameters['instance_type'], + self.parameters['license_type']) + if error is not None: + self.module.fail_json(changed=False, msg=error) + + def delete_cvo_gcp(self, we_id): + """ + Delete GCP CVO + """ + api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id) + response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers) + if error is not None: + self.module.fail_json(msg="Error: unexpected response on deleting cvo gcp: %s, %s" % (str(error), str(response))) + + wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % 
str(on_cloud_request_id) + err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60) + if err is not None: + self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting cvo gcp: %s" % str(err)) + + def apply(self): + working_environment_id = None + modify = None + + current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers, + self.parameters['name'], "gcp") + if current: + self.parameters['working_environment_id'] = current['publicId'] + # check the action + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if current and self.parameters['state'] != 'absent': + working_environment_id = current['publicId'] + modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'gcp') + if error is not None: + self.module.fail_json(changed=False, msg=error) + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == "create": + working_environment_id = self.create_cvo_gcp() + elif cd_action == "delete": + self.delete_cvo_gcp(current['publicId']) + else: + self.update_cvo_gcp(current['publicId'], modify) + + self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id) + + +def main(): + """ + Create Cloud Manager CVO for GCP class instance and invoke apply + :return: None + """ + obj_store = NetAppCloudManagerCVOGCP() + obj_store.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py new file mode 100644 index 000000000..cbdf64f13 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py @@ -0,0 +1,235 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_info +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_info +short_description: NetApp Cloud Manager info +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.4.0' +author: NetApp Ansible Team (@carchi8py) + +description: + - This module allows you to gather various information about cloudmanager using REST APIs. + +options: + client_id: + required: true + type: str + description: + - The connector ID of the Cloud Manager Connector. + + gather_subsets: + type: list + elements: str + description: + - When supplied, this argument will restrict the information collected to a given subset. + - Possible values for this argument include + - 'working_environments_info' + - 'aggregates_info' + - 'accounts_info' + - 'account_info' + - 'agents_info' + - 'active_agents_info' + default: 'all' + +notes: +- Support check_mode +''' + +EXAMPLES = """ +- name: Get all available subsets + netapp.cloudmanager.na_cloudmanager_info: + client_id: "{{ client_id }}" + refresh_token: "{{ refresh_token }}" + gather_subsets: + - all + +- name: Collect data for cloud manager with indicated subsets + netapp.cloudmanager.na_cloudmanager_info: + client_id: "{{ client_id }}" + refresh_token: "{{ refresh_token }}" + gather_subsets: + - aggregates_info + - working_environments_info +""" + +RETURN = """ +info: + description: + - a dictionary of collected subsets + - each subset if in JSON format + returned: success + type: dict + sample: '{ + "info": { + "working_environments_info": [ + { + "azureVsaWorkingEnvironments": [], + "gcpVsaWorkingEnvironments": [], + "onPremWorkingEnvironments": [], + "vsaWorkingEnvironments": [ + { + "actionsRequired": null, + "activeActions": null, + "awsProperties": null, + "capacityFeatures": null, + "cbsProperties": null, + "cloudProviderName": "Amazon", + 
"cloudSyncProperties": null, + "clusterProperties": null, + "complianceProperties": null, + "creatorUserEmail": "samlp|NetAppSAML|test_user", + "cronJobSchedules": null, + "encryptionProperties": null, + "fpolicyProperties": null, + "haProperties": null, + "interClusterLifs": null, + "isHA": false, + "k8sProperties": null, + "monitoringProperties": null, + "name": "testAWS", + "ontapClusterProperties": null, + "publicId": "VsaWorkingEnvironment-3txYJOsX", + "replicationProperties": null, + "reservedSize": null, + "saasProperties": null, + "schedules": null, + "snapshotPolicies": null, + "status": null, + "supportRegistrationInformation": [], + "supportRegistrationProperties": null, + "supportedFeatures": null, + "svmName": "svm_testAWS", + "svms": null, + "tenantId": "Tenant-2345", + "workingEnvironmentType": "VSA" + } + ] + }, + null + ] + } + }' +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI + + +class NetAppCloudmanagerInfo(object): + ''' + Contains methods to parse arguments, + derive details of CloudmanagerInfo objects + and send requests to CloudmanagerInfo via + the restApi + ''' + + def __init__(self): + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + gather_subsets=dict(type='list', elements='str', default='all'), + client_id=dict(required=True, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_one_of=[['refresh_token', 'sa_client_id']], + required_together=[['sa_client_id', 'sa_secret_key']], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + # set up state variables + self.parameters = 
self.na_helper.set_parameters(self.module.params) + # Calling generic rest_api class + self.rest_api = CloudManagerRestAPI(self.module) + self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + self.rest_api.api_root_path = None + self.methods = dict( + working_environments_info=self.na_helper.get_working_environments_info, + aggregates_info=self.get_aggregates_info, + accounts_info=self.na_helper.get_accounts_info, + account_info=self.na_helper.get_account_info, + agents_info=self.na_helper.get_agents_info, + active_agents_info=self.na_helper.get_active_agents_info, + ) + self.headers = {} + if 'client_id' in self.parameters: + self.headers['X-Agent-Id'] = self.rest_api.format_client_id(self.parameters['client_id']) + + def get_aggregates_info(self, rest_api, headers): + ''' + Get aggregates info: there are 4 types of working environments. + Each of the aggregates will be categorized by working environment type and working environment id + ''' + aggregates = {} + # get list of working environments + working_environments, error = self.na_helper.get_working_environments_info(rest_api, headers) + if error is not None: + self.module.fail_json(msg="Error: Failed to get working environments: %s" % str(error)) + # Four types of working environments: + # azureVsaWorkingEnvironments, gcpVsaWorkingEnvironments, onPremWorkingEnvironments, vsaWorkingEnvironments + for working_env_type in working_environments: + we_aggregates = {} + # get aggregates for each working environment + for we in working_environments[working_env_type]: + provider = we['cloudProviderName'] + working_environment_id = we['publicId'] + self.na_helper.set_api_root_path(we, rest_api) + if provider != "Amazon": + api = '%s/aggregates/%s' % (rest_api.api_root_path, working_environment_id) + else: + api = '%s/aggregates?workingEnvironmentId=%s' % (rest_api.api_root_path, working_environment_id) + response, error, dummy = rest_api.get(api, None, header=headers) + if error: + 
self.module.fail_json(msg="Error: Failed to get aggregate list: %s" % str(error)) + we_aggregates[working_environment_id] = response + aggregates[working_env_type] = we_aggregates + return aggregates + + def get_info(self, func, rest_api): + ''' + Main get info function + ''' + return self.methods[func](rest_api, self.headers) + + def apply(self): + ''' + Apply action to the Cloud Manager + :return: None + ''' + info = {} + if 'all' in self.parameters['gather_subsets']: + self.parameters['gather_subsets'] = self.methods.keys() + for func in self.parameters['gather_subsets']: + if func in self.methods: + info[func] = self.get_info(func, self.rest_api) + else: + msg = '%s is not a valid gather_subset. Only %s are allowed' % (func, self.methods.keys()) + self.module.fail_json(msg=msg) + self.module.exit_json(changed=False, info=info) + + +def main(): + ''' + Main function + ''' + na_cloudmanager_info = NetAppCloudmanagerInfo() + na_cloudmanager_info.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py new file mode 100644 index 000000000..49e8e697e --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py @@ -0,0 +1,192 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_nss_account +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_nss_account +short_description: NetApp Cloud Manager nss account +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Create and Delete nss account. 
+ +options: + state: + description: + - Whether the specified nss account should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + client_id: + description: + - The connector ID of the Cloud Manager Connector. + required: true + type: str + + public_id: + description: + - The ID of the NSS account. + type: str + + name: + description: + - The name of the NSS account. + type: str + + username: + description: + - The NSS username. + required: true + type: str + + password: + description: + - The NSS password. + type: str + + vsa_list: + description: + - The working environment list. + type: list + elements: str + +notes: +- Support check_mode. +''' + +EXAMPLES = ''' +- name: Create nss account + netapp.cloudmanager.na_cloudmanager_nss_account: + state: present + name: test_cloud + username: test_cloud + password: password + client_id: your_client_id + refresh_token: your_refresh_token +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule + + +class NetAppCloudmanagerNssAccount(object): + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + name=dict(required=False, type='str'), + client_id=dict(required=True, type='str'), + username=dict(required=True, type='str'), + password=dict(required=False, type='str', no_log=True), + public_id=dict(required=False, type='str'), + vsa_list=dict(required=False, type='list', elements='str') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_one_of=[['refresh_token', 'sa_client_id']], + 
required_together=[['sa_client_id', 'sa_secret_key']], + required_if=[ + ('state', 'present', ['password']), + ], + supports_check_mode=True + ) + self.na_helper = NetAppModule() + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic rest_api class + self.rest_api = netapp_utils.CloudManagerRestAPI(self.module) + self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token() + self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + self.rest_api.api_root_path = '/occm/api/' + self.headers = { + 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id']) + } + + def get_nss_account(self): + response, err, dummy = self.rest_api.send_request("GET", "%s/accounts" % ( + self.rest_api.api_root_path), None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on getting nss account: %s, %s" % (str(err), str(response))) + if response is None: + return None + nss_accounts = [] + if response.get('nssAccounts'): + nss_accounts = response['nssAccounts'] + if len(nss_accounts) == 0: + return None + result = dict() + for account in nss_accounts: + if account['nssUserName'] == self.parameters['username']: + if self.parameters.get('public_id') and self.parameters['public_id'] != account['publicId']: + self.module.fail_json(changed=False, msg="Error: public_id '%s' does not match username." 
+ % account['publicId']) + else: + self.parameters['public_id'] = account['publicId'] + result['name'] = account['accountName'] + result['user_name'] = account['nssUserName'] + result['vsa_list'] = account['vsaList'] + return result + return None + + def create_nss_account(self): + account = dict() + if self.parameters.get('name'): + account['accountName'] = self.parameters['name'] + account['providerKeys'] = {'nssUserName': self.parameters['username'], + 'nssPassword': self.parameters['password']} + account['vsaList'] = [] + if self.parameters.get('vsa_list'): + account['vsaList'] = self.parameters['vsa_list'] + response, err, second_dummy = self.rest_api.send_request("POST", "%s/accounts/nss" % ( + self.rest_api.api_root_path), None, account, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on creating nss account: %s, %s" % (str(err), str(response))) + + def delete_nss_account(self): + response, err, second_dummy = self.rest_api.send_request("DELETE", "%s/accounts/%s" % ( + self.rest_api.api_root_path, self.parameters['public_id']), None, None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on deleting nss account: %s, %s" % (str(err), str(response))) + return None + + def apply(self): + current = self.get_nss_account() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_nss_account() + elif cd_action == 'delete': + self.delete_nss_account() + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + '''Main Function''' + account = NetAppCloudmanagerNssAccount() + account.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py 
b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py new file mode 100644 index 000000000..299e13ecf --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py @@ -0,0 +1,471 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_snapmirror +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_snapmirror +short_description: NetApp Cloud Manager SnapMirror +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.6.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Create or Delete SnapMirror relationship on Cloud Manager. + +options: + + state: + description: + - Whether the specified snapmirror relationship should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + source_working_environment_name: + description: + - The working environment name of the source volume. + type: str + + destination_working_environment_name: + description: + - The working environment name of the destination volume. + type: str + + source_working_environment_id: + description: + - The public ID of the working environment of the source volume. + type: str + + destination_working_environment_id: + description: + - The public ID of the working environment of the destination volume. + type: str + + destination_aggregate_name: + description: + - The aggregate in which the volume will be created. + - If not provided, Cloud Manager chooses the best aggregate for you. + type: str + + policy: + description: + - The SnapMirror policy name. + type: str + default: 'MirrorAllSnapshots' + + max_transfer_rate: + description: + - Maximum transfer rate limit KB/s. + - Use 0 for no limit, otherwise use number between 1024 and 2,147,482,624. 
+ type: int + default: 100000 + + source_svm_name: + description: + - The name of the source SVM. + - The default SVM name is used, if a name is not provided. + type: str + + destination_svm_name: + description: + - The name of the destination SVM. + - The default SVM name is used, if a name is not provided. + type: str + + source_volume_name: + description: + - The name of the source volume. + required: true + type: str + + destination_volume_name: + description: + - The name of the destination volume to be created for snapmirror relationship. + required: true + type: str + + schedule: + description: + - The name of the Schedule. + type: str + default: '1hour' + + provider_volume_type: + description: + - The underlying cloud provider volume type. + - For AWS ['gp3', 'gp2', 'io1', 'st1', 'sc1']. + - For Azure ['Premium_LRS','Standard_LRS','StandardSSD_LRS']. + - For GCP ['pd-balanced','pd-ssd','pd-standard']. + type: str + + capacity_tier: + description: + - The volume capacity tier for tiering cold data to object storage. + - The default values for each cloud provider are as follows, Amazon 'S3', Azure 'Blob', GCP 'cloudStorage'. + - If NONE, the capacity tier will not be set on volume creation. + type: str + choices: ['S3', 'Blob', 'cloudStorage', 'NONE'] + + tenant_id: + description: + - The NetApp account ID that the Connector will be associated with. To be used only when using FSx. + type: str + version_added: 21.14.0 + + client_id: + description: + - The connector ID of the Cloud Manager Connector. + required: true + type: str + +notes: +- Support check_mode. 
+''' + +EXAMPLES = ''' +- name: Create snapmirror with working_environment_name + netapp.cloudmanager.na_cloudmanager_snapmirror: + state: present + source_working_environment_name: source + destination_working_environment_name: dest + source_volume_name: source + destination_volume_name: source_copy + policy: MirrorAllSnapshots + schedule: 5min + max_transfer_rate: 102400 + client_id: client_id + refresh_token: refresh_token + +- name: Delete snapmirror + netapp.cloudmanager.na_cloudmanager_snapmirror: + state: absent + source_working_environment_name: source + destination_working_environment_name: dest + source_volume_name: source + destination_volume_name: source_copy + client_id: client_id + refresh_token: refresh_token +''' + +RETURN = r''' # ''' + + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI + + +PROVIDER_TO_CAPACITY_TIER = {'amazon': 'S3', 'azure': 'Blob', 'gcp': 'cloudStorage'} + + +class NetAppCloudmanagerSnapmirror: + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + source_working_environment_id=dict(required=False, type='str'), + destination_working_environment_id=dict(required=False, type='str'), + source_working_environment_name=dict(required=False, type='str'), + destination_working_environment_name=dict(required=False, type='str'), + destination_aggregate_name=dict(required=False, type='str'), + policy=dict(required=False, type='str', default='MirrorAllSnapshots'), + 
max_transfer_rate=dict(required=False, type='int', default='100000'), + schedule=dict(required=False, type='str', default='1hour'), + source_svm_name=dict(required=False, type='str'), + destination_svm_name=dict(required=False, type='str'), + source_volume_name=dict(required=True, type='str'), + destination_volume_name=dict(required=True, type='str'), + capacity_tier=dict(required=False, type='str', choices=['NONE', 'S3', 'Blob', 'cloudStorage']), + provider_volume_type=dict(required=False, type='str'), + tenant_id=dict(required=False, type='str'), + client_id=dict(required=True, type='str'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_one_of=[ + ['source_working_environment_id', 'source_working_environment_name'], + ['refresh_token', 'sa_client_id'], + ], + required_together=(['source_working_environment_id', 'destination_working_environment_id'], + ['source_working_environment_name', 'destination_working_environment_name'], + ['sa_client_id', 'sa_secret_key'], + ), + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = CloudManagerRestAPI(self.module) + self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + self.rest_api.api_root_path = None + self.headers = { + 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id']) + } + if self.rest_api.simulator: + self.headers.update({'x-simulator': 'true'}) + + def get_snapmirror(self): + source_we_info, dest_we_info, err = self.na_helper.get_working_environment_detail_for_snapmirror(self.rest_api, self.headers) + if err is not None: + self.module.fail_json(changed=False, msg=err) + + get_url = '/occm/api/replication/status/%s' % source_we_info['publicId'] + snapmirror_info, err, dummy = self.rest_api.send_request("GET", get_url, None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, 
msg='Error getting snapmirror relationship %s: %s.' % (err, snapmirror_info)) + sm_found = False + snapmirror = None + for sm in snapmirror_info: + if sm['destination']['volumeName'] == self.parameters['destination_volume_name']: + sm_found = True + snapmirror = sm + break + + if not sm_found: + return None + result = { + 'source_working_environment_id': source_we_info['publicId'], + 'destination_svm_name': snapmirror['destination']['svmName'], + 'destination_working_environment_id': dest_we_info['publicId'], + } + if not dest_we_info['publicId'].startswith('fs-'): + result['cloud_provider_name'] = dest_we_info['cloudProviderName'] + return result + + def create_snapmirror(self): + snapmirror_build_data = {} + replication_request = {} + replication_volume = {} + source_we_info, dest_we_info, err = self.na_helper.get_working_environment_detail_for_snapmirror(self.rest_api, self.headers) + if err is not None: + self.module.fail_json(changed=False, msg=err) + if self.parameters.get('capacity_tier') is not None: + if self.parameters['capacity_tier'] == 'NONE': + self.parameters.pop('capacity_tier') + else: + if dest_we_info.get('cloudProviderName'): + self.parameters['capacity_tier'] = PROVIDER_TO_CAPACITY_TIER[dest_we_info['cloudProviderName'].lower()] + + interclusterlifs_info = self.get_interclusterlifs(source_we_info['publicId'], dest_we_info['publicId']) + + if source_we_info['workingEnvironmentType'] != 'ON_PREM': + source_volumes = self.get_volumes(source_we_info, self.parameters['source_volume_name']) + else: + source_volumes = self.get_volumes_on_prem(source_we_info, self.parameters['source_volume_name']) + + if len(source_volumes) == 0: + self.module.fail_json(changed=False, msg='source volume not found') + + vol_found = False + vol_dest_quote = {} + source_volume_resp = {} + for vol in source_volumes: + if vol['name'] == self.parameters['source_volume_name']: + vol_found = True + vol_dest_quote = vol + source_volume_resp = vol + if 
self.parameters.get('source_svm_name') is not None and vol['svmName'] != self.parameters['source_svm_name']: + vol_found = False + if vol_found: + break + + if not vol_found: + self.module.fail_json(changed=False, msg='source volume not found') + + if self.parameters.get('source_svm_name') is None: + self.parameters['source_svm_name'] = source_volume_resp['svmName'] + + if self.parameters.get('destination_svm_name') is None: + if dest_we_info.get('svmName') is not None: + self.parameters['destination_svm_name'] = dest_we_info['svmName'] + else: + self.parameters['destination_working_environment_name'] = dest_we_info['name'] + dest_working_env_detail, err = self.na_helper.get_working_environment_details_by_name(self.rest_api, + self.headers, + self.parameters['destination_working_environment_name']) + if err: + self.module.fail_json(changed=False, msg='Error getting destination info %s: %s.' % (err, dest_working_env_detail)) + self.parameters['destination_svm_name'] = dest_working_env_detail['svmName'] + + if dest_we_info.get('workingEnvironmentType') and dest_we_info['workingEnvironmentType'] != 'ON_PREM'\ + and not dest_we_info['publicId'].startswith('fs-'): + quote = self.build_quote_request(source_we_info, dest_we_info, vol_dest_quote) + quote_response = self.quote_volume(quote) + replication_volume['numOfDisksApprovedToAdd'] = int(quote_response['numOfDisks']) + if 'iops' in quote: + replication_volume['iops'] = quote['iops'] + if 'throughput' in quote: + replication_volume['throughput'] = quote['throughput'] + if self.parameters.get('destination_aggregate_name') is not None: + replication_volume['advancedMode'] = True + else: + replication_volume['advancedMode'] = False + replication_volume['destinationAggregateName'] = quote_response['aggregateName'] + if self.parameters.get('provider_volume_type') is None: + replication_volume['destinationProviderVolumeType'] = source_volume_resp['providerVolumeType'] + + if self.parameters.get('capacity_tier') is not None: 
+ replication_volume['destinationCapacityTier'] = self.parameters['capacity_tier'] + replication_request['sourceWorkingEnvironmentId'] = source_we_info['publicId'] + if dest_we_info['publicId'].startswith('fs-'): + replication_request['destinationFsxId'] = dest_we_info['publicId'] + else: + replication_request['destinationWorkingEnvironmentId'] = dest_we_info['publicId'] + replication_volume['sourceVolumeName'] = self.parameters['source_volume_name'] + replication_volume['destinationVolumeName'] = self.parameters['destination_volume_name'] + replication_request['policyName'] = self.parameters['policy'] + replication_request['scheduleName'] = self.parameters['schedule'] + replication_request['maxTransferRate'] = self.parameters['max_transfer_rate'] + replication_volume['sourceSvmName'] = source_volume_resp['svmName'] + replication_volume['destinationSvmName'] = self.parameters['destination_svm_name'] + replication_request['sourceInterclusterLifIps'] = [interclusterlifs_info['interClusterLifs'][0]['address']] + replication_request['destinationInterclusterLifIps'] = [interclusterlifs_info['peerInterClusterLifs'][0]['address']] + + snapmirror_build_data['replicationRequest'] = replication_request + snapmirror_build_data['replicationVolume'] = replication_volume + + if dest_we_info['publicId'].startswith('fs-'): + api = '/occm/api/replication/fsx' + elif dest_we_info['workingEnvironmentType'] != 'ON_PREM': + api = '/occm/api/replication/vsa' + else: + api = '/occm/api/replication/onprem' + + response, err, on_cloud_request_id = self.rest_api.send_request("POST", api, None, snapmirror_build_data, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg='Error creating snapmirror relationship %s: %s.' 
% (err, response)) + wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id)) + err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "snapmirror", "create", 20, 5) + if err is not None: + self.module.fail_json(changed=False, msg=err) + + def get_volumes(self, working_environment_detail, name): + self.na_helper.set_api_root_path(working_environment_detail, self.rest_api) + response, err, dummy = self.rest_api.send_request("GET", "%s/volumes?workingEnvironmentId=%s&name=%s" % ( + self.rest_api.api_root_path, working_environment_detail['publicId'], name), None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg='Error getting volume %s: %s.' % (err, response)) + return response + + def quote_volume(self, quote): + response, err, on_cloud_request_id = self.rest_api.send_request("POST", '%s/volumes/quote' % + self.rest_api.api_root_path, None, quote, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg='Error quoting destination volume %s: %s.' % (err, response)) + wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id)) + err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "volume", "quote", 20, 5) + if err is not None: + self.module.fail_json(changed=False, msg=err) + return response + + def get_volumes_on_prem(self, working_environment_detail, name): + response, err, dummy = self.rest_api.send_request("GET", "/occm/api/onprem/volumes?workingEnvironmentId=%s&name=%s" % + (working_environment_detail['publicId'], name), None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg='Error getting volume on prem %s: %s.' 
% (err, response)) + return response + + def get_aggregate_detail(self, working_environment_detail, aggregate_name): + if working_environment_detail['workingEnvironmentType'] == 'ON_PREM': + api = "/occm/api/onprem/aggregates?workingEnvironmentId=%s" % working_environment_detail['publicId'] + else: + self.na_helper.set_api_root_path(working_environment_detail, self.rest_api) + api_root_path = self.rest_api.api_root_path + if working_environment_detail['cloudProviderName'] != "Amazon": + api = '%s/aggregates/%s' + else: + api = '%s/aggregates?workingEnvironmentId=%s' + api = api % (api_root_path, working_environment_detail['publicId']) + response, error, dummy = self.rest_api.get(api, header=self.headers) + if error: + self.module.fail_json(msg="Error: Failed to get aggregate list: %s" % str(error)) + for aggr in response: + if aggr['name'] == aggregate_name: + return aggr + return None + + def build_quote_request(self, source_we_info, dest_we_info, vol_dest_quote): + quote = dict() + quote['size'] = {'size': vol_dest_quote['size']['size'], 'unit': vol_dest_quote['size']['unit']} + quote['name'] = self.parameters['destination_volume_name'] + quote['snapshotPolicyName'] = vol_dest_quote['snapshotPolicy'] + quote['enableDeduplication'] = vol_dest_quote['deduplication'] + quote['enableThinProvisioning'] = vol_dest_quote['thinProvisioning'] + quote['enableCompression'] = vol_dest_quote['compression'] + quote['verifyNameUniqueness'] = True + quote['replicationFlow'] = True + + # Use source working environment to get physical properties info of volumes + aggregate = self.get_aggregate_detail(source_we_info, vol_dest_quote['aggregateName']) + if aggregate is None: + self.module.fail_json(changed=False, msg='Error getting aggregate on source volume') + # All the volumes in one aggregate have the same physical properties + if source_we_info['workingEnvironmentType'] != 'ON_PREM': + if aggregate['providerVolumes'][0]['diskType'] == 'gp3' or 
aggregate['providerVolumes'][0]['diskType'] == 'io1'\ + or aggregate['providerVolumes'][0]['diskType'] == 'io2': + quote['iops'] = aggregate['providerVolumes'][0]['iops'] + if aggregate['providerVolumes'][0]['diskType'] == 'gp3': + quote['throughput'] = aggregate['providerVolumes'][0]['throughput'] + quote['workingEnvironmentId'] = dest_we_info['publicId'] + quote['svmName'] = self.parameters['destination_svm_name'] + if self.parameters.get('capacity_tier') is not None: + quote['capacityTier'] = self.parameters['capacity_tier'] + + if self.parameters.get('provider_volume_type') is None: + quote['providerVolumeType'] = vol_dest_quote['providerVolumeType'] + else: + quote['providerVolumeType'] = self.parameters['provider_volume_type'] + + return quote + + def delete_snapmirror(self, sm_detail): + api_delete = '/occm/api/replication/%s/%s/%s' %\ + (sm_detail['destination_working_environment_id'], sm_detail['destination_svm_name'], self.parameters['destination_volume_name']) + dummy, err, dummy_second = self.rest_api.send_request("DELETE", api_delete, None, None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg='Error deleting snapmirror relationship %s: %s.' % (err, dummy)) + + def get_interclusterlifs(self, source_we_id, dest_we_id): + api_get = '/occm/api/replication/intercluster-lifs?peerWorkingEnvironmentId=%s&workingEnvironmentId=%s' % (dest_we_id, source_we_id) + response, err, dummy_second = self.rest_api.send_request("GET", api_get, None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg='Error getting interclusterlifs %s: %s.' 
% (err, response)) + return response + + def apply(self): + current = self.get_snapmirror() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_snapmirror() + elif cd_action == 'delete': + self.delete_snapmirror(current) + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + '''Main Function''' + volume = NetAppCloudmanagerSnapmirror() + volume.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py new file mode 100644 index 000000000..62c898c57 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py @@ -0,0 +1,660 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_cloudmanager_volume +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_cloudmanager_volume +short_description: NetApp Cloud Manager volume +extends_documentation_fragment: + - netapp.cloudmanager.netapp.cloudmanager +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Create, Modify or Delete volume on Cloud Manager. + +options: + state: + description: + - Whether the specified volume should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + name: + description: + - The name of the volume. + required: true + type: str + + working_environment_name: + description: + - The working environment name where the volume will be created. + type: str + + working_environment_id: + description: + - The public ID of the working environment where the volume will be created. 
+ type: str + + client_id: + description: + - The connector ID of the Cloud Manager Connector. + required: true + type: str + + size: + description: + - The size of the volume. + type: float + + size_unit: + description: + - The size unit of volume. + choices: ['GB'] + default: 'GB' + type: str + + snapshot_policy_name: + description: + - The snapshot policy name. + type: str + + provider_volume_type: + description: + - The underlying cloud provider volume type. + - For AWS is ["gp3", "gp2", "io1", "st1", "sc1"]. + - For Azure is ['Premium_LRS','Standard_LRS','StandardSSD_LRS']. + - For GCP is ['pd-balanced','pd-ssd','pd-standard']. + type: str + + enable_deduplication: + description: + - Enabling deduplication. + - Default to true if not specified. + type: bool + + enable_compression: + description: + - Enabling cpmpression. + - Default to true if not specified. + type: bool + + enable_thin_provisioning: + description: + - Enabling thin provisioning. + - Default to true if not specified. + type: bool + + svm_name: + description: + - The name of the SVM. The default SVM name is used, if a name is not provided. + type: str + + aggregate_name: + description: + - The aggregate in which the volume will be created. If not provided, Cloud Manager chooses the best aggregate. + type: str + + capacity_tier: + description: + - The volume's capacity tier for tiering cold data to object storage. + - The default values for each cloud provider are as follows. Amazon as 'S3', Azure as 'Blob', GCP as 'cloudStorage'. + - If 'NONE', the capacity tier will not be set on volume creation. + choices: ['NONE', 'S3', 'Blob', 'cloudStorage'] + type: str + + tiering_policy: + description: + - The tiering policy. + choices: ['none', 'snapshot_only', 'auto', 'all'] + type: str + + export_policy_type: + description: + - The export policy type (NFS protocol parameters). + type: str + + export_policy_ip: + description: + - Custom export policy list of IPs (NFS protocol parameters). 
+ type: list + elements: str + + export_policy_nfs_version: + description: + - Export policy protocol (NFS protocol parameters). + type: list + elements: str + + iops: + description: + - Provisioned IOPS. Needed only when provider_volume_type is "io1". + type: int + + throughput: + description: + - Unit is Mb/s. Valid range 125-1000. + - Required only when provider_volume_type is 'gp3'. + type: int + + volume_protocol: + description: + - The protocol for the volume. This affects the provided parameters. + choices: ['nfs', 'cifs', 'iscsi'] + type: str + default: 'nfs' + + share_name: + description: + - Share name (CIFS protocol parameters). + type: str + + permission: + description: + - CIFS share permission type (CIFS protocol parameters). + type: str + + users: + description: + - List of users with the permission (CIFS protocol parameters). + type: list + elements: str + + igroups: + description: + - List of igroups (iSCSI protocol parameters). + type: list + elements: str + + os_name: + description: + - Operating system (iSCSI protocol parameters). + type: str + + tenant_id: + description: + - The NetApp account ID that the Connector will be associated with. To be used only when using FSx. + type: str + version_added: 21.20.0 + + initiators: + description: + - Set of attributes of Initiators (iSCSI protocol parameters). + type: list + elements: dict + suboptions: + iqn: + description: The initiator node name. + required: true + type: str + alias: + description: The alias which associates with the node. + required: true + type: str + +notes: +- Support check_mode. 
+''' + +EXAMPLES = ''' +- name: Create nfs volume with working_environment_name + netapp.cloudmanager.na_cloudmanager_volume: + state: present + name: test_vol + size: 15 + size_unit: GB + working_environment_name: working_environment_1 + client_id: client_id + refresh_token: refresh_token + svm_name: svm_1 + snapshot_policy_name: default + export_policy_type: custom + export_policy_ip: ["10.0.0.1/16"] + export_policy_nfs_version: ["nfs3","nfs4"] + +- name: Delete volume + netapp.cloudmanager.na_cloudmanager_volume: + state: absent + name: test_vol + working_environment_name: working_environment_1 + client_id: client_id + refresh_token: refresh_token + svm_name: svm_1 +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule + + +class NetAppCloudmanagerVolume(object): + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.cloudmanager_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + working_environment_id=dict(required=False, type='str'), + working_environment_name=dict(required=False, type='str'), + client_id=dict(required=True, type='str'), + size=dict(required=False, type='float'), + size_unit=dict(required=False, choices=['GB'], default='GB'), + snapshot_policy_name=dict(required=False, type='str'), + provider_volume_type=dict(required=False, type='str'), + enable_deduplication=dict(required=False, type='bool'), + enable_thin_provisioning=dict(required=False, type='bool'), + enable_compression=dict(required=False, type='bool'), + svm_name=dict(required=False, type='str'), + 
aggregate_name=dict(required=False, type='str'), + capacity_tier=dict(required=False, type='str', choices=['NONE', 'S3', 'Blob', 'cloudStorage']), + tiering_policy=dict(required=False, type='str', choices=['none', 'snapshot_only', 'auto', 'all']), + export_policy_type=dict(required=False, type='str'), + export_policy_ip=dict(required=False, type='list', elements='str'), + export_policy_nfs_version=dict(required=False, type='list', elements='str'), + iops=dict(required=False, type='int'), + throughput=dict(required=False, type='int'), + volume_protocol=dict(required=False, type='str', choices=['nfs', 'cifs', 'iscsi'], default='nfs'), + share_name=dict(required=False, type='str'), + permission=dict(required=False, type='str'), + users=dict(required=False, type='list', elements='str'), + igroups=dict(required=False, type='list', elements='str'), + os_name=dict(required=False, type='str'), + tenant_id=dict(required=False, type='str'), + initiators=dict(required=False, type='list', elements='dict', options=dict( + alias=dict(required=True, type='str'), + iqn=dict(required=True, type='str'),)), + + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_one_of=[ + ['refresh_token', 'sa_client_id'], + ['working_environment_name', 'working_environment_id'], + ], + required_together=[['sa_client_id', 'sa_secret_key']], + required_if=[ + ['provider_volume_type', 'gp3', ['iops', 'throughput']], + ['provider_volume_type', 'io1', ['iops']], + ['capacity_tier', 'S3', ['tiering_policy']], + ], + # enable_thin_provisioning reflects storage efficiency. 
+ required_by={ + 'capacity_tier': ('tiering_policy', 'enable_thin_provisioning'), + }, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic rest_api class + self.rest_api = netapp_utils.CloudManagerRestAPI(self.module) + self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token() + self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST'] + self.headers = { + 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id']) + } + if self.rest_api.simulator: + self.headers.update({'x-simulator': 'true'}) + if self.parameters.get('tenant_id'): + working_environment_detail, error = self.na_helper.get_aws_fsx_details(self.rest_api, self.headers, self.parameters['working_environment_name']) + elif self.parameters.get('working_environment_id'): + working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers) + else: + working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api, + self.headers, + self.parameters['working_environment_name']) + if working_environment_detail is None: + self.module.fail_json(msg="Error: Cannot find working environment, if it is an AWS FSxN, please provide tenant_id: %s" % str(error)) + self.parameters['working_environment_id'] = working_environment_detail['publicId']\ + if working_environment_detail.get('publicId') else working_environment_detail['id'] + self.na_helper.set_api_root_path(working_environment_detail, self.rest_api) + self.is_fsx = self.parameters['working_environment_id'].startswith('fs-') + + if self.parameters.get('svm_name') is None: + fsx_path = '' + if self.is_fsx: + fsx_path = '/svms' + response, err, dummy = self.rest_api.send_request("GET", "%s/working-environments/%s%s" % ( + self.rest_api.api_root_path, self.parameters['working_environment_id'], fsx_path), 
None, None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on getting svm: %s, %s" % (str(err), str(response))) + if self.is_fsx: + self.parameters['svm_name'] = response[0]['name'] + else: + self.parameters['svm_name'] = response['svmName'] + + if self.parameters['volume_protocol'] == 'nfs': + extra_options = [] + for option in ['share_name', 'permission', 'users', 'igroups', 'os_name', 'initiator']: + if self.parameters.get(option) is not None: + extra_options.append(option) + if len(extra_options) > 0: + self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is nfs: " + " %s" % extra_options) + elif self.parameters['volume_protocol'] == 'cifs': + extra_options = [] + for option in ['export_policy_type', 'export_policy_ip', 'export_policy_nfs_version', 'igroups', 'os_name', 'initiator']: + if self.parameters.get(option) is not None: + extra_options.append(option) + if len(extra_options) > 0: + self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is cifs: " + "%s" % extra_options) + else: + extra_options = [] + for option in ['export_policy_type', 'export_policy_ip', 'export_policy_nfs_version', 'share_name', 'permission', 'users']: + if self.parameters.get(option) is not None: + extra_options.append(option) + if len(extra_options) > 0: + self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is iscsi: " + "%s" % extra_options) + + if self.parameters.get('igroups'): + current_igroups = [] + for igroup in self.parameters['igroups']: + current = self.get_igroup(igroup) + current_igroups.append(current) + if any(isinstance(x, dict) for x in current_igroups) and None in current_igroups: + self.module.fail_json(changed=False, msg="Error: can not specify existing" + "igroup and new igroup together.") + if len(current_igroups) > 1 and None in current_igroups: + 
self.module.fail_json(changed=False, msg="Error: can not create more than one igroups.") + if current_igroups[0] is None: + if self.parameters.get('initiators') is None: + self.module.fail_json(changed=False, msg="Error: initiator is required when creating new igroup.") + + if self.parameters.get('users'): + # When creating volume, 'Everyone' must have upper case E, 'everyone' will not work. + # When modifying volume, 'everyone' is fine. + new_users = [] + for user in self.parameters['users']: + if user.lower() == 'everyone': + new_users.append('Everyone') + else: + new_users.append(user) + self.parameters['users'] = new_users + + def get_volume(self): + if self.is_fsx: + query_param = 'fileSystemId' + else: + query_param = 'workingEnvironmentId' + response, err, dummy = self.rest_api.send_request("GET", "%s/volumes?%s=%s" % ( + self.rest_api.api_root_path, query_param, self.parameters['working_environment_id']), None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on getting volume: %s, %s" % (str(err), str(response))) + target_vol = dict() + if response is None: + return None + for volume in response: + if volume['name'] == self.parameters['name']: + target_vol['name'] = volume['name'] + target_vol['enable_deduplication'] = volume['deduplication'] + target_vol['enable_thin_provisioning'] = volume['thinProvisioning'] + target_vol['enable_compression'] = volume['compression'] + if self.parameters.get('size'): + target_vol['size'] = volume['size']['size'] + if self.parameters.get('size_unit'): + target_vol['size_unit'] = volume['size']['unit'] + if self.parameters.get('export_policy_nfs_version') and volume.get('exportPolicyInfo'): + target_vol['export_policy_nfs_version'] = volume['exportPolicyInfo']['nfsVersion'] + if self.parameters.get('export_policy_ip') and volume.get('exportPolicyInfo'): + target_vol['export_policy_ip'] = volume['exportPolicyInfo']['ips'] + if 
self.parameters.get('export_policy_type') and volume.get('exportPolicyInfo'): + target_vol['export_policy_type'] = volume['exportPolicyInfo']['policyType'] + if self.parameters.get('snapshot_policy'): + target_vol['snapshot_policy'] = volume['snapshotPolicy'] + if self.parameters.get('provider_volume_type'): + target_vol['provider_volume_type'] = volume['providerVolumeType'] + if self.parameters.get('capacity_tier') and self.parameters.get('capacity_tier') != 'NONE': + target_vol['capacity_tier'] = volume['capacityTier'] + if self.parameters.get('tiering_policy'): + target_vol['tiering_policy'] = volume['tieringPolicy'] + if self.parameters.get('share_name') and volume.get('shareInfo'): + target_vol['share_name'] = volume['shareInfo'][0]['shareName'] + if self.parameters.get('users') and volume.get('shareInfo'): + if len(volume['shareInfo'][0]['accessControlList']) > 0: + target_vol['users'] = volume['shareInfo'][0]['accessControlList'][0]['users'] + else: + target_vol['users'] = [] + if self.parameters.get('users') and volume.get('shareInfo'): + if len(volume['shareInfo'][0]['accessControlList']) > 0: + target_vol['permission'] = volume['shareInfo'][0]['accessControlList'][0]['permission'] + else: + target_vol['permission'] = [] + if self.parameters.get('os_name') and volume.get('iscsiInfo'): + target_vol['os_name'] = volume['iscsiInfo']['osName'] + if self.parameters.get('igroups') and volume.get('iscsiInfo'): + target_vol['igroups'] = volume['iscsiInfo']['igroups'] + return target_vol + return None + + def create_volume(self): + exclude_list = ['client_id', 'size_unit', 'export_policy_name', 'export_policy_type', 'export_policy_ip', + 'export_policy_nfs_version', 'capacity_tier'] + quote = self.na_helper.convert_module_args_to_api(self.parameters, exclude_list) + quote['verifyNameUniqueness'] = True # Always hard coded to true. 
+ quote['unit'] = self.parameters['size_unit'] + quote['size'] = {'size': self.parameters['size'], 'unit': self.parameters['size_unit']} + create_aggregate_if_not_exists = True + if self.parameters.get('aggregate_name'): + quote['aggregateName'] = self.parameters['aggregate_name'] + create_aggregate_if_not_exists = False + + if self.parameters.get('capacity_tier') and self.parameters['capacity_tier'] != "NONE": + quote['capacityTier'] = self.parameters['capacity_tier'] + + if self.parameters['volume_protocol'] == 'nfs': + quote['exportPolicyInfo'] = dict() + if self.parameters.get('export_policy_type'): + quote['exportPolicyInfo']['policyType'] = self.parameters['export_policy_type'] + if self.parameters.get('export_policy_ip'): + quote['exportPolicyInfo']['ips'] = self.parameters['export_policy_ip'] + if self.parameters.get('export_policy_nfs_version'): + quote['exportPolicyInfo']['nfsVersion'] = self.parameters['export_policy_nfs_version'] + elif self.parameters['volume_protocol'] == 'iscsi': + iscsi_info = self.iscsi_volume_helper() + quote.update(iscsi_info) + else: + quote['shareInfo'] = dict() + quote['shareInfo']['accessControl'] = dict() + quote['shareInfo']['accessControl']['users'] = self.parameters['users'] + if self.parameters.get('permission'): + quote['shareInfo']['accessControl']['permission'] = self.parameters['permission'] + if self.parameters.get('share_name'): + quote['shareInfo']['shareName'] = self.parameters['share_name'] + if not self.is_fsx: + response, err, dummy = self.rest_api.send_request("POST", "%s/volumes/quote" % self.rest_api.api_root_path, + None, quote, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on quoting volume: %s, %s" % (str(err), str(response))) + quote['newAggregate'] = response['newAggregate'] + quote['aggregateName'] = response['aggregateName'] + quote['maxNumOfDisksApprovedToAdd'] = response['numOfDisks'] + else: + quote['fileSystemId'] = 
self.parameters['working_environment_id'] + if self.parameters.get('enable_deduplication'): + quote['deduplication'] = self.parameters.get('enable_deduplication') + if self.parameters.get('enable_thin_provisioning'): + quote['thinProvisioning'] = self.parameters.get('enable_thin_provisioning') + if self.parameters.get('enable_compression'): + quote['compression'] = self.parameters.get('enable_compression') + if self.parameters.get('snapshot_policy_name'): + quote['snapshotPolicy'] = self.parameters['snapshot_policy_name'] + if self.parameters.get('capacity_tier') and self.parameters['capacity_tier'] != "NONE": + quote['capacityTier'] = self.parameters['capacity_tier'] + if self.parameters.get('tiering_policy'): + quote['tieringPolicy'] = self.parameters['tiering_policy'] + if self.parameters.get('provider_volume_type'): + quote['providerVolumeType'] = self.parameters['provider_volume_type'] + if self.parameters.get('iops'): + quote['iops'] = self.parameters.get('iops') + if self.parameters.get('throughput'): + quote['throughput'] = self.parameters.get('throughput') + response, err, on_cloud_request_id = self.rest_api.send_request("POST", "%s/volumes?createAggregateIfNotFound=%s" % ( + self.rest_api.api_root_path, create_aggregate_if_not_exists), None, quote, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected on creating volume: %s, %s" % (str(err), str(response))) + wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id)) + err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "volume", "create", 20, 5) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response wait_on_completion for creating volume: %s, %s" % (str(err), str(response))) + + def modify_volume(self, modify): + vol = dict() + if self.parameters['volume_protocol'] == 'nfs': + export_policy_info = dict() + if self.parameters.get('export_policy_type'): + 
export_policy_info['policyType'] = self.parameters['export_policy_type'] + if self.parameters.get('export_policy_ip'): + export_policy_info['ips'] = self.parameters['export_policy_ip'] + if self.parameters.get('export_policy_nfs_version'): + export_policy_info['nfsVersion'] = self.parameters['export_policy_nfs_version'] + vol['exportPolicyInfo'] = export_policy_info + elif self.parameters['volume_protocol'] == 'cifs': + vol['shareInfo'] = dict() + vol['shareInfo']['accessControlList'] = [] + vol['shareInfo']['accessControlList'].append(dict()) + if self.parameters.get('users'): + vol['shareInfo']['accessControlList'][0]['users'] = self.parameters['users'] + if self.parameters.get('permission'): + vol['shareInfo']['accessControlList'][0]['permission'] = self.parameters['permission'] + if self.parameters.get('share_name'): + vol['shareInfo']['shareName'] = self.parameters['share_name'] + if modify.get('snapshot_policy_name'): + vol['snapshotPolicyName'] = self.parameters.get('snapshot_policy_name') + if modify.get('tiering_policy'): + vol['tieringPolicy'] = self.parameters.get('tiering_policy') + response, err, dummy = self.rest_api.send_request("PUT", "%s/volumes/%s/%s/%s" % ( + self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name'], + self.parameters['name']), None, vol, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on modifying volume: %s, %s" % (str(err), str(response))) + + def delete_volume(self): + response, err, dummy = self.rest_api.send_request("DELETE", "%s/volumes/%s/%s/%s" % ( + self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name'], + self.parameters['name']), None, None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on deleting volume: %s, %s" % (str(err), str(response))) + + def get_initiator(self, alias_name): + response, err, 
dummy = self.rest_api.send_request("GET", "%s/volumes/initiator" % ( + self.rest_api.api_root_path), None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on getting initiator: %s, %s" % (str(err), str(response))) + result = dict() + if response is None: + return None + for initiator in response: + if initiator.get('aliasName') and initiator.get('aliasName') == alias_name: + result['alias'] = initiator.get('aliasName') + result['iqn'] = initiator.get('iqn') + return result + return None + + def create_initiator(self, initiator): + ini = self.na_helper.convert_module_args_to_api(initiator) + response, err, dummy = self.rest_api.send_request("POST", "%s/volumes/initiator" % ( + self.rest_api.api_root_path), None, ini, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on creating initiator: %s, %s" % (str(err), str(response))) + + def get_igroup(self, igroup_name): + response, err, dummy = self.rest_api.send_request("GET", "%s/volumes/igroups/%s/%s" % ( + self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name']), + None, None, header=self.headers) + if err is not None: + self.module.fail_json(changed=False, msg="Error: unexpected response on getting igroup: %s, %s" % (str(err), str(response))) + result = dict() + if response is None: + return None + for igroup in response: + if igroup['igroupName'] == igroup_name: + result['igroup_name'] = igroup['igroupName'] + result['os_type'] = igroup['osType'] + result['portset_name'] = igroup['portsetName'] + result['igroup_type'] = igroup['igroupType'] + result['initiators'] = igroup['initiators'] + return result + return None + + def iscsi_volume_helper(self): + quote = dict() + quote['iscsiInfo'] = dict() + if self.parameters.get('igroups'): + current_igroups = [] + for igroup in self.parameters['igroups']: + current = self.get_igroup(igroup) + 
current_igroups.append(current) + for igroup in current_igroups: + if igroup is None: + quote['iscsiInfo']['igroupCreationRequest'] = dict() + quote['iscsiInfo']['igroupCreationRequest']['igroupName'] = self.parameters['igroups'][0] + iqn_list = [] + for initiator in self.parameters['initiators']: + if initiator.get('iqn'): + iqn_list.append(initiator['iqn']) + current_initiator = self.get_initiator(initiator['alias']) + if current_initiator is None: + initiator_request = dict() + if initiator.get('alias'): + initiator_request['aliasName'] = initiator['alias'] + if initiator.get('iqn'): + initiator_request['iqn'] = initiator['iqn'] + self.create_initiator(initiator_request) + quote['iscsiInfo']['igroupCreationRequest']['initiators'] = iqn_list + quote['iscsiInfo']['osName'] = self.parameters['os_name'] + + else: + quote['iscsiInfo']['igroups'] = self.parameters['igroups'] + quote['iscsiInfo']['osName'] = self.parameters['os_name'] + return quote + + def apply(self): + current = self.get_volume() + cd_action, modify = None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + unmodifiable = [] + for attr in modify: + if attr not in ['export_policy_ip', 'export_policy_nfs_version', 'snapshot_policy_name', 'users', + 'permission', 'tiering_policy', 'snapshot_policy_name']: + unmodifiable.append(attr) + if len(unmodifiable) > 0: + self.module.fail_json(changed=False, msg="%s cannot be modified." 
% str(unmodifiable)) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_volume() + elif cd_action == 'delete': + self.delete_volume() + elif modify: + self.modify_volume(modify) + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + '''Main Function''' + volume = NetAppCloudmanagerVolume() + volume.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/cloudmanager/requirements.txt b/ansible_collections/netapp/cloudmanager/requirements.txt new file mode 100644 index 000000000..169c9049e --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/requirements.txt @@ -0,0 +1,10 @@ +requests +boto3 +botocore +azure-mgmt-compute>=20.0.0 +azure-mgmt-core>=1.2.2 +azure-mgmt-network>=18.0.0 +azure-mgmt-resource>=16.1.0 +azure-mgmt-storage>=17.1.0 +msrestazure +azure-common \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/compat/__init__.py b/ansible_collections/netapp/cloudmanager/tests/unit/compat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/compat/builtins.py b/ansible_collections/netapp/cloudmanager/tests/unit/compat/builtins.py new file mode 100644 index 000000000..f60ee6782 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/compat/builtins.py @@ -0,0 +1,33 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/compat/mock.py b/ansible_collections/netapp/cloudmanager/tests/unit/compat/mock.py new file mode 100644 index 000000000..0972cd2e8 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/compat/mock.py @@ -0,0 +1,122 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. 
try:
    # Allow wildcard import because we really do want to import all of mock's
    # symbols into this compat shim
    # pylint: disable=wildcard-import,unused-wildcard-import
    from unittest.mock import *
except ImportError:
    # Python 2
    # pylint: disable=wildcard-import,unused-wildcard-import
    try:
        from mock import *
    except ImportError:
        print('You need the mock library installed on python2.x to run tests')


# Prior to 3.4.4, mock_open cannot handle binary read_data
if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
    file_spec = None

    def _iterate_read_data(read_data):
        # Helper for mock_open:
        # Retrieve lines from read_data via a generator so that separate calls to
        # readline, read, and readlines are properly interleaved
        sep = b'\n' if isinstance(read_data, bytes) else '\n'
        data_as_list = [l + sep for l in read_data.split(sep)]

        if data_as_list[-1] == sep:
            # If the last line ended in a newline, the list comprehension will have an
            # extra entry that's just a newline. Remove this.
            data_as_list = data_as_list[:-1]
        else:
            # If there wasn't an extra newline by itself, then the file being
            # emulated doesn't have a newline to end the last line remove the
            # newline that our naive format() added
            data_as_list[-1] = data_as_list[-1][:-1]

        for line in data_as_list:
            yield line

    def mock_open(mock=None, read_data=''):
        """
        A helper function to create a mock to replace the use of `open`. It works
        for `open` called directly or used as a context manager.

        The `mock` argument is the mock object to configure. If `None` (the
        default) then a `MagicMock` will be created for you, with the API limited
        to methods or attributes available on standard file handles.

        `read_data` is a string for the `read`, `readline`, and `readlines`
        methods of the file handle to return. This is an empty string by default.
        """
        def _readlines_side_effect(*args, **kwargs):
            if handle.readlines.return_value is not None:
                return handle.readlines.return_value
            return list(_data)

        def _read_side_effect(*args, **kwargs):
            if handle.read.return_value is not None:
                return handle.read.return_value
            return type(read_data)().join(_data)

        def _readline_side_effect():
            if handle.readline.return_value is not None:
                while True:
                    yield handle.readline.return_value
            for line in _data:
                yield line

        global file_spec
        if file_spec is None:
            import _io
            file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))

        if mock is None:
            mock = MagicMock(name='open', spec=open)

        handle = MagicMock(spec=file_spec)
        handle.__enter__.return_value = handle

        _data = _iterate_read_data(read_data)

        handle.write.return_value = None
        handle.read.return_value = None
        handle.readline.return_value = None
        handle.readlines.return_value = None

        handle.read.side_effect = _read_side_effect
        handle.readline.side_effect = _readline_side_effect()
        handle.readlines.side_effect = _readlines_side_effect

        mock.return_value = handle
        return mock
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +import pytest + +# Allow wildcard import because we really do want to import all of +# unittests's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') + + class TestCase: + """ skip everything """ + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as unittest2 may not be available') +else: + from unittest import * diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp.py new file mode 100644 index 000000000..959cbaef5 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp.py @@ -0,0 +1,506 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2021, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" unit tests for module_utils netapp.py + + Provides wrappers for cloudmanager REST APIs +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# import copy # for deepcopy +import json +import pytest +import sys +try: + import requests.exceptions + HAS_REQUESTS_EXC = True +except ImportError: + HAS_REQUESTS_EXC = False + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils + +if (not netapp_utils.HAS_REQUESTS or not HAS_REQUESTS_EXC) and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + """prepare arguments so that they will be 
picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockModule(): + ''' rough mock for an Ansible module class ''' + def __init__(self): + self.params = {} + + def fail_json(self, *args, **kwargs): # pylint: disable=unused-argument + """function to simulate fail_json: package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class mockResponse: + def __init__(self, json_data, status_code, headers=None, raise_action=None): + self.json_data = json_data + self.status_code = status_code + self.content = json_data + self.headers = headers or {} + self.raise_action = raise_action + + def raise_for_status(self): + pass + + def json(self): + if self.raise_action == 'bad_json': + raise ValueError(self.raise_action) + return self.json_data + + +def create_module(args): + argument_spec = netapp_utils.cloudmanager_host_argument_spec() + set_module_args(args) + module = basic.AnsibleModule(argument_spec) + module.fail_json = fail_json + return module + + +def create_restapi_object(args): + module = create_module(args) + return netapp_utils.CloudManagerRestAPI(module) + + +def mock_args(feature_flags=None, client_id=None): + args = { + 'refresh_token': 'ABCDEFGS' + } + if feature_flags is not None: + args['feature_flags'] = feature_flags + if client_id is not None: + args['client_id'] = client_id + return args + + +TOKEN_DICT = { + 'access_token': 'access_token', + 'token_type': 'token_type' +} + + +def test_missing_params(): + module = MockModule() + with pytest.raises(KeyError) as exc: + 
netapp_utils.CloudManagerRestAPI(module) + assert exc.match('refresh_token') + + +@patch('requests.request') +def test_get_token_refresh(mock_request): + ''' successfully get token using refresh token ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + ] + # get_token is called when the object is created + rest_api = create_restapi_object(mock_args()) + print(rest_api.token_type, rest_api.token) + assert rest_api.token_type == TOKEN_DICT['token_type'] + assert rest_api.token == TOKEN_DICT['access_token'] + + +@patch('requests.request') +def test_negative_get_token_none(mock_request): + ''' missing refresh token and Service Account ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + ] + # get_token is called when the object is created + args = dict(mock_args()) + args.pop('refresh_token') + # get_token is called when the object is created + with pytest.raises(AnsibleFailJson) as exc: + rest_api = create_restapi_object(args) + msg = 'Missing refresh_token or sa_client_id and sa_secret_key' + assert msg in exc.value.args[0]['msg'] + + +@patch('requests.request') +def test_get_token_sa(mock_request): + ''' successfully get token using Service Account ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + ] + # get_token is called when the object is created + args = dict(mock_args()) + args.pop('refresh_token') + args['sa_client_id'] = '123' + args['sa_secret_key'] = 'a1b2c3' + rest_api = create_restapi_object(args) + print(rest_api.token_type, rest_api.token) + assert rest_api.token_type == TOKEN_DICT['token_type'] + assert rest_api.token == TOKEN_DICT['access_token'] + + +@patch('requests.request') +def test_negative_get_token(mock_request): + ''' error on OAUTH request ''' + mock_request.side_effect = [ + mockResponse(json_data={'message': 'error message'}, status_code=206) + ] + # get_token is called when the object is created 
+ with pytest.raises(AnsibleFailJson) as exc: + rest_api = create_restapi_object(mock_args()) + msg = 'Error acquiring token: error message' + assert msg in exc.value.args[0]['msg'] + + +@patch('requests.request') +def test_get_json(mock_request): + ''' get with no data ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data={'key': 'value'}, status_code=200, headers={'OnCloud-Request-Id': 'OCR_id'}) + ] + rest_api = create_restapi_object(mock_args()) + message, error, ocr = rest_api.get('api', None) + print(message, error, ocr) + assert message == {'key': 'value'} + assert error is None + assert ocr == 'OCR_id' + + +@patch('time.sleep') +@patch('requests.request') +def test_get_retries(mock_request, dont_sleep): + ''' get with no data ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + requests.exceptions.ConnectionError('Max retries exceeded with url:'), + requests.exceptions.ConnectionError('Max retries exceeded with url:'), + mockResponse(json_data={'key': 'value'}, status_code=200, headers={'OnCloud-Request-Id': 'OCR_id'}) + ] + rest_api = create_restapi_object(mock_args()) + message, error, ocr = rest_api.get('api', None) + print(message, error, ocr) + assert message == {'key': 'value'} + assert error is None + assert ocr == 'OCR_id' + + +@patch('time.sleep') +@patch('requests.request') +def test_get_retries_exceeded(mock_request, dont_sleep): + ''' get with no data ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + requests.exceptions.ConnectionError('Max retries exceeded with url:'), + requests.exceptions.ConnectionError('Max retries exceeded with url:'), + requests.exceptions.ConnectionError('Max retries exceeded with url:'), + mockResponse(json_data={'key': 'value'}, status_code=200, headers={'OnCloud-Request-Id': 'OCR_id'}) + ] + rest_api = create_restapi_object(mock_args()) + message, error, 
ocr = rest_api.get('api', None) + print(message, error, ocr) + assert 'Max retries exceeded with url:' in error + + +@patch('requests.request') +def test_empty_get_sent_bad_json(mock_request): + ''' get with invalid json ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data='anything', status_code=200, raise_action='bad_json') + ] + rest_api = create_restapi_object(mock_args()) + message, error, ocr = rest_api.get('api', None) + print(message, error, ocr) + assert message is None + assert error is None + assert ocr is None + + +@patch('requests.request') +def test_empty_get_sent_203(mock_request): + ''' get with no data and 203 status code ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data={}, status_code=203) + ] + rest_api = create_restapi_object(mock_args()) + message, error, ocr = rest_api.get('api', None) + print(message, error, ocr) + assert message == {} + assert error is None + assert ocr is None + + +@patch('requests.request') +def test_negative_get_sent_203(mock_request): + ''' get with 203 status code - not sure we should error out here ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data={'message': 'error message'}, status_code=203) + ] + rest_api = create_restapi_object(mock_args()) + message, error, ocr = rest_api.get('api', None) + print(message, error, ocr) + assert message == {'message': 'error message'} + assert error == 'error message' + assert ocr is None + + +@patch('requests.request') +def test_negative_get_sent_300(mock_request): + ''' get with 300 status code - 300 indicates an error ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data={}, status_code=300) + ] + rest_api = create_restapi_object(mock_args()) + message, error, ocr = rest_api.get('api', None) + 
print(message, error, ocr) + assert message == {} + assert error == '300' + assert ocr is None + + +@patch('requests.request') +def test_negative_get_raise_http_exc(mock_request): + ''' get with HTTPError exception ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + requests.exceptions.HTTPError('some exception') + ] + rest_api = create_restapi_object(mock_args()) + message, error, ocr = rest_api.get('api', None) + print(message, error, ocr) + assert message is None + assert error == 'some exception' + assert ocr is None + + +@patch('requests.request') +def test_negative_get_raise_conn_exc(mock_request): + ''' get with ConnectionError exception ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + requests.exceptions.ConnectionError('some exception') + ] + rest_api = create_restapi_object(mock_args()) + message, error, ocr = rest_api.get('api', None) + print(message, error, ocr) + assert message is None + assert error == 'some exception' + assert ocr is None + + +@patch('requests.request') +def test_negative_get_raise_oserror_exc(mock_request): + ''' get with a general exception ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + OSError('some exception') + ] + rest_api = create_restapi_object(mock_args()) + message, error, ocr = rest_api.get('api', None) + print(message, error, ocr) + assert message is None + assert error == 'some exception' + assert ocr is None + + +def test_has_feature_success_default(): + ''' existing feature_flag with default ''' + flag = 'show_modified' + module = create_module(mock_args()) + value = netapp_utils.has_feature(module, flag) + assert value + + +def test_has_feature_success_user_true(): + ''' existing feature_flag with value set to True ''' + flag = 'user_deprecation_warning' + args = dict(mock_args({flag: True})) + module = create_module(args) + value = netapp_utils.has_feature(module, 
flag) + assert value + + +def test_has_feature_success_user_false(): + ''' existing feature_flag with value set to False ''' + flag = 'user_deprecation_warning' + args = dict(mock_args({flag: False})) + print(args) + module = create_module(args) + value = netapp_utils.has_feature(module, flag) + assert not value + + +def test_has_feature_invalid_key(): + ''' existing feature_flag with unknown key ''' + flag = 'deprecation_warning_bad_key' + module = create_module(mock_args()) + with pytest.raises(AnsibleFailJson) as exc: + netapp_utils.has_feature(module, flag) + msg = 'Internal error: unexpected feature flag: %s' % flag + assert exc.value.args[0]['msg'] == msg + + +def test_has_feature_invalid_bool(): + ''' existing feature_flag with non boolean value ''' + flag = 'deprecation_warning_key' + module = create_module(mock_args({flag: 'str'})) + with pytest.raises(AnsibleFailJson) as exc: + netapp_utils.has_feature(module, flag) + msg = "Error: expected bool type for feature flag" + assert msg in exc.value.args[0]['msg'] + + +STATUS_DICT = { + 'status': 1, + 'error': None +} + + +@patch('time.sleep') +@patch('requests.request') +def test_check_task_status(mock_request, mock_sleep): + ''' successful get with 2 retries ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + OSError('some exception'), + requests.exceptions.ConnectionError('some exception'), + mockResponse(json_data=STATUS_DICT, status_code=200) + ] + rest_api = create_restapi_object(mock_args()) + rest_api.module.params['client_id'] = '123' + status, error_msg, error = rest_api.check_task_status('api') + assert status == STATUS_DICT['status'] + assert error_msg == STATUS_DICT['error'] + assert error is None + + +@patch('time.sleep') +@patch('requests.request') +def test_negative_check_task_status(mock_request, mock_sleep): + ''' get with 4 failed retries ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + 
OSError('some exception'), + requests.exceptions.ConnectionError('some exception'), + requests.exceptions.ConnectionError('some exception'), + requests.exceptions.HTTPError('some exception'), + ] + rest_api = create_restapi_object(mock_args()) + rest_api.module.params['client_id'] = '123' + status, error_msg, error = rest_api.check_task_status('api') + assert status == 0 + assert error_msg == '' + assert error == 'some exception' + + +@patch('time.sleep') +@patch('requests.request') +def test_wait_on_completion(mock_request, mock_sleep): + ''' successful get with 2 retries ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + OSError('some exception'), + requests.exceptions.ConnectionError('some exception'), + mockResponse(json_data=STATUS_DICT, status_code=200) + ] + rest_api = create_restapi_object(mock_args()) + rest_api.module.params['client_id'] = '123' + error = rest_api.wait_on_completion('api', 'action', 'task', 2, 1) + assert error is None + + +@patch('time.sleep') +@patch('requests.request') +def test_negative_wait_on_completion_failure(mock_request, mock_sleep): + ''' successful get with 2 retries, but status is -1 ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + OSError('some exception'), + requests.exceptions.ConnectionError('some exception'), + mockResponse(json_data={'status': -1, 'error': 'task_error'}, status_code=200) + ] + rest_api = create_restapi_object(mock_args()) + rest_api.module.params['client_id'] = '123' + error = rest_api.wait_on_completion('api', 'action', 'task', 2, 1) + assert error == 'Failed to task action, error: task_error' + + +@patch('time.sleep') +@patch('requests.request') +def test_negative_wait_on_completion_error(mock_request, mock_sleep): + ''' get with 4 failed retries ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + OSError('some exception'), + 
requests.exceptions.ConnectionError('some exception'), + requests.exceptions.ConnectionError('some exception'), + requests.exceptions.HTTPError('some http exception'), + ] + rest_api = create_restapi_object(mock_args()) + rest_api.module.params['client_id'] = '123' + error = rest_api.wait_on_completion('api', 'action', 'task', 2, 1) + assert error == 'some http exception' + + +@patch('time.sleep') +@patch('requests.request') +def test_negative_wait_on_completion_timeout(mock_request, mock_sleep): + ''' successful get with 2 retries, but status is 0 ''' + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + OSError('some exception'), + requests.exceptions.ConnectionError('some exception'), + mockResponse(json_data={'status': 0, 'error': 'task_error'}, status_code=200), + mockResponse(json_data={'status': 0, 'error': 'task_error'}, status_code=200), + mockResponse(json_data={'status': 0, 'error': 'task_error'}, status_code=200) + ] + rest_api = create_restapi_object(mock_args()) + rest_api.module.params['client_id'] = '123' + error = rest_api.wait_on_completion('api', 'action', 'task', 2, 1) + assert error == 'Taking too long for action to task or not properly setup' diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module.py new file mode 100644 index 000000000..33041f64f --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module.py @@ -0,0 +1,578 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. 
+# +# Copyright (c) 2021, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +""" unit tests for module_utils netapp.py + + Provides wrappers for cloudmanager REST APIs +""" + +from __future__ import (absolute_import, division, print_function) +from logging import error +__metaclass__ = type + +# import copy # for deepcopy +import json +import sys +import pytest +try: + import requests.exceptions + HAS_REQUESTS_EXC = True +except ImportError: + HAS_REQUESTS_EXC = False + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import cmp as nm_cmp, NetAppModule +if (not netapp_utils.HAS_REQUESTS or not HAS_REQUESTS_EXC) and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class mockResponse: + def __init__(self, json_data, status_code, headers=None): + self.json_data = json_data + self.status_code = status_code + self.content = json_data + self.headers = headers or {} + + def json(self): + return self.json_data + + +def create_module(args): + argument_spec = netapp_utils.cloudmanager_host_argument_spec() + set_module_args(args) + module = basic.AnsibleModule(argument_spec) + return module + + +def create_restapi_object(args): + module = create_module(args) + return netapp_utils.CloudManagerRestAPI(module) + + +def mock_args(feature_flags=None, client_id=None): + args = { + 'refresh_token': 'ABCDEFGS' + } + return args + + +TOKEN_DICT = { + 'access_token': 
'access_token', + 'token_type': 'token_type' +} + + +def test_cmp(): + assert nm_cmp(None, 'x') == -1 + assert nm_cmp('y', 'x') == 1 + assert nm_cmp('y', 'X') == 1 + assert nm_cmp(['x', 'y'], ['x', 'X']) == 1 + assert nm_cmp(['x', 'x'], ['x', 'X']) == 0 + + +def test_set_parameters(): + helper = NetAppModule() + helper.set_parameters({'a': None, 'b': 'b'}) + assert 'a' not in helper.parameters + assert 'b' in helper.parameters + + +def test_cd_action(): + desired = {} + helper = NetAppModule() + assert helper.get_cd_action(None, desired) == 'create' + desired['state'] = 'present' + assert helper.get_cd_action(None, desired) == 'create' + assert helper.get_cd_action({}, desired) is None + desired['state'] = 'absent' + assert helper.get_cd_action(None, desired) is None + assert helper.get_cd_action({}, desired) == 'delete' + + +def test_compare_and_update_values(): + current = {'a': 'a', 'b': 'b'} + desired = {} + desired_key = [] + helper = NetAppModule() + assert helper.compare_and_update_values(current, desired, desired_key) == ({}, False) + desired_key = ['a'] + assert helper.compare_and_update_values(current, desired, desired_key) == ({'a': 'a'}, False) + desired = {'a': 'a'} + assert helper.compare_and_update_values(current, desired, desired_key) == ({'a': 'a'}, False) + desired = {'a': 'c'} + assert helper.compare_and_update_values(current, desired, desired_key) == ({'a': 'c'}, True) + + +@patch('requests.request') +def test_get_working_environments_info(mock_request): + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data={'a': 'b'}, status_code=200), + mockResponse(json_data={'c': 'd'}, status_code=500) + ] + helper = NetAppModule() + rest_api = create_restapi_object(mock_args()) + assert helper.get_working_environments_info(rest_api, '') == ({'a': 'b'}, None) + assert helper.get_working_environments_info(rest_api, '') == ({'c': 'd'}, '500') + + +def 
test_look_up_working_environment_by_name_in_list(): + we_list = [{'name': 'bob', 'b': 'b'}, {'name': 'chuck', 'c': 'c'}] + helper = NetAppModule() + assert helper.look_up_working_environment_by_name_in_list(we_list, 'bob') == (we_list[0], None) + error = "look_up_working_environment_by_name_in_list: Working environment not found" + assert helper.look_up_working_environment_by_name_in_list(we_list, 'alice') == (None, error) + + +@patch('requests.request') +def test_get_working_environment_details_by_name(mock_request): + we_list = [{'name': 'bob', 'b': 'b'}, {'name': 'chuck', 'c': 'c'}] + json_data = {'onPremWorkingEnvironments': [], + 'gcpVsaWorkingEnvironments': [], + 'azureVsaWorkingEnvironments': [], + 'vsaWorkingEnvironments': [] + } + json_data_onprem = dict(json_data) + json_data_onprem['onPremWorkingEnvironments'] = we_list + json_data_gcp = dict(json_data) + json_data_gcp['gcpVsaWorkingEnvironments'] = we_list + json_data_azure = dict(json_data) + json_data_azure['azureVsaWorkingEnvironments'] = we_list + json_data_aws = dict(json_data) + json_data_aws['vsaWorkingEnvironments'] = we_list + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data={'a': 'b'}, status_code=500), # exists + mockResponse(json_data={'a': 'b'}, status_code=200), # exists + mockResponse(json_data={'c': 'd'}, status_code=400), # get all + mockResponse(json_data={'a': 'b'}, status_code=200), # exists + mockResponse(json_data=json_data_onprem, status_code=200), # get all + mockResponse(json_data={'a': 'b'}, status_code=200), # exists + mockResponse(json_data=json_data_gcp, status_code=200), # get all + mockResponse(json_data={'a': 'b'}, status_code=200), # exists + mockResponse(json_data=json_data_azure, status_code=200), # get all + mockResponse(json_data={'a': 'b'}, status_code=200), # exists + mockResponse(json_data=json_data_aws, status_code=200), # get all + mockResponse(json_data={'a': 'b'}, status_code=200), # exists + 
mockResponse(json_data=json_data, status_code=200), # get all + ] + helper = NetAppModule() + rest_api = create_restapi_object(mock_args()) + assert helper.get_working_environment_details_by_name(rest_api, '', 'name') == (None, '500') + assert helper.get_working_environment_details_by_name(rest_api, '', 'name') == (None, '400') + assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (we_list[0], None) + assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (we_list[0], None) + assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (we_list[0], None) + assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (we_list[0], None) + error = "get_working_environment_details_by_name: Working environment not found" + assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (None, error) + + +@patch('requests.request') +def test_get_working_environment_details(mock_request): + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data={'key': [{'a': 'b'}]}, status_code=200), + mockResponse(json_data={'key': [{'c': 'd'}]}, status_code=500) + ] + helper = NetAppModule() + args = dict(mock_args()) + rest_api = create_restapi_object(args) + helper.parameters['working_environment_id'] = 'test_we' + assert helper.get_working_environment_details(rest_api, '') == ({'key': [{'a': 'b'}]}, None) + error = "Error: get_working_environment_details 500" + assert helper.get_working_environment_details(rest_api, '') == (None, error) + + +@patch('requests.request') +def test_get_working_environment_detail_for_snapmirror(mock_request): + json_data = {'onPremWorkingEnvironments': [], + 'gcpVsaWorkingEnvironments': [], + 'azureVsaWorkingEnvironments': [], + 'vsaWorkingEnvironments': [] + } + json_data_source = dict(json_data) + json_data_source['onPremWorkingEnvironments'] = [{'name': 'test_we_s'}] + json_data_destination = 
dict(json_data) + json_data_destination['onPremWorkingEnvironments'] = [{'name': 'test_we_d'}] + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + # by id, first test + mockResponse(json_data={'key': [{'publicId': 'test_we_s'}]}, status_code=200), # env details source + mockResponse(json_data={'key': [{'publicId': 'test_we_d'}]}, status_code=200), # env details dest + # by id, second test + mockResponse(json_data={'key': [{'c': 'd'}]}, status_code=500), # error source + # by id, third test + mockResponse(json_data={'key': [{'publicId': 'test_we_s'}]}, status_code=200), # env details source + mockResponse(json_data={'key': [{'e': 'f'}]}, status_code=500), # error source + # by name, first test + mockResponse(json_data={'a': 'b'}, status_code=200), # exists source + mockResponse(json_data=json_data_source, status_code=200), # env details source + mockResponse(json_data={'a': 'b'}, status_code=200), # exists dest + mockResponse(json_data=json_data_destination, status_code=200), # env details dest + # by name, second test + mockResponse(json_data={'key': {'c': 'd'}}, status_code=500), # error source + # by name, third test + mockResponse(json_data={'a': 'b'}, status_code=200), # exists source + mockResponse(json_data=json_data_source, status_code=200), # env details source + mockResponse(json_data={'key': {'e': 'f'}}, status_code=500), # error source + ] + helper = NetAppModule() + args = dict(mock_args()) + rest_api = create_restapi_object(args) + # search by id + helper.parameters['source_working_environment_id'] = 'test_we_s' + helper.parameters['destination_working_environment_id'] = 'test_we_d' + assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == ({'publicId': 'test_we_s'}, {'publicId': 'test_we_d'}, None) + error = "Error getting WE info: 500: {'key': [{'c': 'd'}]}" + assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error) + error = "Error getting WE 
info: 500: {'key': [{'e': 'f'}]}" + assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error) + # search by name + del helper.parameters['source_working_environment_id'] + del helper.parameters['destination_working_environment_id'] + helper.parameters['source_working_environment_name'] = 'test_we_s' + helper.parameters['destination_working_environment_name'] = 'test_we_d' + assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == ({'name': 'test_we_s'}, {'name': 'test_we_d'}, None) + error = '500' + assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error) + error = '500' + assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error) + # no destination id nor name + del helper.parameters['destination_working_environment_name'] + error = 'Cannot find working environment by destination_working_environment_id or destination_working_environment_name' + assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error) + # no source id nor name + del helper.parameters['source_working_environment_name'] + error = 'Cannot find working environment by source_working_environment_id or source_working_environment_name' + assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error) + + +def test_create_account(): + helper = NetAppModule() + error = "Error: creating an account is not supported." 
    assert helper.create_account("rest_api") == (None, error)


@patch('requests.request')
def test_get_or_create_account(mock_request):
    # side_effect responses are consumed strictly in call order:
    # OAuth token first, then one response per REST call under test.
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),
        mockResponse(json_data=[], status_code=200),
        mockResponse(json_data={'c': 'd'}, status_code=500)
    ]
    helper = NetAppModule()
    rest_api = create_restapi_object(mock_args())
    # account found: its public id is returned
    assert helper.get_or_create_account(rest_api) == ('account_id', None)
    # empty account list: explicit error rather than creating silently
    error = 'Error: account cannot be located - check credentials or provide account_id.'
    assert helper.get_or_create_account(rest_api) == (None, error)
    # HTTP 500: the status code is reported as the error
    error = '500'
    assert helper.get_or_create_account(rest_api) == (None, error)


@patch('requests.request')
def test_get_account_info(mock_request):
    # OAuth token, then: non-empty list, empty list, server error.
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),
        mockResponse(json_data=[], status_code=200),
        mockResponse(json_data={'c': 'd'}, status_code=500)
    ]
    helper = NetAppModule()
    rest_api = create_restapi_object(mock_args())
    assert helper.get_account_info(rest_api, '') == ([{'accountPublicId': 'account_id'}], None)
    assert helper.get_account_info(rest_api, '') == ([], None)
    assert helper.get_account_info(rest_api, '') == (None, '500')


@patch('requests.request')
def test_get_account_id(mock_request):
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),
        mockResponse(json_data=[], status_code=200),
        mockResponse(json_data={'c': 'd'}, status_code=500)
    ]
    helper = NetAppModule()
    rest_api = create_restapi_object(mock_args())
    assert helper.get_account_id(rest_api) == ('account_id', None)
    # empty account list: explicit error rather than a None id
    error = 'Error: no account found - check credentials or provide account_id.'
    assert helper.get_account_id(rest_api) == (None, error)
    error = '500'
    assert helper.get_account_id(rest_api) == (None, error)


@patch('requests.request')
def test_get_accounts_info(mock_request):
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),
        mockResponse(json_data={'c': 'd'}, status_code=500)
    ]
    helper = NetAppModule()
    rest_api = create_restapi_object(mock_args())
    assert helper.get_accounts_info(rest_api, '') == ([{'accountPublicId': 'account_id'}], None)
    error = '500'
    assert helper.get_accounts_info(rest_api, '') == (None, error)


def test_set_api_root_path():
    # api_root_path depends on the provider name and HA flag of the
    # working environment details.
    helper = NetAppModule()
    helper.parameters['working_environment_id'] = 'abc'
    working_environment_details = {'cloudProviderName': 'Amazon', 'isHA': False}
    helper.set_api_root_path(working_environment_details, helper)
    assert helper.api_root_path == '/occm/api/vsa'
    working_environment_details = {'cloudProviderName': 'Other', 'isHA': False}
    helper.set_api_root_path(working_environment_details, helper)
    assert helper.api_root_path == '/occm/api/other/vsa'
    working_environment_details = {'cloudProviderName': 'Other', 'isHA': True}
    helper.set_api_root_path(working_environment_details, helper)
    assert helper.api_root_path == '/occm/api/other/ha'


@patch('requests.request')
def test_get_occm_agents_by_account(mock_request):
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=[{'a': 'b'}], status_code=200),
        mockResponse(json_data=[{'c': 'd'}], status_code=500)
    ]
    helper = NetAppModule()
    rest_api = create_restapi_object(mock_args())
    assert helper.get_occm_agents_by_account(rest_api, '') == ([{'a': 'b'}], None)
    # on error the payload is still returned alongside the error string
    error = '500'
    assert helper.get_occm_agents_by_account(rest_api, '') == ([{'c': 'd'}], error)


@patch('requests.request')
def test_get_occm_agents_by_name(mock_request):
    json_data = {'agents':
                 [{'name': '', 'provider': ''},
                  {'name': 'a1', 'provider': 'p1'},
                  {'name': 'a1', 'provider': 'p1'},
                  {'name': 'a1', 'provider': 'p2'},
                  {'name': 'a2', 'provider': 'p1'},
                  ]}
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=json_data, status_code=200),
        mockResponse(json_data=json_data, status_code=500)
    ]
    helper = NetAppModule()
    rest_api = create_restapi_object(mock_args())
    # only agents matching both name and provider are returned (duplicates kept)
    expected = [agent for agent in json_data['agents'] if agent['name'] == 'a1' and agent['provider'] == 'p1']
    assert helper.get_occm_agents_by_name(rest_api, 'account', 'a1', 'p1') == (expected, None)
    error = '500'
    assert helper.get_occm_agents_by_name(rest_api, 'account', 'a1', 'p1') == (expected, error)


@patch('requests.request')
def test_get_agents_info(mock_request):
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),  # get account_id
        mockResponse(json_data=[{'a': 'b'}], status_code=200),
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),  # get account_id
        mockResponse(json_data=[{'c': 'd'}], status_code=500),
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=400),  # get account_id
    ]
    helper = NetAppModule()
    rest_api = create_restapi_object(mock_args())
    assert helper.get_agents_info(rest_api, '') == ([{'a': 'b'}], None)
    error = '500'
    assert helper.get_agents_info(rest_api, '') == ([{'c': 'd'}], error)
    # failure to fetch the account id aborts before the agents call
    error = '400'
    assert helper.get_agents_info(rest_api, '') == (None, error)


@patch('requests.request')
def test_get_active_agents_info(mock_request):
    json_data = {'agents':
                 [{'name': '', 'provider': '', 'agentId': 1, 'status': ''},
                  {'name': 'a1', 'provider': 'p1', 'agentId': 1, 'status': 'active'},
                  {'name': 'a1', 'provider': 'p1', 'agentId': 1, 'status': ''},
                  {'name': 'a1', 'provider': 'p2', 'agentId': 1, 'status': 'active'},
                  {'name': 'a2', 'provider': 'p1', 'agentId': 1, 'status': 'active'},
                  ]}
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),  # get account_id
        mockResponse(json_data=json_data, status_code=200),
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),  # get account_id
        mockResponse(json_data=json_data, status_code=500),
        mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=400),  # get account_id
    ]
    helper = NetAppModule()
    rest_api = create_restapi_object(mock_args())
    # only 'active' agents are reported, reduced to name/client_id/provider
    active = [agent for agent in json_data['agents'] if agent['status'] == 'active']
    expected = [{'name': agent['name'], 'client_id': agent['agentId'], 'provider': agent['provider']} for agent in active]
    assert helper.get_active_agents_info(rest_api, '') == (expected, None)
    error = '500'
    assert helper.get_active_agents_info(rest_api, '') == (expected, error)
    error = '400'
    assert helper.get_active_agents_info(rest_api, '') == (None, error)


@patch('requests.request')
def test_get_occm_agent_by_id(mock_request):
    json_data = {'agent':
                 {'name': 'a1', 'provider': 'p1', 'agentId': 1, 'status': 'active'}
                 }
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=json_data, status_code=200),
        mockResponse(json_data=json_data, status_code=500),
        mockResponse(json_data={'a': 'b'}, status_code=500),
    ]
    helper = NetAppModule()
    rest_api = create_restapi_object(mock_args())
    # on success only the 'agent' entry is returned
    expected = json_data['agent']
    assert helper.get_occm_agent_by_id(rest_api, '') == (expected, None)
    error = '500'
    assert helper.get_occm_agent_by_id(rest_api, '') == (expected, error)
    # a payload without an 'agent' key is returned as-is with the error
    assert helper.get_occm_agent_by_id(rest_api, '') == ({'a': 'b'}, error)

+@patch('requests.request') +def test_check_occm_status(mock_request): + json_data = {'agent': + {'name': 'a1', 'provider': 'p1', 'agentId': 1, 'status': 'active'} + } + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data=json_data, status_code=200), + mockResponse(json_data=json_data, status_code=500) + ] + helper = NetAppModule() + rest_api = create_restapi_object(mock_args()) + expected = json_data + assert helper.check_occm_status(rest_api, '') == (expected, None) + error = '500' + assert helper.check_occm_status(rest_api, '') == (expected, error) + + +@patch('requests.request') +def test_register_agent_to_service(mock_request): + mock_request.side_effect = [ + mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH + mockResponse(json_data={}, status_code=200), + mockResponse(json_data={}, status_code=200), + mockResponse(json_data={}, status_code=500) + ] + helper = NetAppModule() + rest_api = create_restapi_object(mock_args()) + helper.parameters['account_id'] = 'account_id' + helper.parameters['company'] = 'company' + helper.parameters['region'] = 'region' + helper.parameters['subnet_id'] = 'subnet_id' + expected = {} + assert helper.register_agent_to_service(rest_api, 'provider', 'vpc') == (expected, None) + args, kwargs = mock_request.call_args + body = kwargs['json'] + assert 'placement' in body + assert 'network' in body['placement'] + assert body['placement']['network'] == 'vpc' + body_other = body + assert helper.register_agent_to_service(rest_api, 'AWS', 'vpc') == (expected, None) + args, kwargs = mock_request.call_args + body = kwargs['json'] + assert 'placement' in body + assert 'network' in body['placement'] + assert body['placement']['network'] == 'vpc' + assert body_other != body + body['placement']['provider'] = 'provider' + assert body_other == body + error = '500' + assert helper.register_agent_to_service(rest_api, 'provider', 'vpc') == (expected, error) + + 
@patch('requests.request')
def test_delete_occm(mock_request):
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data={'result': 'any'}, status_code=200),
        mockResponse(json_data={'result': 'any'}, status_code=500),
    ]
    helper = NetAppModule()
    helper.parameters['account_id'] = 'account_id'
    rest_api = create_restapi_object(mock_args())
    assert helper.delete_occm(rest_api, '') == ({'result': 'any'}, None)
    error = '500'
    assert helper.delete_occm(rest_api, '') == ({'result': 'any'}, error)


@patch('requests.request')
def test_delete_occm_agents(mock_request):
    agents = [{'agentId': 'a1'},
              {'agentId': 'a2'}]
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data={'result': 'any'}, status_code=200),  # a1
        mockResponse(json_data={'result': 'any'}, status_code=200),  # a2
        mockResponse(json_data={'result': 'any'}, status_code=500),  # a1
        mockResponse(json_data={'result': 'any'}, status_code=200),  # a2
        mockResponse(json_data={'result': 'any'}, status_code=200),  # a1
        mockResponse(json_data={'result': 'any'}, status_code=200),  # a2
    ]
    helper = NetAppModule()
    helper.parameters['account_id'] = 'account_id'
    rest_api = create_restapi_object(mock_args())
    # all deletes succeed: no errors collected
    assert helper.delete_occm_agents(rest_api, agents) == []
    # one delete fails: only the failing response/error pair is collected
    error = '500'
    assert helper.delete_occm_agents(rest_api, agents) == [({'result': 'any'}, error)]
    # an agent entry without 'agentId' is rejected
    agents.append({'a': 'b'})
    error = "unexpected agent contents: {'a': 'b'}"
    assert helper.delete_occm_agents(rest_api, agents) == [(None, error)]


@patch('requests.request')
def test_get_tenant(mock_request):
    tenants = [{'publicId': 'a1'},
               {'publicId': 'a2'}]
    mock_request.side_effect = [
        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
        mockResponse(json_data=tenants, status_code=200),  # get success
        mockResponse(json_data={'result': 'any'}, status_code=500),  # get error
    ]
    helper = NetAppModule()
    # helper.parameters['account_id'] = 'account_id'
    rest_api = create_restapi_object(mock_args())
    # the first tenant's publicId is used
    assert helper.get_tenant(rest_api, '') == ('a1', None)
    error = "Error: unexpected response on getting tenant for cvo: 500, {'result': 'any'}"
    assert helper.get_tenant(rest_api, '') == (None, error)
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module_open.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module_open.py
new file mode 100644
index 000000000..b24778f47
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module_open.py
@@ -0,0 +1,77 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2021, Laurent Nicolas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" unit tests for module_utils netapp_module.py + + Provides utility functions for cloudmanager REST APIs +""" + +from __future__ import (absolute_import, division, print_function) +from logging import error +__metaclass__ = type + +import pytest +import sys + +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch +from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule + +if sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as builtins not defined for 2.6 and 2.7') + + +@patch('builtins.open') +def test_certificates(open): + open.return_value = OPEN(data=b'1234') + helper = NetAppModule() + assert helper.encode_certificates('test') == ('MTIzNA==', None) + open.return_value = OPEN(data=b'') + helper = NetAppModule() + assert helper.encode_certificates('test') == (None, 'Error: file is empty') + open.return_value = OPEN(raise_exception=True) + helper = NetAppModule() + assert helper.encode_certificates('test') == (None, 'intentional error') + + +class OPEN: + '''we could use mock_open but I'm not sure it's available in every python version ''' + def __init__(self, data=b'abcd', raise_exception=False): + self.data = data + self.raise_exception = raise_exception + + def read(self): + return self.data + # the following two methods are associated with "with" in with open ... 
+ + def __enter__(self): + if self.raise_exception: + raise OSError('intentional error') + return self + + def __exit__(self, *args): + pass diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py new file mode 100644 index 000000000..db30ada89 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py @@ -0,0 +1,297 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate \ + import NetAppCloudmanagerAggregate as my_module + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, 
**kwargs):  # pylint: disable=unused-argument
    '''function to patch over exit_json; package return data into an exception'''
    if 'changed' not in kwargs:
        kwargs['changed'] = False
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    '''function to patch over fail_json; package return data into an exception'''
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)


class MockCMConnection():
    ''' Mock response of http connections '''

    def __init__(self, kind=None, parm1=None):
        self.type = kind
        self.parm1 = parm1
        self.xml_in = None
        self.xml_out = None


class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        # divert module exit/failure into catchable exceptions for asserts
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)

    def set_default_args_pass_check(self):
        # minimal valid argument set for the aggregate module
        return dict({
            'state': 'present',
            'name': 'TestA',
            'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
            'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
            'number_of_disks': 2,
            'disk_size_size': 100,
            'disk_size_unit': 'GB',
            'refresh_token': 'myrefresh_token',
        })

    def set_args_create_cloudmanager_aggregate(self):
        return dict({
            'state': 'present',
            'name': 'Dummyname',
            'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
            'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
            'number_of_disks': 2,
            'disk_size_size': 100,
            'disk_size_unit': 'GB',
            'refresh_token': 'myrefresh_token',
        })

    def set_args_create_cloudmanager_aggregate_by_workingenv_name(self):
        # identifies the working environment by name instead of id
        return dict({
            'state': 'present',
            'name': 'Dummyname',
            'working_environment_name': 'wkone',
            'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
            'number_of_disks': 2,
            'disk_size_size': 100,
            'disk_size_unit': 'GB',
            'refresh_token': 'myrefresh_token',
        })

    def set_args_delete_cloudmanager_aggregate(self):
        return dict({
            'state': 'absent',
            'name': 'Dummyname',
            'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
            'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
            'number_of_disks': 2,
            'disk_size_size': 100,
            'disk_size_unit': 'GB',
            'refresh_token': 'myrefresh_token',
        })

    def set_args_delete_cloudmanager_aggregate_by_workingenv_name(self):
        return dict({
            'state': 'absent',
            'name': 'Dummyname',
            'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
            'number_of_disks': 2,
            'disk_size_size': 100,
            'disk_size_unit': 'GB',
            'refresh_token': 'myrefresh_token',
        })

    def set_args_update_cloudmanager_aggregate(self):
        # same aggregate, but with number_of_disks 3 instead of 2
        return dict({
            'state': 'present',
            'name': 'TestCMAggregate',
            'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
            'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
            'number_of_disks': 3,
            'disk_size_size': 100,
            'disk_size_unit': 'GB',
            'refresh_token': 'myrefresh_token',
        })

    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            my_module()
        print('Info: %s' % exc.value.args[0]['msg'])

    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    def test_module_fail_when_required_args_present(self, get_token):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleExitJson) as exc:
            set_module_args(self.set_default_args_pass_check())
            get_token.return_value = 'test', 'test'
            my_module()
            exit_json(changed=True, msg="TestCase Fail when required ars are present")
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch(
        'ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    def test_create_cloudmanager_aggregate_pass(self, get_post_api, get_aggregate_api, get_token):
        set_module_args(self.set_args_create_cloudmanager_aggregate())
        get_token.return_value = 'test', 'test'
        my_obj = my_module()
        my_obj.rest_api.api_root_path = "my_root_path"

        # aggregate does not exist yet -> apply() should create it
        get_aggregate_api.return_value = None
        get_post_api.return_value = None, None, None

        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_create_cloudmanager_aggregate: %s' % repr(exc.value))
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch(
        'ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
    def test_delete_cloudmanager_aggregate_pass(self, get_delete_api, get_aggregate_api, get_token):
        set_module_args(self.set_args_delete_cloudmanager_aggregate())
        get_token.return_value = 'test', 'test'
        my_obj = my_module()
        my_obj.rest_api.api_root_path = "my_root_path"

        # existing aggregate returned by the mocked GET
        my_aggregate = {
            'name': 'Dummyname',
            'state': 'online',
            'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
            'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
            'refresh_token': 'myrefresh_token',
            'disks': [{'device': 'xvdh vol-313', 'position': 'data', 'vmDiskProperties': None,
                       'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12h'},
                      {'device': 'xvdi vol-314', 'position': 'data', 'vmDiskProperties': None,
                       'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12i'}],
            'homeNode': 'testAWSa-01',
        }
        get_aggregate_api.return_value = my_aggregate
        get_delete_api.return_value = 'Aggregated Deleted', None, None

        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_delete_cloudmanager_aggregate: %s' % repr(exc.value))
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    def test_update_cloudmanager_aggregate_pass(self, get_post_api, get_aggregate_api, get_token):
        set_module_args(self.set_args_update_cloudmanager_aggregate())
        get_token.return_value = 'test', 'test'
        my_obj = my_module()
        my_obj.rest_api.api_root_path = "my_root_path"

        # existing aggregate has 2 disks; requested number_of_disks is 3
        my_aggregate = {
            'name': 'Dummyname',
            'state': 'online',
            'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
            'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
            'refresh_token': 'myrefresh_token',
            'disks': [{'device': 'xvdh vol-313', 'position': 'data', 'vmDiskProperties': None,
                       'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12h'},
                      {'device': 'xvdi vol-314', 'position': 'data', 'vmDiskProperties': None,
                       'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12i'}],
            'homeNode': 'testAWSa-01',
        }
        get_aggregate_api.return_value = my_aggregate
        get_post_api.return_value = None, None, None

        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_update_cloudmanager_aggregate: %s' % repr(exc.value))
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    def test_create_cloudmanager_aggregate_by_workingenv_name_pass(self, get_post_api, get_we, get_aggregate_api, get_token):
        data = self.set_args_create_cloudmanager_aggregate_by_workingenv_name()
        get_token.return_value = 'test', 'test'
        # the working environment is resolved by name to its publicId
        my_we = {
            'name': 'test',
            'publicId': 'test',
            'cloudProviderName': 'Amazon'}
        get_we.return_value = my_we, None
        data['working_environment_id'] = my_we['publicId']
        set_module_args(data)
        my_obj = my_module()
        my_obj.rest_api.api_root_path = "my_root_path"
        get_aggregate_api.return_value = None
        get_post_api.return_value = None, None, None

        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_create_cloudmanager_aggregate: %s' % repr(exc.value))
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
    def test_delete_cloudmanager_aggregate_by_workingenv_name_pass(self, get_delete_api, get_we, get_aggregate_api, get_token):
        data = self.set_args_delete_cloudmanager_aggregate_by_workingenv_name()
        my_we = {
            'name': 'test',
            'publicId': 'test',
            'cloudProviderName': 'Amazon'}
        get_we.return_value = my_we, None
        data['working_environment_id'] = my_we['publicId']
        set_module_args(data)
        get_token.return_value = 'test', 'test'
        my_obj = my_module()
        my_obj.rest_api.api_root_path = "my_root_path"

        my_aggregate = {
            'name': 'Dummyname',
            'state': 'online',
            'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
            'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
            'refresh_token': 'myrefresh_token',
            'disks': [{'device': 'xvdh vol-313', 'position': 'data', 'vmDiskProperties': None,
                       'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12h'},
                      {'device': 'xvdi vol-314', 'position': 'data', 'vmDiskProperties': None,
                       'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12i'}],
            'homeNode': 'testAWSa-01',
        }
        get_aggregate_api.return_value = my_aggregate
        get_delete_api.return_value = 'Aggregated Deleted', None, None

        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_delete_cloudmanager_aggregate: %s' % repr(exc.value))
        assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py
new file mode 100644
index 000000000..cee1e439c
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py
@@ -0,0 +1,165 @@
# (c) 2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

''' unit tests Cloudmanager Ansible module: '''

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

import json
import sys
import pytest

from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx \
    import NetAppCloudManagerAWSFSX as my_module

if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
    pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
def set_module_args(args):
    '''prepare arguments so that they will be picked up during module creation'''
    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(args)  # pylint: disable=protected-access


class AnsibleExitJson(Exception):
    '''Exception class to be raised by module.exit_json and caught by the test case'''


class AnsibleFailJson(Exception):
    '''Exception class to be raised by module.fail_json and caught by the test case'''


def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    '''function to patch over exit_json; package return data into an exception'''
    if 'changed' not in kwargs:
        kwargs['changed'] = False
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    '''function to patch over fail_json; package return data into an exception'''
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)


class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        # divert module exit/failure into catchable exceptions for asserts
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)

    def set_default_args_pass_check(self):
        # minimal valid argument set for the AWS FSx module
        return dict({
            'state': 'present',
            'name': 'TestA',
            'workspace_id': 'test',
            'region': 'us-west-1',
            'tenant_id': 'account-test',
            'storage_capacity_size': 1024,
            'throughput_capacity': 512,
            'storage_capacity_size_unit': 'TiB',
            'aws_credentials_name': 'test',
            'primary_subnet_id': 'test',
            'secondary_subnet_id': 'test',
            'fsx_admin_password': 'password',
            'refresh_token': 'myrefresh_token',
        })

    def set_args_create_cloudmanager_aws_fsx(self):
        return dict({
            'state': 'present',
            'name': 'TestA',
            'workspace_id': 'test',
            'region': 'us-west-1',
            'tenant_id': 'account-test',
            'storage_capacity_size': 1024,
            'storage_capacity_size_unit': 'TiB',
            'throughput_capacity': 512,
            'aws_credentials_name': 'test',
            'primary_subnet_id': 'test',
            'secondary_subnet_id': 'test',
            'fsx_admin_password': 'password',
            'refresh_token': 'myrefresh_token',
        })

    def set_args_delete_cloudmanager_aws_fsx(self):
        return dict({
            'state': 'absent',
            'name': 'Dummyname',
            'tenant_id': 'account-test',
            'refresh_token': 'myrefresh_token',
        })

    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            my_module()
        print('Info: %s' % exc.value.args[0]['msg'])

    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx.NetAppCloudManagerAWSFSX.get_aws_credentials_id')
    def test_module_fail_when_required_args_present(self, get_aws_credentials_id, get_token):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleExitJson) as exc:
            set_module_args(self.set_default_args_pass_check())
            get_aws_credentials_id.return_value = '123', None
            get_token.return_value = 'test', 'test'
            my_module()
            exit_json(changed=True, msg="TestCase Fail when required args are present")
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_aws_fsx_details')
    @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx.NetAppCloudManagerAWSFSX.wait_on_completion_for_fsx')
    @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx.NetAppCloudManagerAWSFSX.check_task_status_for_fsx')
    @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx.NetAppCloudManagerAWSFSX.get_aws_credentials_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    def test_create_cloudmanager_aws_fsx_pass(self, get_post_api, get_aws_credentials_id, check_task_status_for_fsx,
                                              wait_on_completion_for_fsx, get_aws_fsx_details, get_token):
        set_module_args(self.set_args_create_cloudmanager_aws_fsx())
        get_token.return_value = 'test', 'test'
        get_aws_credentials_id.return_value = '123', None
        my_obj = my_module()

        # FSx does not exist yet -> apply() should create it
        response = {'id': 'abcdefg12345'}
        get_post_api.return_value = response, None, None
        check_task_status_for_fsx.return_value = {'providerDetails': {'status': {'status': 'ON', 'lifecycle': 'AVAILABLE'}}}, None
        wait_on_completion_for_fsx.return_value = None
        get_aws_fsx_details.return_value = None, None

        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_create_cloudmanager_aws_fsx_pass: %s' % repr(exc.value))
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_aws_fsx_details')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
    def test_delete_cloudmanager_aws_fsx_pass(self, get_delete_api, get_aws_fsx_details, get_token):
        set_module_args(self.set_args_delete_cloudmanager_aws_fsx())
        get_token.return_value = 'test', 'test'
        my_obj = my_module()

        # existing FSx returned by the mocked lookup
        my_fsx = {
            'name': 'test',
            'id': 'test'}
        get_aws_fsx_details.return_value = my_fsx, None
        get_delete_api.return_value = None, None, None

        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        print('Info: test_delete_cloudmanager_aws_fsx_pass: %s' % repr(exc.value))
        assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py
a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py new file mode 100644 index 000000000..023f993af --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py @@ -0,0 +1,252 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests Cloudmanager Ansible module: ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server \ + import NetAppCloudmanagerCifsServer as my_module + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' 
not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockCMConnection(): + ''' Mock response of http connections ''' + def __init__(self, kind=None, parm1=None): + self.type = kind + self.parm1 = parm1 + self.xml_in = None + self.xml_out = None + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345', + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'refreshToken', + 'domain': 'test.com', + 'username': 'admin', + 'password': 'abcde', + 'dns_domain': 'test.com', + 'ip_addresses': '["1.0.0.1"]', + 'netbios': 'cvoname', + 'organizational_unit': 'CN=Computers', + }) + + def set_default_args_with_workingenv_name_pass_check(self): + return dict({ + 'state': 'present', + 'working_environment_name': 'weone', + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'refreshToken', + 'domain': 'test.com', + 'username': 'admin', + 'password': 'abcde', + 'dns_domain': 'test.com', + 'ip_addresses': '["1.0.0.1"]', + 'netbios': 'cvoname', + 'organizational_unit': 'CN=Computers', + }) + + def set_using_workgroup_args_pass_check(self): + return dict({ + 'state': 'present', + 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345', + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'refreshToken', + 'is_workgroup': True, + 'server_name': 'abc', + 'workgroup_name': 'wk', + }) + + def 
test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.create_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_create_cifs_server_successfully(self, send_request, create, get, get_token): + set_module_args(self.set_default_args_pass_check()) + get.return_value = None + create.return_value = None + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')] + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_create_cifs_server_idempotency(self, send_request, get, get_token): + set_module_args(self.set_default_args_pass_check()) + get.return_value = { + 'domain': 'test.com', + 'dns_domain': 'test.com', + 'ip_addresses': ['1.0.0.1'], + 'netbios': 'cvoname', + 'organizational_unit': 'CN=Computers', + } + send_request.side_effect = 
[({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')] + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.create_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_create_cifs_server_using_workgroup_successfully(self, send_request, create, get, get_token): + set_module_args(self.set_using_workgroup_args_pass_check()) + get.return_value = None + create.return_value = None + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')] + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.delete_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_delete_cifs_server_successfully(self, send_request, delete, get, get_token): + 
args = self.set_default_args_pass_check() + args['state'] = 'absent' + set_module_args(args) + get.return_value = { + 'domain': 'test.com', + 'dns_domain': 'test.com', + 'ip_addresses': ['1.0.0.1'], + 'netbios': 'cvoname', + 'organizational_unit': 'CN=Computers', + } + delete.return_value = None + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')] + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.create_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_create_cifs_server_successfully(self, send_request, create, get, get_we, get_token): + args = self.set_default_args_with_workingenv_name_pass_check() + my_we = { + 'name': 'test', + 'publicId': 'test', + 'cloudProviderName': 'Amazon'} + get_we.return_value = my_we, None + args['working_environment_id'] = my_we['publicId'] + set_module_args(args) + get.return_value = None + create.return_value = None + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')] + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + with pytest.raises(AnsibleExitJson) as exc: 
+ obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.delete_cifs_server') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_delete_cifs_server_with_workingenv_name_successfully(self, send_request, delete, get, get_we, get_token): + args = self.set_default_args_with_workingenv_name_pass_check() + args['state'] = 'absent' + my_we = { + 'name': 'test', + 'publicId': 'test', + 'cloudProviderName': 'Amazon'} + get_we.return_value = my_we, None + args['working_environment_id'] = my_we['publicId'] + set_module_args(args) + get.return_value = { + 'domain': 'test.com', + 'dns_domain': 'test.com', + 'ip_addresses': ['1.0.0.1'], + 'netbios': 'cvoname', + 'organizational_unit': 'CN=Computers', + } + delete.return_value = None + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')] + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py new file mode 100644 index 000000000..dab9cde66 --- /dev/null +++ 
b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py @@ -0,0 +1,730 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests Cloudmanager Ansible module: ''' + +from __future__ import (absolute_import, division, print_function) +from logging import exception + +__metaclass__ = type + +import json +import sys +import pytest + +HAS_BOTOCORE = True +try: + from botocore.exceptions import ClientError +except ImportError: + HAS_BOTOCORE = False + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch + +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws \ + import NetAppCloudManagerConnectorAWS as my_module, IMPORT_EXCEPTION, main as my_main + +if IMPORT_EXCEPTION is not None and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7: %s' % IMPORT_EXCEPTION) + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + 
'''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'name': 'TestA', + 'region': 'us-west-1', + 'key_name': 'dev_automation', + 'subnet_id': 'subnet-test', + 'ami': 'ami-test', + 'security_group_ids': ['sg-test'], + 'refresh_token': 'myrefresh_token', + 'iam_instance_profile_name': 'OCCM_AUTOMATION', + 'account_id': 'account-test', + 'company': 'NetApp' + }) + + def set_args_create_cloudmanager_connector_aws(self): + return dict({ + 'state': 'present', + 'name': 'Dummyname', + 'region': 'us-west-1', + 'key_name': 'dev_automation', + 'subnet_id': 'subnet-test', + 'ami': 'ami-test', + 'security_group_ids': ['sg-test'], + 'refresh_token': 'myrefresh_token', + 'iam_instance_profile_name': 'OCCM_AUTOMATION', + 'account_id': 'account-test', + 'company': 'NetApp' + }) + + def set_args_delete_cloudmanager_connector_aws(self): + return dict({ + 'state': 'absent', + 'name': 'Dummyname', + 'client_id': 'test', + 'instance_id': 'test', + 'region': 'us-west-1', + 'account_id': 'account-test', + 'refresh_token': 'myrefresh_token', + 'company': 'NetApp' + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + def test_module_fail_when_required_args_present(self, get_token): + ''' required arguments are reported as errors ''' + 
with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + get_token.return_value = 'test', 'test' + my_module() + exit_json(changed=True, msg="TestCase Fail when required args are present") + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_instance') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.create_instance') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_vpc') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.register_agent_to_service') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_ami') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_connector_aws_pass(self, get_post_api, get_ami, register_agent_to_service, get_vpc, create_instance, get_instance, get_token): + set_module_args(self.set_args_create_cloudmanager_connector_aws()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + get_post_api.return_value = None, None, None + get_ami.return_value = 'ami-test' + register_agent_to_service.return_value = 'test', 'test' + get_vpc.return_value = 'test' + create_instance.return_value = 'test', 'test' + get_instance.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_connector_aws: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.delete_instance') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_instance') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.delete_occm') + def test_delete_cloudmanager_connector_aws_pass(self, delete_occm, get_occm_agent_by_id, delete_api, get_instance, delete_instance, get_token): + set_module_args(self.set_args_delete_cloudmanager_connector_aws()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + my_instance = { + 'InstanceId': 'instance_id_1' + } + get_instance.return_value = my_instance + get_occm_agent_by_id.return_value = {'agentId': 'test', 'state': 'active'}, None + delete_api.return_value = None, None, None + delete_instance.return_value = None + delete_occm.return_value = None, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_delete_cloudmanager_connector_aws: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.delete_instance') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_instance') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agents_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.delete_occm') + def test_delete_cloudmanager_connector_aws_pass_no_ids(self, delete_occm, get_occm_agents, delete_api, get_instance, delete_instance, get_token): + args = self.set_args_delete_cloudmanager_connector_aws() + args.pop('client_id') + args.pop('instance_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + my_connector_aws = { + 'name': 'Dummyname', + 'client_id': 'test', + 'refresh_token': 'myrefresh_token', + } + my_instance = { + 'InstanceId': 'instance_id_1' + } + # get_connector_aws.return_value = my_connector_aws + get_instance.return_value = my_instance + delete_api.return_value = None, None, None + delete_instance.return_value = None + get_occm_agents.return_value = [{'agentId': 'test', 'status': 'active'}], None + delete_occm.return_value = None, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print() + print('Info: test_delete_cloudmanager_connector_aws: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.delete_instance') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_instance') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agents_by_name') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.delete_occm') + def test_delete_cloudmanager_connector_aws_negative_no_instance(self, delete_occm, get_occm_agents, delete_api, get_instance, delete_instance, get_token): + args = self.set_args_delete_cloudmanager_connector_aws() + args.pop('client_id') + args.pop('instance_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + my_connector_aws = { + 'name': 'Dummyname', + 'client_id': 'test', + 'refresh_token': 'myrefresh_token', + } + my_instance = None + # get_connector_aws.return_value = my_connector_aws + get_instance.return_value = my_instance + delete_api.return_value = None, None, None + delete_instance.return_value = None + get_occm_agents.return_value = [{'agentId': 'test', 'status': 'active'}], None + delete_occm.return_value = None, "some error on delete occm" + + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print() + print('Info: test_delete_cloudmanager_connector_aws: %s' % repr(exc.value)) + msg = "Error: deleting OCCM agent(s): [(None, 'some error on delete occm')]" + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_get_instance_empty(self, get_boto3_client, get_token): + args = self.set_args_delete_cloudmanager_connector_aws() + args.pop('client_id') + args.pop('instance_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2() + my_obj = my_module() + instance = my_obj.get_instance() + print('instance', instance) + assert instance is None + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_get_instance_one(self, get_boto3_client, get_token): + args = self.set_args_delete_cloudmanager_connector_aws() + 
args.pop('client_id') + args.pop('instance_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}]) + my_obj = my_module() + instance = my_obj.get_instance() + print('instance', instance) + assert instance + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_get_instance_many_terminated(self, get_boto3_client, get_token): + args = self.set_args_delete_cloudmanager_connector_aws() + args.pop('client_id') + args.pop('instance_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'terminated'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + my_obj = my_module() + instance = my_obj.get_instance() + print('instance', instance) + assert instance is None + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_get_instance_many_but_only_one_active(self, get_boto3_client, get_token): + args = self.set_args_delete_cloudmanager_connector_aws() + args.pop('client_id') + args.pop('instance_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + my_obj = my_module() + instance = my_obj.get_instance() + print('instance', instance) + assert instance + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_get_instance_many_but_only_one_active(self, get_boto3_client, get_token): + args = self.set_args_delete_cloudmanager_connector_aws() + args.pop('client_id') + args.pop('instance_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + 
get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'active', 'name': 'xxxx'}]) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_instance() + msg = "Error: found multiple instances for name" + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_get_instance_exception(self, get_boto3_client, get_token): + args = self.set_args_delete_cloudmanager_connector_aws() + args.pop('client_id') + args.pop('instance_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2(raise_exc=True) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_instance() + msg = "An error occurred (test_only) when calling the describe_instances operation: forced error in unit testing" + assert msg in exc.value.args[0]['msg'] + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + @patch('boto3.client') + def test_create_instance(self, get_boto3_client, register, get_token, get_occm_agent_by_id, dont_sleep): + args = self.set_args_create_cloudmanager_connector_aws() + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'terminated'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None + get_occm_agent_by_id.return_value = {'agentId': 'test', 'status': 'active'}, None + my_obj = my_module() + instance = 
my_obj.create_instance() + print('instance', instance) + assert instance + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.encode_certificates') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_or_create_account') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + @patch('boto3.client') + def test_create_instance_no_ami_with_tags(self, get_boto3_client, register, get_token, get_occm_agent_by_id, get_account, encode_cert, dont_sleep): + ''' additional paths: get_ami, add tags, no public IP, no account id ''' + args = self.set_args_create_cloudmanager_connector_aws() + args.pop('ami') + args.pop('account_id') + args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}] + args['associate_public_ip_address'] = False + args['proxy_certificates'] = ['cert1', 'cert2'] + set_module_args(args) + get_account.return_value = 'account_id', None + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'terminated'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + encode_cert.return_value = 'base64', None + register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None + get_occm_agent_by_id.side_effect = [ + ({'agentId': 'test', 'status': 'pending'}, None), + ({'agentId': 'test', 'status': 'pending'}, None), + ({'agentId': 'test', 'status': 'active'}, None)] + my_obj = my_module() + instance = my_obj.create_instance() + print('instance', instance) + assert instance + + @patch('time.sleep') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + @patch('boto3.client') + def test_create_instance_timeout(self, get_boto3_client, register, get_token, get_occm_agent_by_id, dont_sleep): + ''' additional paths: get_ami, add tags, no public IP''' + args = self.set_args_create_cloudmanager_connector_aws() + args.pop('ami') + args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}] + args['associate_public_ip_address'] = False + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'terminated'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None + get_occm_agent_by_id.return_value = {'agentId': 'test', 'status': 'pending'}, None + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_instance() + msg = "Error: taking too long for OCCM agent to be active or not properly setup" + assert msg in exc.value.args[0]['msg'] + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + @patch('boto3.client') + def test_create_instance_error_in_get_agent(self, get_boto3_client, register, get_token, get_occm_agent_by_id, dont_sleep): + ''' additional paths: get_ami, add tags, no public IP''' + args = self.set_args_create_cloudmanager_connector_aws() + args.pop('ami') + args['aws_tag'] = 
[{'tag_key': 'tkey', 'tag_value': 'tvalue'}] + args['associate_public_ip_address'] = False + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'terminated'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None + get_occm_agent_by_id.return_value = 'forcing an error', 'intentional error' + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_instance() + msg = "Error: not able to get occm status: intentional error, forcing an error" + assert msg in exc.value.args[0]['msg'] + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_or_create_account') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_create_instance_error_in_get_account(self, get_boto3_client, get_token, get_account, dont_sleep): + ''' additional paths: get_ami, add tags, no public IP, no account id ''' + args = self.set_args_create_cloudmanager_connector_aws() + args.pop('ami') + args.pop('account_id') + args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}] + args['associate_public_ip_address'] = False + set_module_args(args) + get_account.return_value = 'forcing an error', 'intentional error' + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'terminated'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_instance() + msg = "Error: failed to get account: intentional error." 
+ assert msg in exc.value.args[0]['msg'] + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_or_create_account') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + @patch('boto3.client') + def test_create_instance_error_in_register(self, get_boto3_client, register, get_token, get_account, dont_sleep): + ''' additional paths: get_ami, add tags, no public IP, no account id ''' + args = self.set_args_create_cloudmanager_connector_aws() + args.pop('ami') + args.pop('account_id') + args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}] + args['associate_public_ip_address'] = False + set_module_args(args) + get_account.return_value = 'account_id', None + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'terminated'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + register.return_value = 'forcing an error', 'intentional error', 'dummy' + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_instance() + msg = "Error: unexpected response on connector setup: intentional error, forcing an error" + assert msg in exc.value.args[0]['msg'] + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.encode_certificates') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_or_create_account') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + @patch('boto3.client') + def test_create_instance_error_in_open(self, get_boto3_client, register, get_token, 
get_account, encode_cert, dont_sleep): + ''' additional paths: get_ami, add tags, no public IP, no account id ''' + args = self.set_args_create_cloudmanager_connector_aws() + args.pop('ami') + args.pop('account_id') + args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}] + args['associate_public_ip_address'] = False + args['proxy_certificates'] = ['cert1', 'cert2'] + set_module_args(args) + get_account.return_value = 'account_id', None + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'terminated'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None + encode_cert.return_value = None, 'intentional error' + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_instance() + msg = "Error: could not open/read file 'cert1' of proxy_certificates: intentional error" + assert msg in exc.value.args[0]['msg'] + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_delete_instance(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep): + args = self.set_args_delete_cloudmanager_connector_aws() + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + get_occm_agent_by_id.side_effect = [ + ({'agentId': 'test', 'status': 'active'}, None), + ({'agentId': 'test', 'status': 'active'}, None), + ({'agentId': 'test', 'status': 'terminated'}, None)] + my_obj = my_module() + error = my_obj.delete_instance() + assert not error + + @patch('time.sleep') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.delete_occm_agents') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agents_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_delete_no_client(self, get_boto3_client, get_token, get_occm_agent_by_id, get_occm_agents_by_name, delete_occm_agents, dont_sleep): + args = self.set_args_delete_cloudmanager_connector_aws() + args.pop('client_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + get_occm_agent_by_id.side_effect = [ + ({'agentId': 'test', 'status': 'active'}, None), + ({'agentId': 'test', 'status': 'active'}, None), + ({'agentId': 'test', 'status': 'terminated'}, None)] + get_occm_agents_by_name.return_value = [], None + delete_occm_agents.return_value = None + with pytest.raises(AnsibleExitJson) as exc: + my_main() + assert not get_occm_agent_by_id.called + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_delete_instance_timeout(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep): + args = self.set_args_delete_cloudmanager_connector_aws() + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + 
get_occm_agent_by_id.return_value = {'agentId': 'test', 'status': 'active'}, None + my_obj = my_module() + error = my_obj.delete_instance() + assert 'Error: taking too long for instance to finish terminating.' == error + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_delete_instance_error_on_agent(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep): + args = self.set_args_delete_cloudmanager_connector_aws() + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + get_occm_agent_by_id.return_value = {'agentId': 'test', 'status': 'active'}, 'intentional error' + my_obj = my_module() + error = my_obj.delete_instance() + assert 'Error: not able to get occm agent status after deleting instance: intentional error,' in error + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_delete_instance_client_id_not_found_403(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep): + args = self.set_args_delete_cloudmanager_connector_aws() + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + get_occm_agent_by_id.return_value = 'Action not allowed for user', '403' + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = 
"Error: not able to get occm agent status after deleting instance: 403," + assert msg in exc.value.args[0]['msg'] + print(exc.value.args[0]) + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_delete_instance_client_id_not_found_other(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep): + args = self.set_args_delete_cloudmanager_connector_aws() + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + get_occm_agent_by_id.return_value = 'Other error', '404' + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "Error: getting OCCM agents: 404," + assert msg in exc.value.args[0]['msg'] + + @patch('time.sleep') + # @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_delete_instance_account_id_not_found(self, get_boto3_client, get_token, dont_sleep): + args = self.set_args_delete_cloudmanager_connector_aws() + args.pop('account_id') + args.pop('client_id') + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + # get_occm_agent_by_id.return_value = 'Other error', '404' + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + # msg = "Error: getting OCCM agents: 404," + assert exc.value.args[0]['account_id'] is None + 
assert exc.value.args[0]['client_id'] is None + + @patch('time.sleep') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agents_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('boto3.client') + def test_modify_instance(self, get_boto3_client, get_token, get_occm_agents_by_name, dont_sleep): + args = self.set_args_create_cloudmanager_connector_aws() + args['instance_type'] = 't3.large' + set_module_args(args) + get_token.return_value = 'test', 'test' + get_boto3_client.return_value = EC2([{'state': 'active'}, + {'state': 'terminated', 'reservation': '2'}, + {'state': 'terminated', 'name': 'xxxx'}]) + get_occm_agents_by_name.return_value = [{'agentId': 'test', 'status': 'active'}], None + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + msg = "Note: modifying an existing connector is not supported at this time." + assert msg == exc.value.args[0]['modify'] + + +class EC2: + def __init__(self, get_instances=None, create_instance=True, raise_exc=False): + ''' list of instances as dictionaries: + name, state are optional, and used to build an instance + reservation is optional and defaults to 'default' + ''' + self.get_instances = get_instances if get_instances is not None else [] + self.create_instance = create_instance if create_instance is not None else [] + self.raise_exc = raise_exc + + def describe_instances(self, Filters=None, InstanceIds=None): + ''' return a list of reservations, each reservation is a list of instances + ''' + if self.raise_exc and HAS_BOTOCORE: + raise ClientError({'Error': {'Message': 'forced error in unit testing', 'Code': 'test_only'}}, 'describe_instances') + print('ec2', Filters) + print('ec2', InstanceIds) + return self._build_reservations() + + def describe_images(self, Filters=None, Owners=None): + ''' AMI ''' + return {'Images': [{'CreationDate': 'yyyyy', 
'ImageId': 'image_id'}, + {'CreationDate': 'xxxxx', 'ImageId': 'image_id'}, + {'CreationDate': 'zzzzz', 'ImageId': 'image_id'}]} + + def describe_subnets(self, SubnetIds=None): + ''' subnets ''' + return {'Subnets': [{'VpcId': 'vpc_id'}]} + + def run_instances(self, **kwargs): + ''' create and start an instance''' + if self.create_instance: + return {'Instances': [{'InstanceId': 'instance_id'}]} + return {'Instances': []} + + def terminate_instances(self, **kwargs): + ''' terminate an instance''' + return + + def _build_reservations(self): + ''' return a list of reservations, each reservation is a list of instances + ''' + reservations = {} + for instance in self.get_instances: + reservation = instance.get('reservation', 'default') + if reservation not in reservations: + reservations[reservation] = [] + # provide default values for name or state if one is present + name, state = None, None + if 'name' in instance: + name = instance['name'] + state = instance.get('state', 'active') + elif 'state' in instance: + name = instance.get('name', 'd_name') + state = instance['state'] + instance_id = instance.get('instance_id', '12345') + instance_type = instance.get('instance_type', 't3.xlarge') + if name: + reservations[reservation].append({'Name': name, 'State': {'Name': state}, 'InstanceId': instance_id, 'InstanceType': instance_type}) + return { + 'Reservations': [ + {'Instances': instances} for instances in reservations.values() + ] + } diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py new file mode 100644 index 000000000..37a93a291 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py @@ -0,0 +1,178 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + 
+''' unit tests Cloudmanager Ansible module: ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch + +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure \ + import NetAppCloudManagerConnectorAzure as my_module, IMPORT_EXCEPTION + +if IMPORT_EXCEPTION is not None and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7: %s' % IMPORT_EXCEPTION) + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockCMConnection: + ''' Mock response of http connections ''' + + def __init__(self, kind=None, parm1=None): + self.type = kind + self.parm1 = parm1 + + +# using pytest natively, without unittest.TestCase +@pytest.fixture +def patch_ansible(): + with patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) as mocks: + 
yield mocks + + +def set_default_args_pass_check(): + return dict({ + 'state': 'present', + 'name': 'TestA', + 'location': 'westus', + 'resource_group': 'occm_group_westus', + 'subnet_id': 'Subnet1', + 'vnet_id': 'Vnet1', + 'subscription_id': 'subscriptionId-test', + 'refresh_token': 'myrefresh_token', + 'account_id': 'account-test', + 'company': 'NetApp', + 'admin_username': 'test', + 'admin_password': 'test', + 'network_security_group_name': 'test' + }) + + +def set_args_create_cloudmanager_connector_azure(): + return dict({ + 'state': 'present', + 'name': 'TestA', + 'location': 'westus', + 'resource_group': 'occm_group_westus', + 'subnet_id': 'Subnet1', + 'vnet_id': 'Vnet1', + 'subscription_id': 'subscriptionId-test', + 'refresh_token': 'myrefresh_token', + 'account_id': 'account-test', + 'company': 'NetApp', + 'admin_username': 'test', + 'admin_password': 'test', + 'network_security_group_name': 'test' + }) + + +def set_args_delete_cloudmanager_connector_azure(): + return dict({ + 'state': 'absent', + 'name': 'Dummyname', + 'client_id': 'test', + 'location': 'westus', + 'resource_group': 'occm_group_westus', + 'subnet_id': 'Subnet1', + 'vnet_id': 'Vnet1', + 'subscription_id': 'subscriptionId-test', + 'refresh_token': 'myrefresh_token', + 'account_id': 'account-test', + 'company': 'NetApp', + 'admin_username': 'test', + 'admin_password': 'test', + 'network_security_group_name': 'test' + }) + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') +def test_module_fail_when_required_args_present(get_token, patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + 
set_module_args(set_default_args_pass_check()) + get_token.return_value = 'test', 'test' + my_module() + exit_json(changed=True, msg="TestCase Fail when required args are present") + assert exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure.NetAppCloudManagerConnectorAzure.deploy_azure') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure.NetAppCloudManagerConnectorAzure.register_agent_to_service') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') +def test_create_cloudmanager_connector_azure_pass(get_post_api, register_agent_to_service, deploy_azure, get_token, patch_ansible): + set_module_args(set_args_create_cloudmanager_connector_azure()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + get_post_api.return_value = None, None, None + register_agent_to_service.return_value = 'test', 'test' + deploy_azure.return_value = None, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_connector_azure: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure.NetAppCloudManagerConnectorAzure.get_deploy_azure_vm') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure.NetAppCloudManagerConnectorAzure.delete_azure_occm') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete') +def test_delete_cloudmanager_connector_azure_pass(get_delete_api, delete_azure_occm, get_deploy_azure_vm, get_token, patch_ansible): + 
set_module_args(set_args_delete_cloudmanager_connector_azure()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + get_deploy_azure_vm.return_value = True + delete_azure_occm.return_value = None + get_delete_api.return_value = None, None, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_delete_cloudmanager_connector_azure: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py new file mode 100644 index 000000000..9d74af2d7 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py @@ -0,0 +1,407 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests Cloudmanager Ansible module: ''' + + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch + +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp \ + import NetAppCloudManagerConnectorGCP as my_module + +IMPORT_ERRORS = [] +HAS_GCP_COLLECTION = False + +try: + from google import auth + from google.auth.transport import requests + from google.oauth2 import service_account + import yaml + HAS_GCP_COLLECTION = True +except ImportError as exc: + IMPORT_ERRORS.append(str(exc)) + +if not HAS_GCP_COLLECTION and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required google packages on 2.6 and 2.7') + + +def set_module_args(args): + '''prepare arguments so that they will be 
picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockCMConnection(): + ''' Mock response of http connections ''' + + def __init__(self, kind=None, parm1=None): + self.type = kind + self.parm1 = parm1 + self.xml_in = None + self.xml_out = None + + +# using pytest natively, without unittest.TestCase +@pytest.fixture(name='patch_ansible') +def fixture_patch_ansible(): + with patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) as mocks: + yield mocks + + +def set_default_args_pass_check(): + return dict({ + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'my_refresh_token', + 'state': 'present', + 'name': 'CxName', + 'project_id': 'tlv-support', + 'zone': 'us-west-1', + 'account_id': 'account-test', + 'company': 'NetApp', + 'service_account_email': 'terraform-user@tlv-support.iam.gserviceaccount.com', + }) + + +def set_args_create_cloudmanager_connector_gcp(): + return dict({ + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'my_refresh_token', + 'state': 'present', + 'name': 'CxName', + 'project_id': 'tlv-support', + 'zone': 'us-west-1', + 'account_id': 'account-test', + 'company': 'NetApp', + 
'service_account_email': 'terraform-user@tlv-support.iam.gserviceaccount.com', + 'service_account_path': 'test.json', + }) + + +def set_args_delete_cloudmanager_connector_gcp(): + return dict({ + 'client_id': 'test', + 'refresh_token': 'my_refresh_token', + 'state': 'absent', + 'name': 'CxName', + 'project_id': 'tlv-support', + 'zone': 'us-west-1', + 'account_id': 'account-test', + 'company': 'NetApp', + 'service_account_email': 'terraform-user@tlv-support.iam.gserviceaccount.com', + 'service_account_path': 'test.json', + }) + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') +def test_module_fail_when_required_args_present(get_token, get_gcp_token, patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(set_default_args_pass_check()) + get_token.return_value = 'bearer', 'test' + get_gcp_token.return_value = 'token', None + my_module() + exit_json(changed=True, msg="TestCase Fail when required args are present") + assert exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.deploy_gcp_vm') 
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_custom_data_for_gcp') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.create_occm_gcp') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_deploy_vm') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') +def test_create_cloudmanager_connector_gcp_pass(get_post_api, get_vm, create_occm_gcp, get_custom_data_for_gcp, + deploy_gcp_vm, get_gcp_token, get_token, patch_ansible): + set_module_args(set_args_create_cloudmanager_connector_gcp()) + get_token.return_value = 'bearer', 'test' + get_gcp_token.return_value = 'test', None + my_obj = my_module() + + get_vm.return_value = None + deploy_gcp_vm.return_value = None, 'test', None + get_custom_data_for_gcp.return_value = 'test', 'test', None + create_occm_gcp.return_value = 'test' + get_post_api.return_value = None, None, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_connector_gcp: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'], create_occm_gcp.return_value[1] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.delete_occm_gcp') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_deploy_vm') 
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_occm_agents') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete') +def test_delete_cloudmanager_connector_gcp_pass(get_delete_api, get_agents, get_deploy_vm, delete_occm_gcp, get_gcp_token, get_token, patch_ansible): + set_module_args(set_args_delete_cloudmanager_connector_gcp()) + get_token.return_value = 'bearer', 'test' + get_gcp_token.return_value = 'test', None + my_obj = my_module() + + my_connector_gcp = { + 'name': 'Dummyname-vm-boot-deployment', + 'client_id': 'test', + 'refresh_token': 'my_refresh_token', + 'operation': {'status': 'active'} + } + get_deploy_vm.return_value = my_connector_gcp + get_agents.return_value = [] + get_delete_api.return_value = None, None, None + delete_occm_gcp.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_delete_cloudmanager_connector_gcp: %s' % repr(exc.value)) + + assert exc.value.args[0]['changed'] + + +TOKEN_DICT = { + 'access_token': 'access_token', + 'token_type': 'token_type' +} + + +AGENT_DICTS = { + 'active': { + 'agent': {'status': 'active'}, + }, + 'pending': { + 'agent': {'status': 'pending'}, + }, + 'other': { + 'agent': {'status': 'pending', 'agentId': 'agent11', 'name': 'CxName', 'provider': 'GCP'}, + } +} + + +CLIENT_DICT = { + 'clientId': '12345', + 'clientSecret': 'a1b2c3' +} + +SRR = { + # common responses (json_dict, error, ocr_id) + 'empty_good': ({}, None, None), + 'zero_record': ({'records': []}, None, None), + 'get_token': (TOKEN_DICT, None, None), + 'get_gcp_token': (TOKEN_DICT, None, None), + 'get_agent_status_active': (AGENT_DICTS['active'], None, None), + 'get_agent_status_pending': (AGENT_DICTS['pending'], None, None), + 'get_agent_status_other': (AGENT_DICTS['other'], None, None), + 'get_agents': ({'agents': [AGENT_DICTS['other']['agent']]}, None, None), + 
'get_agents_empty': ({'agents': []}, None, None), + 'get_agent_not_found': (b"{'message': 'Action not allowed for user'}", '403', None), + 'get_vm': ({'operation': {'status': 'active'}}, None, None), + 'get_vm_not_found': (b"{'message': 'is not found'}", '404', None), + 'register_agent': (CLIENT_DICT, None, None), + 'end_of_sequence': (None, "Unexpected call to send_request", None), + 'generic_error': (None, "Expected error", None), +} + + +@patch('time.sleep') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') +def test_delete_occm_gcp_pass(mock_request, get_gcp_token, ignore_sleep, patch_ansible): + set_module_args(set_args_delete_cloudmanager_connector_gcp()) + get_gcp_token.return_value = 'test', None + mock_request.side_effect = [ + SRR['get_token'], # OAUTH + SRR['empty_good'], # delete + SRR['get_agent_status_active'], # status + SRR['get_agent_status_pending'], # status + SRR['get_agent_status_other'], # status + SRR['end_of_sequence'], + ] + my_obj = my_module() + + error = my_obj.delete_occm_gcp() + print(error) + print(mock_request.mock_calls) + assert error is None + + +@patch('time.sleep') +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') +def test_create_occm_gcp_pass(mock_request, get_gcp_token, ignore_sleep, patch_ansible): + set_module_args(set_args_create_cloudmanager_connector_gcp()) + get_gcp_token.return_value = 'test', None + mock_request.side_effect = [ + SRR['get_token'], # OAUTH + SRR['register_agent'], # register + SRR['empty_good'], # deploy + SRR['get_agent_status_pending'], # status + SRR['get_agent_status_active'], # status + 
SRR['end_of_sequence'], + ] + my_obj = my_module() + + client_id = my_obj.create_occm_gcp() + print(client_id) + print(mock_request.mock_calls) + assert client_id == '12345' + + +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') +def test_get_deploy_vm_pass(mock_request, get_gcp_token, patch_ansible): + set_module_args(set_args_delete_cloudmanager_connector_gcp()) + get_gcp_token.return_value = 'test', None + mock_request.side_effect = [ + SRR['get_token'], # OAUTH + SRR['get_vm'], # get + SRR['end_of_sequence'], + ] + my_obj = my_module() + + vm = my_obj.get_deploy_vm() + print(vm) + print(mock_request.mock_calls) + assert vm == SRR['get_vm'][0] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') +def test_get_occm_agents_absent_pass(mock_request, get_gcp_token, patch_ansible): + set_module_args(set_args_delete_cloudmanager_connector_gcp()) + get_gcp_token.return_value = 'test', None + mock_request.side_effect = [ + SRR['get_token'], # OAUTH + SRR['get_agent_status_active'], # get + SRR['end_of_sequence'], + ] + my_obj = my_module() + + agents = my_obj.get_occm_agents() + print(agents) + print(mock_request.mock_calls) + assert agents == [SRR['get_agent_status_active'][0]['agent']] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') +def test_get_occm_agents_present_pass(mock_request, get_gcp_token, patch_ansible): + 
set_module_args(set_args_create_cloudmanager_connector_gcp()) + get_gcp_token.return_value = 'test', None + mock_request.side_effect = [ + SRR['get_token'], # OAUTH + SRR['get_agents'], # get + SRR['end_of_sequence'], + ] + my_obj = my_module() + + agents = my_obj.get_occm_agents() + print(agents) + print(mock_request.mock_calls) + assert agents == SRR['get_agents'][0]['agents'] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') +def test_create_idempotent(mock_request, get_gcp_token, patch_ansible): + set_module_args(set_args_create_cloudmanager_connector_gcp()) + get_gcp_token.return_value = 'test', None + mock_request.side_effect = [ + SRR['get_token'], # OAUTH + SRR['get_vm'], # get + SRR['get_agents'], # get + SRR['end_of_sequence'], + ] + my_obj = my_module() + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + print(exc) + assert not exc.value.args[0]['changed'] + assert exc.value.args[0]['client_id'] == SRR['get_agents'][0]['agents'][0]['agentId'] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') +def test_delete_idempotent(mock_request, get_gcp_token, patch_ansible): + set_module_args(set_args_delete_cloudmanager_connector_gcp()) + get_gcp_token.return_value = 'test', None + mock_request.side_effect = [ + SRR['get_token'], # OAUTH + SRR['get_vm_not_found'], # get vn + SRR['get_agent_not_found'], # get agents + SRR['end_of_sequence'], + ] + my_obj = my_module() + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + print(exc) + assert not 
exc.value.args[0]['changed'] + assert exc.value.args[0]['client_id'] == "" + + +# @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token') +# @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') +# def test_delete_idempotent(mock_request, get_gcp_token, patch_ansible): +# set_module_args(set_args_delete_cloudmanager_connector_gcp()) +# get_gcp_token.return_value = 'test', None +# mock_request.side_effect = [ +# SRR['get_token'], # OAUTH +# SRR['get_vm_not_found'], # get vn +# SRR['get_agents'], # get +# SRR['end_of_sequence'], +# ] +# my_obj = my_module() + +# with pytest.raises(AnsibleExitJson) as exc: +# my_obj.apply() +# print(mock_request.mock_calls) +# print(exc) +# assert not exc.value.args[0]['changed'] +# assert exc.value.args[0]['client_id'] == SRR['get_agents'][0][0]['agentId'] diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py new file mode 100644 index 000000000..e3dc685d4 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py @@ -0,0 +1,426 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests Cloudmanager Ansible module: ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch + +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws \ + import 
NetAppCloudManagerCVOAWS as my_module, IMPORT_EXCEPTION + +if IMPORT_EXCEPTION is not None and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7: %s' % IMPORT_EXCEPTION) + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'name': 'TestA', + 'client_id': 'test', + 'region': 'us-west-1', + 'use_latest_version': False, + 'ontap_version': 'ONTAP-9.10.0.T1', + 'vpc_id': 'vpc-test', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'instance_type': 'm5.xlarge', + 'refresh_token': 'myrefresh_token', + 'is_ha': False + }) + + def set_args_create_cloudmanager_cvo_aws(self): + return dict({ + 'state': 'present', + 'name': 'Dummyname', + 'client_id': 'test', + 
'region': 'us-west-1', + 'vpc_id': 'vpc-test', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'refresh_token': 'myrefresh_token', + 'is_ha': False + }) + + def set_args_delete_cloudmanager_cvo_aws(self): + return dict({ + 'state': 'absent', + 'name': 'Dummyname', + 'client_id': 'test', + 'region': 'us-west-1', + 'vpc_id': 'vpc-test', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'refresh_token': 'myrefresh_token', + 'is_ha': False + }) + + def set_args_create_bynode_cloudmanager_cvo_aws(self): + return dict({ + 'state': 'present', + 'name': 'Dummyname', + 'client_id': 'test', + 'region': 'us-west-1', + 'vpc_id': 'vpc-test', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'refresh_token': 'myrefresh_token', + 'license_type': 'cot-premium-byol', + 'platform_serial_number': '12345678', + 'is_ha': False + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + def test_module_fail_when_required_args_present(self, get_token): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + get_token.return_value = 'test', 'test' + my_module() + exit_json(changed=True, msg="TestCase Fail when required args are present") + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_aws_pass(self, get_post_api, get_working_environment_details_by_name, get_nss, + get_tenant, get_vpc, wait_on_completion, get_token): + set_module_args(self.set_args_create_cloudmanager_cvo_aws()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_working_environment_details_by_name.return_value = None, None + get_post_api.return_value = response, None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + get_vpc.return_value = 'test' + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_aws_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_aws_ha_pass(self, get_post_api, get_working_environment_details_by_name, get_nss, + get_tenant, get_vpc, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_aws() + data['is_ha'] = True + data['license_type'] = 'ha-capacity-paygo' + data['capacity_package_name'] = 'Essential' + data.pop('subnet_id') + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_working_environment_details_by_name.return_value = None, None + get_post_api.return_value = response, None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + get_vpc.return_value = 'test' + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_aws_ha_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_aws_capacity_license_pass(self, get_post_api, + 
get_working_environment_details_by_name, get_nss, + get_tenant, get_vpc, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_aws() + data['license_type'] = 'capacity-paygo' + data['capacity_package_name'] = 'Essential' + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_working_environment_details_by_name.return_value = None, None + get_post_api.return_value = response, None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + get_vpc.return_value = 'test' + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_aws_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_aws_ha_capacity_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, get_vpc, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_aws() + data['is_ha'] = True + data['license_type'] = 'ha-capacity-paygo' + data['capacity_package_name'] = 
'Essential' + data.pop('subnet_id') + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_working_environment_details_by_name.return_value = None, None + get_post_api.return_value = response, None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + get_vpc.return_value = 'test' + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_aws_ha_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_aws_nodebase_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, get_vpc, wait_on_completion, get_token): + data = self.set_args_create_bynode_cloudmanager_cvo_aws() + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_working_environment_details_by_name.return_value = None, None + get_post_api.return_value = response, None, None + get_nss.return_value = 'nss-test', None + 
get_tenant.return_value = 'test', None + get_vpc.return_value = 'test' + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_aws_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_aws_ha_nodebase_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, get_vpc, wait_on_completion, get_token): + data = self.set_args_create_bynode_cloudmanager_cvo_aws() + data['license_type'] = 'ha-cot-premium-byol' + data['platform_serial_number_node1'] = '12345678' + data['platform_serial_number_node2'] = '23456789' + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_working_environment_details_by_name.return_value = None, None + get_post_api.return_value = response, None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + get_vpc.return_value = 'test' + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + 
print('Info: test_create_cloudmanager_cvo_aws_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete') + def test_delete_cloudmanager_cvo_aws_pass(self, get_delete_api, get_working_environment_details_by_name, + wait_on_completion, get_token): + set_module_args(self.set_args_delete_cloudmanager_cvo_aws()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + my_cvo = { + 'name': 'test', + 'publicId': 'test'} + get_working_environment_details_by_name.return_value = my_cvo, None + get_delete_api.return_value = None, None, None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_delete_cloudmanager_cvo_aws_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_writing_speed_state') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_instance_license_type') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_tier_level') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_cvo_tags') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_svm_password') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.upgrade_ontap_image') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_property') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + def test_change_cloudmanager_cvo_aws(self, get_cvo, get_property, get_details, upgrade_ontap_image, update_svm_password, update_cvo_tags, + update_tier_level, update_instance_license_type, update_writing_speed_state, get_token): + data = self.set_default_args_pass_check() + data['svm_password'] = 'newpassword' + data['update_svm_password'] = True + data['ontap_version'] = 'ONTAP-9.10.1P3.T1' + data['upgrade_ontap_version'] = True + set_module_args(data) + + modify = ['svm_password', 'aws_tag', 'tier_level', 'ontap_version', 'instance_type', 'license_type', 'writing_speed_state'] + + my_cvo = { + 'name': 'TestA', + 'publicId': 'test', + 'cloudProviderName': 'Amazon', + 'svm_password': 'password', + 'isHa': False, + 'svmName': 'svm_TestA', + 'tenantId': 'Tenant-test', + 'workingEnvironmentType': 'VSA', + } + get_cvo.return_value = my_cvo, None + cvo_property = {'name': 'TestA', + 'publicId': 'test', + 'status': {'status': 'ON'}, + 'ontapClusterProperties': { + 'capacityTierInfo': {'tierLevel': 'normal'}, + 'licenseType': {'capacityLimit': {'size': 2.0, 'unit': 'TB'}, + 'name': 'Cloud Volumes ONTAP Capacity Based Charging'}, + 'ontapVersion': '9.10.0', + 'upgradeVersions': [{'autoUpdateAllowed': False, + 'imageVersion': 'ONTAP-9.10.1P3', + 'lastModified': 1634467078000}], + 'writingSpeedState': 'NORMAL'}, + 'awsProperties': {'accountId': u'123456789011', + 'availabilityZones': [u'us-east-1b'], + 'bootDiskSize': None, + 'cloudProviderAccountId': None, + 
'coreDiskExists': True, + 'instances': [{'availabilityZone': 'us-east-1b', + 'id': 'i-31', + 'imageId': 'ami-01a6f1234cb1ec375', + 'instanceProfileId': 'SimFabricPoolInstanceProfileId', + 'instanceType': 'm5.2xlarge', + 'isOCCMInstance': False, + 'isVsaInstance': True, + }], + 'regionName': 'us-west-1', + } + } + get_property.return_value = cvo_property, None + cvo_details = {'cloudProviderName': 'Amazon', + 'isHA': False, + 'name': 'TestA', + 'ontapClusterProperties': None, + 'publicId': 'test', + 'status': {'status': 'ON'}, + 'userTags': {'key1': 'value1'}, + 'workingEnvironmentType': 'VSA'} + get_details.return_value = cvo_details, None + get_token.return_value = 'test', 'test' + my_obj = my_module() + + for item in modify: + if item == 'svm_password': + update_svm_password.return_value = True, None + elif item == 'aws_tag': + update_cvo_tags.return_value = True, None + elif item == 'tier_level': + update_tier_level.return_value = True, None + elif item == 'ontap_version': + upgrade_ontap_image.return_value = True, None + elif item == 'writing_speed_state': + update_writing_speed_state.return_value = True, None + elif item == 'instance_type' or item == 'license_type': + update_instance_license_type.return_value = True, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_change_cloudmanager_cvo_aws: %s' % repr(exc.value)) diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py new file mode 100644 index 000000000..f3e072bdb --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py @@ -0,0 +1,439 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests Cloudmanager Ansible module: ''' + +from __future__ import 
(absolute_import, division, print_function) + +__metaclass__ = type + +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_azure \ + import NetAppCloudManagerCVOAZURE as my_module + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockCMConnection(): + ''' Mock response of http connections ''' + + def __init__(self, kind=None, parm1=None): + self.type = kind + self.parm1 = parm1 + # self.token_type, self.token = self.get_token() + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def 
setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'name': 'TestA', + 'client_id': 'test', + 'location': 'westus', + 'use_latest_version': False, + 'ontap_version': 'ONTAP-9.10.0.T1.azure', + 'vnet_id': 'vpc-test', + 'resource_group': 'test', + 'subnet_id': 'subnet-test', + 'subscription_id': 'test', + 'cidr': '10.0.0.0/24', + 'svm_password': 'password', + 'license_type': 'azure-cot-standard-paygo', + 'instance_type': 'Standard_DS4_v2', + 'refresh_token': 'myrefresh_token', + 'is_ha': False + }) + + def set_args_create_cloudmanager_cvo_azure(self): + return dict({ + 'state': 'present', + 'name': 'Dummyname', + 'client_id': 'test', + 'location': 'westus', + 'vnet_id': 'vpc-test', + 'resource_group': 'test', + 'subscription_id': 'test', + 'cidr': '10.0.0.0/24', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'refresh_token': 'myrefresh_token', + 'is_ha': False + }) + + def set_args_delete_cloudmanager_cvo_azure(self): + return dict({ + 'state': 'absent', + 'name': 'Dummyname', + 'client_id': 'test', + 'location': 'westus', + 'vnet_id': 'vpc-test', + 'resource_group': 'test', + 'subscription_id': 'test', + 'cidr': '10.0.0.0/24', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'refresh_token': 'myrefresh_token', + 'is_ha': False + }) + + def set_args_create_bynode_cloudmanager_cvo_azure(self): + return dict({ + 'state': 'present', + 'name': 'Dummyname', + 'client_id': 'test', + 'location': 'westus', + 'vnet_id': 'vpc-test', + 'resource_group': 'test', + 'subscription_id': 'test', + 'cidr': '10.0.0.0/24', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'refresh_token': 'myrefresh_token', + 'license_type': 'azure-cot-premium-byol', + 'serial_number': '12345678', + 'is_ha': False + }) + + def 
test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + self.rest_api = MockCMConnection() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + def test_module_fail_when_required_args_present(self, get_token): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + get_token.return_value = 'test', 'test' + my_module() + exit_json(changed=True, msg="TestCase Fail when required args are present") + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_azure_pass(self, get_post_api, get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + set_module_args(self.set_args_create_cloudmanager_cvo_azure()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 
'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_azure_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_azure_capacity_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_azure() + data['license_type'] = 'capacity-paygo' + data['capacity_package_name'] = 'Essential' + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_azure_capacity_license_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_azure_ha_capacity_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_azure() + data['is_ha'] = True + data['license_type'] = 'ha-capacity-paygo' + data['capacity_package_name'] = 'Professional' + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_azure_ha_capacity_license_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_azure_nodebase_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = self.set_args_create_bynode_cloudmanager_cvo_azure() + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_azure_nodebase_license_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_azure_ha_nodebase_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = 
self.set_args_create_bynode_cloudmanager_cvo_azure() + data['is_ha'] = True + data['license_type'] = 'azure-ha-cot-premium-byol' + data['platform_serial_number_node1'] = '12345678' + data['platform_serial_number_node2'] = '23456789' + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_azure_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_azure_ha_pass(self, get_post_api, get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_azure() + data['is_ha'] = True + data['license_type'] = 'ha-capacity-paygo' + data['capacity_package_name'] = 'Essential' + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, 
None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_azure_ha_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete') + def test_delete_cloudmanager_cvo_azure_pass(self, get_delete_api, get_working_environment_details_by_name, + wait_on_completion, get_token): + set_module_args(self.set_args_delete_cloudmanager_cvo_azure()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + my_cvo = { + 'name': 'Dummyname', + 'publicId': 'test'} + get_working_environment_details_by_name.return_value = my_cvo, None + get_delete_api.return_value = None, None, None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_delete_cloudmanager_cvo_azure_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_writing_speed_state') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_instance_license_type') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_tier_level') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_cvo_tags') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_svm_password') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.upgrade_ontap_image') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_property') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + def test_change_cloudmanager_cvo_azure(self, get_cvo, get_property, get_details, upgrade_ontap_image, update_svm_password, update_cvo_tags, + update_tier_level, update_instance_license_type, update_writing_speed_state, get_token): + data = self.set_default_args_pass_check() + data['svm_password'] = 'newpassword' + data['update_svm_password'] = True + data['ontap_version'] = 'ONTAP-9.10.1P3.T1.azure' + data['upgrade_ontap_version'] = True + data['instance_type'] = 'Standard_DS13_v2' + set_module_args(data) + + modify = ['svm_password', 'azure_tag', 'tier_level', 'ontap_version', 'instance_type', 'license_type'] + + my_cvo = { + 'name': 'TestA', + 'publicId': 'test', + 'svm_password': 'password', + 'isHA': False, + 'azure_tag': [{'tag_key': 'keya', 'tag_value': 'valuea'}, {'tag_key': 'keyb', 'tag_value': 'valueb'}], + } + get_cvo.return_value = my_cvo, None + + cvo_property = {'name': 'TestA', + 'publicId': 'test', + 'status': {'status': 'ON'}, + 'ontapClusterProperties': { + 'capacityTierInfo': {'tierLevel': 'normal'}, + 'licensePackageName': 'Professional', + 'licenseType': {'capacityLimit': {'size': 
2000.0, 'unit': 'TB'}, + 'name': 'Cloud Volumes ONTAP Capacity Based Charging'}, + 'ontapVersion': '9.10.0.T1.azure', + 'upgradeVersions': [{'autoUpdateAllowed': False, + 'imageVersion': 'ONTAP-9.10.1P3', + 'lastModified': 1634467078000}], + 'writingSpeedState': 'NORMAL'}, + 'providerProperties': { + 'cloudProviderAccountId': 'CloudProviderAccount-abcdwxyz', + 'regionName': 'westus', + 'instanceType': 'Standard_DS4_v2', + 'resourceGroup': { + 'name': 'TestA-rg', + 'location': 'westus', + 'tags': { + 'DeployedByOccm': 'true' + } + }, + 'vnetCidr': '10.0.0.0/24', + 'tags': { + 'DeployedByOccm': 'true' + }}, + 'tenantId': 'Tenant-abCdEfg1', + 'workingEnvironmentTyp': 'VSA' + } + get_property.return_value = cvo_property, None + cvo_details = {'cloudProviderName': 'Azure', + 'isHA': False, + 'name': 'TestA', + 'ontapClusterProperties': None, + 'publicId': 'test', + 'status': {'status': 'ON'}, + 'userTags': {'DeployedByOccm': 'true', 'key1': 'value1'}, + 'workingEnvironmentType': 'VSA'} + get_details.return_value = cvo_details, None + get_token.return_value = 'test', 'test' + my_obj = my_module() + + for item in modify: + if item == 'svm_password': + update_svm_password.return_value = True, None + elif item == 'azure_tag': + update_cvo_tags.return_value = True, None + elif item == 'tier_level': + update_tier_level.return_value = True, None + elif item == 'ontap_version': + upgrade_ontap_image.return_value = True, None + elif item == 'writing_speed_state': + update_writing_speed_state.return_value = True, None + elif item == 'instance_type': + update_instance_license_type.return_value = True, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_change_cloudmanager_cvo_azure: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py 
b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py new file mode 100644 index 000000000..1209d2b9e --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py @@ -0,0 +1,543 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests Cloudmanager Ansible module: ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_gcp \ + import NetAppCloudManagerCVOGCP as my_module + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # 
pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockCMConnection(): + ''' Mock response of http connections ''' + + def __init__(self, kind=None, parm1=None): + self.type = kind + self.parm1 = parm1 + # self.token_type, self.token = self.get_token() + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'name': 'TestA', + 'client_id': 'test', + 'zone': 'us-west-1b', + 'vpc_id': 'vpc-test', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'refresh_token': 'myrefresh_token', + 'is_ha': False, + 'gcp_service_account': 'test_account', + 'data_encryption_type': 'GCP', + 'gcp_volume_type': 'pd-ssd', + 'gcp_volume_size': 500, + 'gcp_volume_size_unit': 'GB', + 'project_id': 'default-project', + 'tier_level': 'standard' + }) + + def set_args_create_cloudmanager_cvo_gcp(self): + return dict({ + 'state': 'present', + 'name': 'Dummyname', + 'client_id': 'test', + 'zone': 'us-west1-b', + 'vpc_id': 'vpc-test', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'refresh_token': 'myrefresh_token', + 'use_latest_version': False, + 'capacity_tier': 'cloudStorage', + 'ontap_version': 'ONTAP-9.10.0.T1.gcp', + 'is_ha': False, + 'gcp_service_account': 'test_account', + 'data_encryption_type': 'GCP', + 'gcp_volume_type': 'pd-ssd', + 'gcp_volume_size': 500, + 'gcp_volume_size_unit': 'GB', + 'gcp_labels': [{'label_key': 'key1', 'label_value': 'value1'}, {'label_key': 'keya', 'label_value': 'valuea'}], + 'project_id': 'default-project' + }) + + def test_module_fail_when_required_args_missing(self): + ''' required 
arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + self.rest_api = MockCMConnection() + print('Info: %s' % exc.value.args[0]['msg']) + + def set_args_delete_cloudmanager_cvo_gcp(self): + return dict({ + 'state': 'absent', + 'name': 'Dummyname', + 'client_id': 'test', + 'zone': 'us-west-1', + 'vpc_id': 'vpc-test', + 'subnet_id': 'subnet-test', + 'svm_password': 'password', + 'refresh_token': 'myrefresh_token', + 'is_ha': False, + 'gcp_service_account': 'test_account', + 'data_encryption_type': 'GCP', + 'gcp_volume_type': 'pd-ssd', + 'gcp_volume_size': 500, + 'gcp_volume_size_unit': 'GB', + 'project_id': 'project-test' + }) + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + def test_module_fail_when_required_args_present(self, get_token): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + get_token.return_value = 'test', 'test' + my_module() + exit_json(changed=True, msg="TestCase Fail when required args are present") + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_gcp_pass(self, get_post_api, 
get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + set_module_args(self.set_args_create_cloudmanager_cvo_gcp()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_gcp_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_gcp_ha_pass(self, get_post_api, get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_gcp() + data['is_ha'] = True + data['license_type'] = 'ha-capacity-paygo' + data['capacity_package_name'] = 'Essential' + data['subnet0_node_and_data_connectivity'] = 'default' + data['subnet1_cluster_connectivity'] = 'subnet2' + data['subnet2_ha_connectivity'] = 'subnet3' + data['subnet3_data_replication'] = 'subnet1' + data['vpc0_node_and_data_connectivity'] = 'default' + 
data['vpc1_cluster_connectivity'] = 'vpc2' + data['vpc2_ha_connectivity'] = 'vpc3' + data['vpc3_data_replication'] = 'vpc1' + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_gcp_ha_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_gcp_capacity_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_gcp() + data['license_type'] = 'capacity-paygo' + data['capacity_package_name'] = 'Essential' + set_module_args(data) + + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + 
get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_gcp_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_gcp_ha_capacity_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_gcp() + data['license_type'] = 'ha-capacity-paygo' + data['capacity_package_name'] = 'Essential' + data['is_ha'] = True + data['subnet0_node_and_data_connectivity'] = 'default' + data['subnet1_cluster_connectivity'] = 'subnet2' + data['subnet2_ha_connectivity'] = 'subnet3' + data['subnet3_data_replication'] = 'subnet1' + data['vpc0_node_and_data_connectivity'] = 'default' + data['vpc1_cluster_connectivity'] = 'vpc2' + data['vpc2_ha_connectivity'] = 'vpc3' + data['vpc3_data_replication'] = 'vpc1' + set_module_args(data) + + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + 
get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_gcp_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_gcp_nodebase_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_gcp() + data['license_type'] = 'gcp-cot-premium-byol' + data['platform_serial_number'] = '12345678' + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_gcp_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post') + def test_create_cloudmanager_cvo_gcp_ha_nodebase_license_pass(self, get_post_api, + get_working_environment_details_by_name, get_nss, + get_tenant, wait_on_completion, get_token): + data = self.set_args_create_cloudmanager_cvo_gcp() + data['is_ha'] = True + data['subnet0_node_and_data_connectivity'] = 'default' + data['subnet1_cluster_connectivity'] = 'subnet2' + data['subnet2_ha_connectivity'] = 'subnet3' + data['subnet3_data_replication'] = 'subnet1' + data['vpc0_node_and_data_connectivity'] = 'default' + data['vpc1_cluster_connectivity'] = 'vpc2' + data['vpc2_ha_connectivity'] = 'vpc3' + data['vpc3_data_replication'] = 'vpc1' + data['platform_serial_number_node1'] = '12345678' + data['platform_serial_number_node2'] = '23456789' + data['license_type'] = 'gcp-ha-cot-premium-byol' + set_module_args(data) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'publicId': 'abcdefg12345'} + get_post_api.return_value = response, None, None + get_working_environment_details_by_name.return_value = None, None + get_nss.return_value = 'nss-test', None + get_tenant.return_value = 'test', None + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_cvo_gcp_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete') + def test_delete_cloudmanager_cvo_gcp_pass(self, get_delete_api, get_working_environment_details_by_name, + wait_on_completion, get_token): + set_module_args(self.set_args_delete_cloudmanager_cvo_gcp()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + my_cvo = { + 'name': 'Dummyname', + 'publicId': 'test'} + get_working_environment_details_by_name.return_value = my_cvo, None + + get_delete_api.return_value = None, None, 'test' + wait_on_completion.return_value = None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_delete_cloudmanager_cvo_gcp_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_instance_license_type') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_tier_level') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_cvo_tags') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_svm_password') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_property') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + def test_change_cloudmanager_cvo_gcp(self, get_cvo, get_property, get_details, update_svm_password, update_cvo_tags, + update_tier_level, update_instance_license_type, get_token): + set_module_args(self.set_args_create_cloudmanager_cvo_gcp()) + + modify = ['svm_password', 'gcp_labels', 'tier_level', 'instance_type'] + + my_cvo = { + 'name': 'TestA', + 'publicId': 'test', + 'cloudProviderName': 'GCP', + 'isHA': False, + 'svmName': 'svm_TestA', + 'svm_password': 'password', + 'tenantId': 'Tenant-test', + } + get_cvo.return_value = my_cvo, None + cvo_property = {'name': 'Dummyname', + 'publicId': 'test', + 'status': {'status': 'ON'}, + 'ontapClusterProperties': { + 'capacityTierInfo': {'tierLevel': 'standard'}, + 'licenseType': {'capacityLimit': {'size': 10.0, 'unit': 'TB'}, + 'name': 'Cloud Volumes ONTAP Standard'}, + 'ontapVersion': '9.10.0.T1', + 'writingSpeedState': 'NORMAL'}, + 'providerProperties': { + 'regionName': 'us-west1', + 'zoneName': ['us-west1-b'], + 'instanceType': 'n1-standard-8', + 'labels': {'cloud-ontap-dm': 'anscvogcp-deployment', + 'cloud-ontap-version': '9_10_0_t1', + 'key1': 'value1', + 'platform-serial-number': '90920130000000001020', + 'working-environment-id': 'vsaworkingenvironment-cxxt6zwj'}, + 'subnetCidr': '10.150.0.0/20', + 'projectName': 'default-project'}, + 'svmName': 'svm_Dummyname', + 'tenantId': 'Tenant-test', + 'workingEnvironmentTyp': 'VSA' + } + get_property.return_value = cvo_property, None + cvo_details = {'cloudProviderName': 'GCP', + 'isHA': False, + 'name': 'Dummyname', + 'ontapClusterProperties': None, + 'publicId': 'test', + 'status': {'status': 'ON'}, + 'userTags': {'key1': 'value1'}, + 'workingEnvironmentType': 'VSA'} + get_details.return_value = 
cvo_details, None + get_token.return_value = 'test', 'test' + my_obj = my_module() + + for item in modify: + if item == 'svm_password': + update_svm_password.return_value = True, None + elif item == 'gcp_labels': + update_cvo_tags.return_value = True, None + elif item == 'tier_level': + update_tier_level.return_value = True, None + elif item == 'instance_type': + update_instance_license_type.return_value = True, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_change_cloudmanager_cvo_gcp: %s' % repr(exc.value)) + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_writing_speed_state') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_instance_license_type') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_tier_level') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_cvo_tags') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_svm_password') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.upgrade_ontap_image') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_property') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + def test_change_cloudmanager_cvo_gcp_ha(self, get_cvo, get_property, get_details, upgrade_ontap_image, update_svm_password, + update_cvo_tags, update_tier_level, update_instance_license_type, 
update_writing_speed_state, get_token): + data = self.set_args_create_cloudmanager_cvo_gcp() + data['is_ha'] = True + data['svm_password'] = 'newpassword' + data['update_svm_password'] = True + data['ontap_version'] = 'ONTAP-9.10.1P3.T1.gcpha' + data['upgrade_ontap_version'] = True + data['subnet0_node_and_data_connectivity'] = 'default' + data['subnet1_cluster_connectivity'] = 'subnet2' + data['subnet2_ha_connectivity'] = 'subnet3' + data['subnet3_data_replication'] = 'subnet1' + data['vpc0_node_and_data_connectivity'] = 'default' + data['vpc1_cluster_connectivity'] = 'vpc2' + data['vpc2_ha_connectivity'] = 'vpc3' + data['vpc3_data_replication'] = 'vpc1' + data['platform_serial_number_node1'] = '12345678' + data['platform_serial_number_node2'] = '23456789' + data['license_type'] = 'gcp-ha-cot-premium-byol' + data['instance_type'] = 'n1-standard-8' + set_module_args(data) + + modify = ['svm_password', 'gcp_labels', 'tier_level', 'ontap_version', 'instance_type', 'license_type'] + + my_cvo = { + 'name': 'TestA', + 'publicId': 'test', + 'cloudProviderName': 'GCP', + 'isHA': True, + 'svmName': 'svm_TestA', + 'svm_password': 'password', + 'tenantId': 'Tenant-test', + } + get_cvo.return_value = my_cvo, None + cvo_property = {'name': 'Dummyname', + 'publicId': 'test', + 'status': {'status': 'ON'}, + 'ontapClusterProperties': { + 'capacityTierInfo': {'tierLevel': 'standard'}, + 'licenseType': {'capacityLimit': {'size': 10.0, 'unit': 'TB'}, + 'name': 'Cloud Volumes ONTAP Standard'}, + 'ontapVersion': '9.10.0.T1', + 'upgradeVersions': [{'autoUpdateAllowed': False, + 'imageVersion': 'ONTAP-9.10.1P3', + 'lastModified': 1634467078000}], + 'writingSpeedState': 'NORMAL'}, + 'providerProperties': { + 'regionName': 'us-west1', + 'zoneName': ['us-west1-b'], + 'instanceType': 'n1-standard-8', + 'labels': {'cloud-ontap-dm': 'anscvogcp-deployment', + 'cloud-ontap-version': '9_10_0_t1', + 'key1': 'value1', + 'platform-serial-number': '90920130000000001020', + 'working-environment-id': 
'vsaworkingenvironment-cxxt6zwj'}, + 'subnetCidr': '10.150.0.0/20', + 'projectName': 'default-project'}, + 'svmName': 'svm_Dummyname', + 'tenantId': 'Tenant-test', + 'workingEnvironmentTyp': 'VSA' + } + get_property.return_value = cvo_property, None + cvo_details = {'cloudProviderName': 'GCP', + 'isHA': True, + 'name': 'Dummyname', + 'ontapClusterProperties': None, + 'publicId': 'test', + 'status': {'status': 'ON'}, + 'userTags': {'key1': 'value1', 'partner-platform-serial-number': '90920140000000001019', + 'gcp_resource_id': '14004944518802780827', 'count-down': '3'}, + 'workingEnvironmentType': 'VSA'} + get_details.return_value = cvo_details, None + get_token.return_value = 'test', 'test' + my_obj = my_module() + + for item in modify: + if item == 'svm_password': + update_svm_password.return_value = True, None + elif item == 'gcp_labels': + update_cvo_tags.return_value = True, None + elif item == 'tier_level': + update_tier_level.return_value = True, None + elif item == 'ontap_version': + upgrade_ontap_image.return_value = True, None + elif item == 'writing_speed_state': + update_writing_speed_state.return_value = True, None + elif item == 'instance_type': + update_instance_license_type.return_value = True, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_change_cloudmanager_cvo_gcp: %s' % repr(exc.value)) diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_info.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_info.py new file mode 100644 index 000000000..9b417ed1b --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_info.py @@ -0,0 +1,591 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: ''' + +from __future__ import (absolute_import, division, print_function) + 
+__metaclass__ = type + +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_info \ + import NetAppCloudmanagerInfo as my_module + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockCMConnection(): + ''' Mock response of http connections ''' + + def __init__(self, kind=None, parm1=None): + self.type = kind + self.parm1 = parm1 + self.xml_in = None + self.xml_out = None + + +# using pytest natively, without unittest.TestCase +@pytest.fixture +def patch_ansible(): + with patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) as mocks: + yield mocks + + +def 
set_default_args_pass_check(patch_ansible): + return dict({ + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'myrefresh_token', + }) + + +def set_args_get_cloudmanager_working_environments_info(): + args = { + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'myrefresh_token', + 'gather_subsets': ['working_environments_info'] + } + return args + + +def set_args_get_cloudmanager_aggregates_info(): + args = { + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'myrefresh_token', + 'gather_subsets': ['working_environments_info'] + } + return args + + +def set_args_get_accounts_info(): + args = { + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'myrefresh_token', + 'gather_subsets': ['accounts_info'] + } + return args + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environments_info') +def test_get_working_environments_info(working_environments_info, get_token, patch_ansible): + args = dict(set_args_get_cloudmanager_working_environments_info()) + set_module_args(args) + get_token.return_value = 'token_type', 'token' + working_environments_info.return_value = { + "azureVsaWorkingEnvironments": [ + { + "name": "testazure", + "cloudProviderName": "Azure", + "creatorUserEmail": "samlp|NetAppSAML|testuser", + "isHA": False, + "publicId": "VsaWorkingEnvironment-az123456", + "tenantId": "Tenant-2345", + "workingEnvironmentType": "VSA", + } + ], + "gcpVsaWorkingEnvironments": [], + "onPremWorkingEnvironments": [], + "vsaWorkingEnvironments": [ + { + "name": "testAWS", + 
"cloudProviderName": "Amazon", + "creatorUserEmail": "samlp|NetAppSAML|testuser", + "isHA": False, + "publicId": "VsaWorkingEnvironment-aws12345", + "tenantId": "Tenant-2345", + "workingEnvironmentType": "VSA", + }, + { + "name": "testAWSHA", + "cloudProviderName": "Amazon", + "creatorUserEmail": "samlp|NetAppSAML|testuser", + "isHA": True, + "publicId": "VsaWorkingEnvironment-awsha345", + "tenantId": "Tenant-2345", + "workingEnvironmentType": "VSA", + } + ] + }, None + my_obj = my_module() + my_obj.rest_api.api_root_path = "my_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_info: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') +@patch( + 'ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_info.NetAppCloudmanagerInfo.get_aggregates_info') +def test_get_aggregates_info(aggregates_info, get_token, patch_ansible): + args = dict(set_args_get_cloudmanager_aggregates_info()) + set_module_args(args) + get_token.return_value = 'token_type', 'token' + aggregates_info.return_value = { + "azureVsaWorkingEnvironments": { + "VsaWorkingEnvironment-az123456": [ + { + "availableCapacity": { + "size": 430.0, + "unit": "GB" + }, + "disks": [ + { + "device": "LUN 3.1", + "name": "testazure-01-1", + "ownerNode": "testazure-01", + "position": "data", + } + ], + "encryptionType": "notEncrypted", + "homeNode": "testazure-01", + "isRoot": False, + "name": "aggr1", + "ownerNode": "testazure-01", + "providerVolumes": [ + { + "device": "1", + "diskType": "Premium_LRS", + "encrypted": False, + "instanceId": "testazureid", + "name": "testazuredatadisk1", + "size": { + "size": 500.0, + "unit": "GB" + }, + "state": "available" + } + ], + "sidlEnabled": False, + "snaplockType": "non_snaplock", + "state": "online", + "totalCapacity": { + "size": 500.0, + "unit": "GB" + }, + 
"usedCapacity": { + "size": 70.0, + "unit": "GB" + }, + "volumes": [ + { + "isClone": False, + "name": "svm_testazure_root", + "rootVolume": True, + "thinProvisioned": True, + "totalSize": { + "size": 1.0, + "unit": "GB" + }, + "usedSize": { + "size": 0.000339508056640625, + "unit": "GB" + } + }, + { + "isClone": False, + "name": "azv1", + "rootVolume": False, + "thinProvisioned": True, + "totalSize": { + "size": 500.0, + "unit": "GB" + }, + "usedSize": { + "size": 0.0, + "unit": "GB" + } + } + ] + }, + ] + }, + "gcpVsaWorkingEnvironments": {}, + "onPremWorkingEnvironments": {}, + "vsaWorkingEnvironments": { + "VsaWorkingEnvironment-aws12345": [ + { + "availableCapacity": { + "size": 430.0, + "unit": "GB" + }, + "disks": [ + { + "device": "xvdh vol-381", + "name": "testAWSHA-01-i-196h", + "ownerNode": "testAWSHA-01", + "position": "data", + }, + { + "device": "xvdh vol-382", + "name": "testAWSHA-01-i-195h", + "ownerNode": "testAWSHA-01", + "position": "data", + } + ], + "encryptionType": "cloudEncrypted", + "homeNode": "testAWSHA-01", + "isRoot": False, + "name": "aggr1", + "ownerNode": "testAWSHA-01", + "providerVolumes": [ + { + "device": "/dev/xvdh", + "diskType": "gp2", + "encrypted": True, + "id": "vol-381", + "instanceId": "i-196", + "name": "vol-381", + "size": { + "size": 500.0, + "unit": "GB" + }, + "state": "in-use" + }, + { + "device": "/dev/xvdh", + "diskType": "gp2", + "encrypted": True, + "id": "vol-382", + "instanceId": "i-195", + "name": "vol-382", + "size": { + "size": 500.0, + "unit": "GB" + }, + "state": "in-use" + } + ], + "sidlEnabled": True, + "snaplockType": "non_snaplock", + "state": "online", + "totalCapacity": { + "size": 500.0, + "unit": "GB" + }, + "usedCapacity": { + "size": 70.0, + "unit": "GB" + }, + "volumes": [ + { + "isClone": False, + "name": "svm_testAWSHA_root", + "rootVolume": True, + "thinProvisioned": True, + "totalSize": { + "size": 1.0, + "unit": "GB" + }, + "usedSize": { + "size": 0.000339508056640625, + "unit": "GB" + } + 
}, + { + "isClone": False, + "name": "vha", + "rootVolume": False, + "thinProvisioned": True, + "totalSize": { + "size": 100.0, + "unit": "GB" + }, + "usedSize": { + "size": 0.0, + "unit": "GB" + } + } + ] + } + ], + "VsaWorkingEnvironment-awsha345": [ + { + "availableCapacity": { + "size": 430.0, + "unit": "GB" + }, + "disks": [ + { + "device": "xvdg vol-369", + "name": "testAWS-01-i-190g", + "ownerNode": "testAWS-01", + "position": "data", + } + ], + "encryptionType": "cloudEncrypted", + "homeNode": "testAWS-01", + "isRoot": False, + "name": "aggr1", + "ownerNode": "testAWS-01", + "providerVolumes": [ + { + "device": "/dev/xvdg", + "diskType": "gp2", + "encrypted": True, + "id": "vol-369", + "instanceId": "i-190", + "name": "vol-369", + "size": { + "size": 500.0, + "unit": "GB" + }, + "state": "in-use" + } + ], + "sidlEnabled": True, + "snaplockType": "non_snaplock", + "state": "online", + "totalCapacity": { + "size": 500.0, + "unit": "GB" + }, + "usedCapacity": { + "size": 70.0, + "unit": "GB" + }, + "volumes": [ + { + "isClone": False, + "name": "svm_testAWS_root", + "rootVolume": True, + "thinProvisioned": True, + "totalSize": { + "size": 1.0, + "unit": "GB" + }, + "usedSize": { + "size": 0.000339508056640625, + "unit": "GB" + } + }, + { + "isClone": False, + "name": "v1", + "rootVolume": False, + "thinProvisioned": True, + "totalSize": { + "size": 100.0, + "unit": "GB" + }, + "usedSize": { + "size": 0.0, + "unit": "GB" + } + } + ] + }, + { + "availableCapacity": { + "size": 86.0, + "unit": "GB" + }, + "disks": [ + { + "device": "xvdh vol-371", + "name": "testAWS-01-i-190h", + "ownerNode": "testAWS-01", + "position": "data", + } + ], + "encryptionType": "cloudEncrypted", + "homeNode": "testAWS-01", + "isRoot": False, + "name": "aggr2", + "ownerNode": "testAWS-01", + "providerVolumes": [ + { + "device": "/dev/xvdh", + "diskType": "gp2", + "encrypted": True, + "id": "vol-371", + "instanceId": "i-190", + "name": "vol-371", + "size": { + "size": 100.0, + "unit": 
"GB" + }, + "state": "in-use" + } + ], + "sidlEnabled": True, + "snaplockType": "non_snaplock", + "state": "online", + "totalCapacity": { + "size": 100.0, + "unit": "GB" + }, + "usedCapacity": { + "size": 0.0, + "unit": "GB" + }, + "volumes": [] + } + ] + } + } + my_obj = my_module() + my_obj.rest_api.api_root_path = "my_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_info: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') +@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_accounts_info') +def test_get_accounts_info(accounts_info, get_token, patch_ansible): + args = dict(set_args_get_accounts_info()) + set_module_args(args) + get_token.return_value = 'token_type', 'token' + accounts_info.return_value = { + "awsAccounts": [ + { + "accessKey": "1", + "accountId": "123456789011", + "accountName": "tami", + "accountType": "AWS_KEYS", + "publicId": "CloudProviderAccount-Ekj6L9QX", + "subscriptionId": "hackExp10Days", + "vsaList": [] + }, + { + "accessKey": "", + "accountId": "123456789011", + "accountName": "Instance Profile", + "accountType": "INSTANCE_PROFILE", + "occmRole": "occmRole", + "publicId": "InstanceProfile", + "subscriptionId": "hackExp10Days", + "vsaList": [ + { + "name": "CVO_AWSCluster", + "publicId": "VsaWorkingEnvironment-9m3I6i3I", + "workingEnvironmentType": "AWS" + }, + { + "name": "testAWS1", + "publicId": "VsaWorkingEnvironment-JCzkA9OX", + "workingEnvironmentType": "AWS" + }, + ] + } + ], + "azureAccounts": [ + { + "accountName": "AzureKeys", + "accountType": "AZURE_KEYS", + "applicationId": "1", + "publicId": "CloudProviderAccount-T84ceMYu", + "tenantId": "1", + "vsaList": [ + { + "name": "testAZURE", + "publicId": "VsaWorkingEnvironment-jI0tbceH", + "workingEnvironmentType": "AZURE" + }, + { + "name": 
"test", + "publicId": "VsaWorkingEnvironment-00EnDcfB", + "workingEnvironmentType": "AZURE" + }, + ] + }, + { + "accountName": "s", + "accountType": "AZURE_KEYS", + "applicationId": "1", + "publicId": "CloudProviderAccount-XxbN95dj", + "tenantId": "1", + "vsaList": [] + } + ], + "gcpStorageAccounts": [], + "nssAccounts": [ + { + "accountName": "TESTCLOUD2", + "accountType": "NSS_KEYS", + "nssUserName": "TESTCLOUD2", + "publicId": "be2f3cac-352a-46b9-a341-a446c35b61c9", + "vsaList": [ + { + "name": "testAWS", + "publicId": "VsaWorkingEnvironment-3txYJOsX", + "workingEnvironmentType": "AWS" + }, + { + "name": "testAZURE", + "publicId": "VsaWorkingEnvironment-jI0tbceH", + "workingEnvironmentType": "AZURE" + }, + ] + }, + { + "accountName": "ntapitdemo", + "accountType": "NSS_KEYS", + "nssUserName": "ntapitdemo", + "publicId": "01e43a7d-cfc9-4682-aa12-15374ce81638", + "vsaList": [ + { + "name": "test", + "publicId": "VsaWorkingEnvironment-00EnDcfB", + "workingEnvironmentType": "AZURE" + } + ] + } + ] + }, None + my_obj = my_module() + my_obj.rest_api.api_root_path = "my_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_info: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py new file mode 100644 index 000000000..a9f41beed --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py @@ -0,0 +1,144 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests Cloudmanager Ansible module: ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import sys +import pytest + +from 
ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account \ + import NetAppCloudmanagerNssAccount as my_module + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockCMConnection(): + ''' Mock response of http connections ''' + def __init__(self, kind=None, parm1=None): + self.type = kind + self.parm1 = parm1 + self.xml_in = None + self.xml_out = None + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + 
self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'name': 'test_nss_account', + 'username': 'username', + 'password': 'password', + 'client_id': 'client_id', + 'refresh_token': 'refrsh_token' + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.get_nss_account') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.create_nss_account') + def test_create_nss_account_successfully(self, create, get, get_token): + set_module_args(self.set_default_args_pass_check()) + get.return_value = None + create.return_value = None + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.get_nss_account') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.create_nss_account') + def test_create_nss_account_idempotency(self, create, get, get_token): + set_module_args(self.set_default_args_pass_check()) + get.return_value = { + 'name': 'test_nss_account', + 'username': 'TESTCLOUD1', + 'password': 'test_test', + 'client_id': 
'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'CvMJXRhz5V4dmxZqVg5LDRDlZyE - kbqRKT9YMcAsjmwFs' + } + create.return_value = None + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.get_nss_account') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.delete_nss_account') + def test_create_nss_account_successfully(self, delete, get, get_token): + args = self.set_default_args_pass_check() + args['state'] = 'absent' + set_module_args(args) + get.return_value = { + 'name': 'test_nss_account', + 'username': 'TESTCLOUD1', + 'password': 'test_test', + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'refresh_token': 'CvMJXRhz5V4dmxZqVg5LDRDlZyE - kbqRKT9YMcAsjmwFs' + } + delete.return_value = None + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py new file mode 100644 index 000000000..9d1189489 --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py @@ -0,0 +1,176 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests Cloudmanager Ansible module: ''' + +from __future__ import 
(absolute_import, division, print_function) + +__metaclass__ = type + +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror \ + import NetAppCloudmanagerSnapmirror as my_module + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def 
set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'source_working_environment_name': 'TestA', + 'destination_working_environment_name': 'TestB', + 'source_volume_name': 'source', + 'destination_volume_name': 'dest', + 'source_svm_name': 'source_svm', + 'destination_svm_name': 'dest_svm', + 'policy': 'MirrorAllSnapshots', + 'schedule': 'min', + 'max_transfer_rate': 102400, + 'client_id': 'client_id', + 'refresh_token': 'myrefresh_token', + }) + + def set_args_create_cloudmanager_snapmirror(self): + return dict({ + 'state': 'present', + 'source_working_environment_name': 'TestA', + 'destination_working_environment_name': 'TestB', + 'source_volume_name': 'source', + 'destination_volume_name': 'dest', + 'source_svm_name': 'source_svm', + 'destination_svm_name': 'dest_svm', + 'policy': 'MirrorAllSnapshots', + 'schedule': 'min', + 'max_transfer_rate': 102400, + 'client_id': 'client_id', + 'refresh_token': 'myrefresh_token', + }) + + def set_args_delete_cloudmanager_snapmirror(self): + return dict({ + 'state': 'absent', + 'source_working_environment_name': 'TestA', + 'destination_working_environment_name': 'TestB', + 'source_volume_name': 'source', + 'destination_volume_name': 'dest', + 'client_id': 'client_id', + 'refresh_token': 'myrefresh_token', + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + def test_module_fail_when_required_args_present(self, get_token): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + get_token.return_value = 'test', 'test' + my_module() + exit_json(changed=True, msg="TestCase Fail when required args are present") 
+ assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_detail_for_snapmirror') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.get_snapmirror') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.build_quote_request') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.quote_volume') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.get_volumes') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.get_interclusterlifs') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_create_cloudmanager_snapmirror_create_pass(self, send_request, get_interclusterlifs, get_volumes, quote_volume, build_quote_request, + get_snapmirror, wait_on_completion, get_working_environment_detail_for_snapmirror, get_token): + set_module_args(self.set_args_create_cloudmanager_snapmirror()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + response = {'id': 'abcdefg12345'} + source_we_info = {'publicId': 'test1', 'workingEnvironmentType': 'AMAZON'} + dest_we_info = {'publicId': 'test2', 'workingEnvironmentType': 'AMAZON', 'svmName': 'source_svm', 'name': 'TestB'} + source_vol = [{'name': 'source', 'svmName': 'source_svm', 'providerVolumeType': 'abc'}] + quote_volume_response = {'numOfDisks': 10, 'aggregateName': 'aggr1'} + 
interclusterlifs_resp = {'interClusterLifs': [{'address': '10.10.10.10'}], 'peerInterClusterLifs': [{'address': '10.10.10.10'}]} + get_working_environment_detail_for_snapmirror.return_value = source_we_info, dest_we_info, None + send_request.return_value = response, None, None + wait_on_completion.return_value = None + get_snapmirror.return_value = None + get_volumes.return_value = source_vol + build_quote_request.return_value = {'name': 'test'} + quote_volume.return_value = quote_volume_response + get_interclusterlifs.return_value = interclusterlifs_resp + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_cloudmanager_snapmirror_create_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.get_snapmirror') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_delete_cloudmanager_snapmirror_delete_pass(self, send_request, get_snapmirror, get_token): + set_module_args(self.set_args_delete_cloudmanager_snapmirror()) + get_token.return_value = 'test', 'test' + my_obj = my_module() + + my_snapmirror = { + 'source_working_environment_id': '456', + 'destination_svm_name': 'dest_svm', + 'destination_working_environment_id': '123'} + get_snapmirror.return_value = my_snapmirror + send_request.return_value = None, None, None + + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_delete_cloudmanager_snapmirror_delete_pass: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_volume.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_volume.py new 
file mode 100644 index 000000000..15b4802df --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_volume.py @@ -0,0 +1,216 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests Cloudmanager Ansible module: ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import sys +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest +from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume \ + import NetAppCloudmanagerVolume as my_module + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5): + pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7') + + +def set_module_args(args): + '''prepare arguments so that they will be picked up during module creation''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + '''Exception class to be raised by module.exit_json and caught by the test case''' + + +class AnsibleFailJson(Exception): + '''Exception class to be raised by module.fail_json and caught by the test case''' + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over exit_json; package return data into an exception''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + '''function to patch over fail_json; package return data into an exception''' + 
kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockCMConnection(): + ''' Mock response of http connections ''' + def __init__(self, kind=None, parm1=None): + self.type = kind + self.parm1 = parm1 + self.xml_in = None + self.xml_out = None + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_pass_check(self): + return dict({ + 'state': 'present', + 'name': 'testvol', + 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345', + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'svm_name': 'svm_justinaws', + 'snapshot_policy_name': 'default', + 'tiering_policy': 'auto', + 'export_policy_type': 'custom', + 'export_policy_ip': ["10.30.0.1/16"], + 'export_policy_nfs_version': ["nfs4"], + 'refresh_token': 'myrefresh_token', + 'size': 10, + }) + + def set_default_args_with_workingenv_name_pass_check(self): + return dict({ + 'state': 'present', + 'name': 'testvol', + 'working_environment_name': 'weone', + 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh', + 'svm_name': 'svm_justinaws', + 'snapshot_policy_name': 'default', + 'export_policy_type': 'custom', + 'export_policy_ip': ["10.30.0.1/16"], + 'export_policy_nfs_version': ["nfs4"], + 'refresh_token': 'myrefresh_token', + 'size': 10, + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.create_volume') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_create_volume_successfully(self, send_request, create, get, get_token): + set_module_args(self.set_default_args_pass_check()) + get.return_value = None + create.return_value = None + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, None)] + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_create_volume_idempotency(self, send_request, get, get_token): + set_module_args(self.set_default_args_pass_check()) + get.return_value = { + 'name': 'testvol', + 'snapshot_policy_name': 'default', + 'export_policy_type': 'custom', + 'export_policy_ip': ["10.30.0.1/16"], + 'export_policy_nfs_version': ["nfs4"], + } + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, None)] + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.modify_volume') + 
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_update_volume_successfully(self, send_request, get, get_token, modify): + set_module_args(self.set_default_args_pass_check()) + get.return_value = { + 'name': 'testvol', + 'snapshot_policy_name': 'default', + 'tiering_policy': 'snapshot_only', + 'export_policy_type': 'custom', + 'export_policy_ip': ["10.30.0.1/16"], + 'export_policy_nfs_version': ["nfs3", "nfs4"], + } + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, None)] + get_token.return_value = ("type", "token") + modify.return_value = None + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.modify_volume') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_update_volume_idempotency(self, send_request, get, get_token, modify): + set_module_args(self.set_default_args_pass_check()) + get.return_value = { + 'name': 'testvol', + 'snapshot_policy_name': 'default', + 'export_policy_type': 'custom', + 'export_policy_ip': ["10.30.0.1/16"], + 'export_policy_nfs_version': ["nfs4"], + } + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 
'cloudProviderName': "aws", 'isHA': False}, None, None)] + get_token.return_value = ("type", "token") + modify.return_value = None + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume') + @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.create_volume') + @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request') + def test_create_volume_by_workingenv_name_successfully(self, send_request, create, get, get_we, get_token): + args = self.set_default_args_with_workingenv_name_pass_check() + my_we = { + 'name': 'test', + 'publicId': 'test', + 'cloudProviderName': 'Amazon'} + get_we.return_value = my_we, None + args['working_environment_id'] = my_we['publicId'] + set_module_args(args) + get.return_value = None + create.return_value = None + send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, None)] + get_token.return_value = ("type", "token") + obj = my_module() + obj.rest_api.api_root_path = "test_root_path" + + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/requirements-azure.txt b/ansible_collections/netapp/cloudmanager/tests/unit/requirements-azure.txt new file mode 100644 index 000000000..484081daa --- /dev/null +++ 
b/ansible_collections/netapp/cloudmanager/tests/unit/requirements-azure.txt @@ -0,0 +1 @@ +cryptography>=3.2.0 ; python_version >= '3.5' \ No newline at end of file diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/requirements.txt b/ansible_collections/netapp/cloudmanager/tests/unit/requirements.txt new file mode 100644 index 000000000..88c25079f --- /dev/null +++ b/ansible_collections/netapp/cloudmanager/tests/unit/requirements.txt @@ -0,0 +1,10 @@ +boto3 ; python_version >= '3.5' +botocore ; python_version >= '3.5' +azure-mgmt-compute ; python_version >= '3.5' +azure-mgmt-network ; python_version >= '3.5' +azure-mgmt-storage ; python_version >= '3.5' +azure-mgmt-resource ; python_version >= '3.5' +azure-cli-core ; python_version >= '3.5' +msrestazure ; python_version >= '3.5' +azure-common ; python_version >= '3.5' +google-auth ; python_version >= '3.5' diff --git a/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..93fbe057a --- /dev/null +++ b/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,210 @@ +--- +name: 🐛 Bug report +description: Create a report to help us improve + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to report a bug in netapp.elementsw!** + + + ⚠ + Verify first that your issue is not [already reported on + GitHub][issue search] and keep in mind that we may have to keep + the current behavior because [every change breaks someone's + workflow][XKCD 1172]. + We try to be mindful about this. + + Also test if the latest release and devel branch are affected too. + + + **Tip:** If you are seeking community support, please consider + [Join our Slack community][ML||IRC]. 
+ + + + [ML||IRC]: + https://join.slack.com/t/netapppub/shared_invite/zt-njcjx2sh-1VR2mEDvPcJAmPutOnP~mg + + [issue search]: ../search?q=is%3Aissue&type=issues + + [XKCD 1172]: https://xkcd.com/1172/ + + +- type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. + placeholder: >- + When I try to do X with netapp.elementsw from the devel branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the rst file, module, plugin, task or + feature below, *use your best guess if unsure*. + + + **Tip:** Cannot find it in this repository? Please be advised that + the source for some parts of the documentation are hosted outside + of this repository. If the page you are reporting describes + modules/plugins/etc that are not officially supported by the + Ansible Core Engineering team, there is a good chance that it is + coming from one of the [Ansible Collections maintained by the + community][collections org]. If this is the case, please make sure + to file an issue under the appropriate project there instead. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` below, under + the prompt line. Please don't wrap it with tripple backticks — your + whole input will be turned into a code snippet automatically. 
+ render: console + value: | + $ ansible --version + placeholder: | + $ ansible --version + ansible [core 2.11.0b4.post0] (detached HEAD ref: refs/) last updated 2021/04/02 00:33:35 (GMT +200) + config file = None + configured module search path = ['~/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible python module location = ~/src/github/ansible/ansible/lib/ansible + ansible collection location = ~/.ansible/collections:/usr/share/ansible/collections + executable location = bin/ansible + python version = 3.9.0 (default, Oct 26 2020, 13:08:59) [GCC 10.2.0] + jinja version = 2.11.3 + libyaml = True + validations: + required: true + +- type: textarea + attributes: + label: ElementSW Collection Version + description: >- + ElementSW Collection Version. Run `ansible-galaxy collection` and copy the entire output + render: console + value: | + $ ansible-galaxy collection list + validations: + required: true + +- type: textarea + attributes: + label: Playbook + description: >- + The task from the playbook that is give you the issue + render: console + validations: + required: true + +- type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also pased any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: | + 1. Implement the following playbook: + + ```yaml + --- + # ping.yml + - hosts: all + gather_facts: false + tasks: + - ping: + ... + ``` + 2. Then run `ANSIBLE_DEBUG=1 ansible-playbook ping.yml -vvvvv` + 3. An error occurs. + validations: + required: true + +- type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. 
+ placeholder: >- + I expected X to happen because I assumed Y and was shocked + that it did not. + validations: + required: true + +- type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output and don't wrap it with tripple backticks — your + whole input will be turned into a code snippet automatically. + render: console + placeholder: >- + Certificate did not match expected hostname: files.pythonhosted.org. Certificate: {'notAfter': 'Apr 28 19:20:25 2021 GMT', 'subjectAltName': ((u'DNS', 'r.ssl.fastly.net'), (u'DNS', '*.catchpoint.com'), (u'DNS', '*.cnn.io'), (u'DNS', '*.dollarshaveclub.com'), (u'DNS', '*.eater.com'), (u'DNS', '*.fastly.picmonkey.com'), (u'DNS', '*.files.saymedia-content.com'), (u'DNS', '*.ft.com'), (u'DNS', '*.meetupstatic.com'), (u'DNS', '*.nfl.com'), (u'DNS', '*.pagar.me'), (u'DNS', '*.picmonkey.com'), (u'DNS', '*.realself.com'), (u'DNS', '*.sbnation.com'), (u'DNS', '*.shakr.com'), (u'DNS', '*.streamable.com'), (u'DNS', '*.surfly.com'), (u'DNS', '*.theverge.com'), (u'DNS', '*.thrillist.com'), (u'DNS', '*.vox-cdn.com'), (u'DNS', '*.vox.com'), (u'DNS', '*.voxmedia.com'), (u'DNS', 'eater.com'), (u'DNS', 'ft.com'), (u'DNS', 'i.gse.io'), (u'DNS', 'picmonkey.com'), (u'DNS', 'realself.com'), (u'DNS', 'static.wixstatic.com'), (u'DNS', 'streamable.com'), (u'DNS', 'surfly.com'), (u'DNS', 'theverge.com'), (u'DNS', 'vox-cdn.com'), (u'DNS', 'vox.com'), (u'DNS', 'www.joyent.com')), 'subject': ((('countryName', u'US'),), (('stateOrProvinceName', u'California'),), (('localityName', u'San Francisco'),), (('organizationName', u'Fastly, Inc'),), (('commonName', u'r.ssl.fastly.net'),))} + Exception: + Traceback (most recent call last): + File "/usr/local/lib/python2.6/dist-packages/pip/basecommand.py", line 215, in main + status = self.run(options, args) + File "/usr/local/lib/python2.6/dist-packages/pip/commands/install.py", line 335, 
in run + wb.build(autobuilding=True) + File "/usr/local/lib/python2.6/dist-packages/pip/wheel.py", line 749, in build + self.requirement_set.prepare_files(self.finder) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 380, in prepare_files + ignore_dependencies=self.ignore_dependencies)) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 620, in _prepare_file + session=self.session, hashes=hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 821, in unpack_url + hashes=hashes + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 659, in unpack_http_url + hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 853, in _download_http_url + stream=True, + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 521, in get + return self.request('GET', url, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 386, in request + return super(PipSession, self).request(method, url, *args, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 508, in request + resp = self.send(prep, **send_kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 618, in send + r = adapter.send(request, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/cachecontrol/adapter.py", line 47, in send + resp = super(CacheControlAdapter, self).send(request, **kw) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/adapters.py", line 506, in send + raise SSLError(e, request=request) + SSLError: HTTPSConnectionPool(host='files.pythonhosted.org', port=443): Max retries exceeded with url: /packages/ef/ab/aa12712415809bf698e719b307419f953e25344e8f42d557533d7a02b276/netapp_lib-2020.7.16-py2-none-any.whl (Caused by SSLError(CertificateError("hostname 'files.pythonhosted.org' doesn't match either of 'r.ssl.fastly.net', 
'*.catchpoint.com', '*.cnn.io', '*.dollarshaveclub.com', '*.eater.com', '*.fastly.picmonkey.com', '*.files.saymedia-content.com', '*.ft.com', '*.meetupstatic.com', '*.nfl.com', '*.pagar.me', '*.picmonkey.com', '*.realself.com', '*.sbnation.com', '*.shakr.com', '*.streamable.com', '*.surfly.com', '*.theverge.com', '*.thrillist.com', '*.vox-cdn.com', '*.vox.com', '*.voxmedia.com', 'eater.com', 'ft.com', 'i.gse.io', 'picmonkey.com', 'realself.com', 'static.wixstatic.com', 'streamable.com', 'surfly.com', 'theverge.com', 'vox-cdn.com', 'vox.com', 'www.joyent.com'",),)) + ERROR: Command "/usr/bin/python2.6 /root/ansible/test/lib/ansible_test/_data/quiet_pip.py install --disable-pip-version-check -r /root/ansible/test/lib/ansible_test/_data/requirements/units.txt -r tests/unit/requirements.txt -c /root/ansible/test/lib/ansible_test/_data/requirements/constraints.txt" returned exit status 2. + ERROR: Command "docker exec d47eb360db4ce779c1f690db964655b76e68895c4360ff252c46fe7fe6f5c75a /usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/root/ansible_collections/netapp/ontap LC_ALL=en_US.UTF-8 /usr/bin/python3.6 /root/ansible/bin/ansible-test units --metadata tests/output/.tmp/metadata-9i2qfrcl.json --truncate 200 --redact --color yes --requirements --python default --requirements-mode only" returned exit status 1. + validations: + required: true + + +- type: markdown + attributes: + value: > + *One last thing...* + + + Thank you for your collaboration! + + +... 
diff --git a/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..8bb6094c7 --- /dev/null +++ b/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,100 @@ +--- +name: ✨ Feature request +description: Suggest an idea for this project + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to suggest a feature for netapp.elementsw!** + + 💡 + Before you go ahead with your request, please first consider if it + would be useful for majority of the netapp.elementsw users. As a + general rule of thumb, any feature that is only of interest to a + small sub group should be [implemented in a third-party Ansible + Collection][contribute to collections] or maybe even just your + project alone. Be mindful of the fact that the essential + netapp.elementsw features have a broad impact. + + +
+ + ❗ Every change breaks someone's workflow. + + + + [![❗ Every change breaks someone's workflow. + ](https://imgs.xkcd.com/comics/workflow.png) + ](https://xkcd.com/1172/) +
+ + + ⚠ + Verify first that your idea is not [already requested on + GitHub][issue search]. + + Also test if the main branch does not already implement this. + + +- type: textarea + attributes: + label: Summary + description: > + Describe the new feature/improvement you would like briefly below. + + + What's the problem this feature will solve? + + What are you trying to do, that you are unable to achieve + with netapp.elementsw as it currently stands? + + + * Provide examples of real-world use cases that this would enable + and how it solves the problem you described. + + * How do you solve this now? + + * Have you tried to work around the problem using other tools? + + * Could there be a different approach to solving this issue? + + placeholder: >- + I am trying to do X with netapp.elementsw from the devel branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of netapp.elementsw because of Z. + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: >- + I asked on https://stackoverflow.com/.... and the community + advised me to do X, Y and Z. + validations: + required: true + +... 
diff --git a/ansible_collections/netapp/elementsw/.github/workflows/coverage.yml b/ansible_collections/netapp/elementsw/.github/workflows/coverage.yml new file mode 100644 index 000000000..9e2692651 --- /dev/null +++ b/ansible_collections/netapp/elementsw/.github/workflows/coverage.yml @@ -0,0 +1,45 @@ +name: NetApp.elementsw Ansible Coverage + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Coverage on elementsw + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible stable-2.11 + run: pip install https://github.com/ansible/ansible/archive/stable-2.11.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/elementsw/ + rsync -av . ansible_collections/netapp/elementsw/ --exclude ansible_collections/netapp/elementsw/ + + - name: Run Unit Tests + run: ansible-test units --coverage --color --docker --python 3.8 + working-directory: ansible_collections/netapp/elementsw/ + + # ansible-test support producing code coverage date + - name: Generate coverage report + run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ansible_collections/netapp/elementsw/ + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + working-directory: ansible_collections/netapp/elementsw/ + verbose: true \ No newline at end of file diff --git a/ansible_collections/netapp/elementsw/.github/workflows/main.yml b/ansible_collections/netapp/elementsw/.github/workflows/main.yml new file mode 100644 index 000000000..2b9ec2379 --- /dev/null +++ b/ansible_collections/netapp/elementsw/.github/workflows/main.yml @@ -0,0 +1,47 @@ +name: NetApp.elementsw Ansible CI + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: 
Sanity (${{ matrix.ansible }} on Elementsw + runs-on: ubuntu-latest + strategy: + matrix: + ansible: + - stable-2.9 + - stable-2.10 + - stable-2.11 + - stable-2.12 + - devel + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/elementsw/ + rsync -av . ansible_collections/netapp/elementsw/ --exclude ansible_collections/netapp/elementsw/ + + + - name: Run sanity tests Elementsw + run: ansible-test sanity --docker -v --color + working-directory: ansible_collections/netapp/elementsw/ + + - name: Run Unit Tests + run: ansible-test units --docker -v --color + working-directory: ansible_collections/netapp/elementsw/ diff --git a/ansible_collections/netapp/elementsw/CHANGELOG.rst b/ansible_collections/netapp/elementsw/CHANGELOG.rst new file mode 100644 index 000000000..a611ba793 --- /dev/null +++ b/ansible_collections/netapp/elementsw/CHANGELOG.rst @@ -0,0 +1,192 @@ +========================================= +NetApp ElementSW Collection Release Notes +========================================= + +.. contents:: Topics + + +v21.7.0 +======= + +Minor Changes +------------- + +- PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. + +v21.6.1 +======= + +Bugfixes +-------- + +- requirements.txt - point to the correct python dependency + +v21.3.0 +======= + +Minor Changes +------------- + +- na_elementsw_info - add ``cluster_nodes`` and ``cluster_drives``. +- na_elementsw_qos_policy - explicitly define ``minIOPS``, ``maxIOPS``, ``burstIOPS`` as int. + +Bugfixes +-------- + +- na_elementsw_drive - lastest SDK does not accept ``force_during_bin_sync`` and ``force_during_upgrade``. 
+- na_elementsw_qos_policy - loop would convert `minIOPS`, `maxIOPS`, `burstIOPS` to str, causing type mismatch issues in comparisons. +- na_elementsw_snapshot_schedule - change of interface in SDK ('ScheduleInfo' object has no attribute 'minutes') + +v20.11.0 +======== + +Minor Changes +------------- + +- na_elementsw_snapshot_schedule - Add ``retention`` in examples. + +Bugfixes +-------- + +- na_elementsw_drive - Object of type 'dict_values' is not JSON serializable. + +v20.10.0 +======== + +Minor Changes +------------- + +- na_elementsw_cluster - add new options ``encryption``, ``order_number``, and ``serial_number``. +- na_elementsw_network_interfaces - make all options not required, so that only bond_1g can be set for example. +- na_elementsw_network_interfaces - restructure options into 2 dictionaries ``bond_1g`` and ``bond_10g``, so that there is no shared option. Disallow all older options. + +New Modules +----------- + +- netapp.elementsw.na_elementsw_info - NetApp Element Software Info + +v20.9.1 +======= + +Bugfixes +-------- + +- na_elementsw_node - improve error reporting when cluster name cannot be set because node is already active. +- na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo have been added back + +v20.9.0 +======= + +Minor Changes +------------- + +- na_elementsw_node - ``cluster_name`` to set the cluster name on new nodes. +- na_elementsw_node - ``preset_only`` to only set the cluster name before creating a cluster with na_elementsw_cluster. +- na_elementsw_volume - ``qos_policy_name`` to provide a QOS policy name or ID. + +Bugfixes +-------- + +- na_elementsw_node - fix check_mode so that no action is taken. + +New Modules +----------- + +- netapp.elementsw.na_elementsw_qos_policy - NetApp Element Software create/modify/rename/delete QOS Policy + +v20.8.0 +======= + +Minor Changes +------------- + +- add "required:true" where missing. 
+- add "type:str" (or int, dict) where missing in documentation section. +- na_elementsw_drive - add all drives in a cluster, allow for a list of nodes or a list of drives. +- remove "required:true" for state and use present as default. +- use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. + +Bugfixes +-------- + +- na_elementsw_access_group - fix check_mode so that no action is taken. +- na_elementsw_admin_users - fix check_mode so that no action is taken. +- na_elementsw_cluster - create cluster if it does not exist. Do not expect MVIP or SVIP to exist before create. +- na_elementsw_cluster_snmp - double exception because of AttributeError. +- na_elementsw_drive - node_id or drive_id were not handled properly when using numeric ids. +- na_elementsw_initiators - volume_access_group_id was ignored. volume_access_groups was ignored and redundant. +- na_elementsw_ldap - double exception because of AttributeError. +- na_elementsw_snapshot_schedule - ignore schedules being deleted (idempotency), remove default values and fix documentation. +- na_elementsw_vlan - AttributeError if VLAN already exists. +- na_elementsw_vlan - change in attributes was ignored. +- na_elementsw_vlan - fix check_mode so that no action is taken. +- na_elementsw_volume - Argument '512emulation' in argument_spec is not a valid python identifier - renamed to enable512emulation. +- na_elementsw_volume - double exception because of AttributeError. + +v20.6.0 +======= + +Bugfixes +-------- + +- galaxy.yml - fix repository and homepage links. + +v20.2.0 +======= + +Bugfixes +-------- + +- galaxy.yml - fix path to github repository. +- netapp.py - report error in case of connection error rather than raising a generic exception by default. 
+ +v20.1.0 +======= + +New Modules +----------- + +- netapp.elementsw.na_elementsw_access_group_volumes - NetApp Element Software Add/Remove Volumes to/from Access Group + +v19.10.0 +======== + +Minor Changes +------------- + +- refactor existing modules as a collection + +v2.8.0 +====== + +New Modules +----------- + +- netapp.elementsw.na_elementsw_cluster_config - Configure Element SW Cluster +- netapp.elementsw.na_elementsw_cluster_snmp - Configure Element SW Cluster SNMP +- netapp.elementsw.na_elementsw_initiators - Manage Element SW initiators + +v2.7.0 +====== + +New Modules +----------- + +- netapp.elementsw.na_elementsw_access_group - NetApp Element Software Manage Access Groups +- netapp.elementsw.na_elementsw_account - NetApp Element Software Manage Accounts +- netapp.elementsw.na_elementsw_admin_users - NetApp Element Software Manage Admin Users +- netapp.elementsw.na_elementsw_backup - NetApp Element Software Create Backups +- netapp.elementsw.na_elementsw_check_connections - NetApp Element Software Check connectivity to MVIP and SVIP. 
+- netapp.elementsw.na_elementsw_cluster - NetApp Element Software Create Cluster +- netapp.elementsw.na_elementsw_cluster_pair - NetApp Element Software Manage Cluster Pair +- netapp.elementsw.na_elementsw_drive - NetApp Element Software Manage Node Drives +- netapp.elementsw.na_elementsw_ldap - NetApp Element Software Manage ldap admin users +- netapp.elementsw.na_elementsw_network_interfaces - NetApp Element Software Configure Node Network Interfaces +- netapp.elementsw.na_elementsw_node - NetApp Element Software Node Operation +- netapp.elementsw.na_elementsw_snapshot - NetApp Element Software Manage Snapshots +- netapp.elementsw.na_elementsw_snapshot_restore - NetApp Element Software Restore Snapshot +- netapp.elementsw.na_elementsw_snapshot_schedule - NetApp Element Software Snapshot Schedules +- netapp.elementsw.na_elementsw_vlan - NetApp Element Software Manage VLAN +- netapp.elementsw.na_elementsw_volume - NetApp Element Software Manage Volumes +- netapp.elementsw.na_elementsw_volume_clone - NetApp Element Software Create Volume Clone +- netapp.elementsw.na_elementsw_volume_pair - NetApp Element Software Volume Pair diff --git a/ansible_collections/netapp/elementsw/FILES.json b/ansible_collections/netapp/elementsw/FILES.json new file mode 100644 index 000000000..7113c56bd --- /dev/null +++ b/ansible_collections/netapp/elementsw/FILES.json @@ -0,0 +1,649 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0bd0735ea0d7847ed0f372da0cf7d7f8a0a2471aec49b5c16901d1c32793e43e", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/netapp.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "fd42778f85cd3b989604d0227af4cc90350d94f5864938eb0bd29cf7a66401c3", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc9a4b7d4d77cf221f256e5972707d08f424f319b856ef4a8fdd0dbe9a3dc322", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a98ea2d0aec17e10c6b5a956cfaa1dcddbd336b674079a1f86e85429381a49e7", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_elementsw_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33132c95ba546d56bf953e1613dd39ad8a258379b3a32120f7be8b19e2c0d8a2", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_initiators.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a0e280ee9ef13b994f98c848524dc53b3a3a16559e3d1e22be6573272327c8c", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_qos_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4934c116271845de9f5da2f9747042601e961bc929f3a22397961313b3888e06", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_cluster_snmp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ee85a0b9e6ac2b0151a52b7722a43ea3e358d48f48816f5fac597151fd58d93", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25b0f4b869b1b814160da50df5b7b06d0e5d3eb83ca8887a0fead337699d6c62", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a4b329b6f3c13f500a95ad0fb40eba4db5873b78b0c137997c858229336011af", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_access_group_volumes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "532fbf39ed0ee98af0e9323f037ab0e0f52d5eac9179a82eeb169a5a48cdfd3e", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_snapshot_schedule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a07aa78ae73ec965592b77bad72bbedd724b519e82f51805d5fd414d3f9c414", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_node.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6882747383c770c6ec43585e3a4db0081c8de165415d40941532324208e3aa4e", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_access_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7099bfffb1ec35ed7c0a40c0708cb4d1d79f6267b16fcc71f759796add15edaf", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_cluster_pair.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ddd54266eb0a3ebf891d8c1310059b40cfbad7679db3d7f2b9c600baf31e42ca", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_volume_pair.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ead937f30287dfd02521b4fdda1e0a128cd1d3ba8db4a721330ff4bbfb76e284", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_cluster_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6dc94b752a4931e30ea169f61aec3919a7cd7636ce3aeff4764094d2adc355f7", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "983415a406d31e2edd3e06b64745363e0d1c5ee7575058298bfdce6919522e31", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_ldap.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b8a59c8c45c1aa147c2d90b01654135f31ac4a1e31c643ce3b07007d6f28ea9", + "format": 1 + 
}, + { + "name": "plugins/modules/na_elementsw_vlan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "39414c4cb613271d96220d275f027404e41e4b5dd61db5c7ad6eb3f70bc3243b", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_cluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d42be06f947c782d42fdd9141daeb87374855fc996ecfc53a450e20216cc6e05", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_volume_clone.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05f518bb36b88476c0a6dc329587400937c88c64bb335bd0f3ad279c79cf845e", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_check_connections.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54458477eb0807256e663f64924d88cf5a5cb8058c0e7212a155a4aff9f87997", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_drive.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d7a53bf79e58150eff5f6979890afb54a6859597121a4cee0e7b4e6020f0eb0", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_account.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7dbfc7b05e3c69ebbb1723314094d62e07a4b328cba09db899808fd50d38bc15", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_snapshot_restore.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d70395bc1a83498c08081aaa31fa4e5bb8ebfccbc03b7c9f1cb0aa6a4d132c9", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_backup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b545334782c314c7c2c8e857f85838859b461176369ed002f3fba7414062b809", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_network_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d045d9768f1b469c3aeda533dbfdcbdb5a2f51a2d9949c59a3f73b56959ca082", + "format": 1 + }, + { + "name": "plugins/modules/na_elementsw_admin_users.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "b822e729b9e40361b148fd9739fddf1c26705597a092b5d967e29676eed9fb66", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat/unittest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cba95d18c5b39c6f49714eacf1ac77452c2e32fa087c03cf01aacd19ae597b0f", + "format": 1 + }, + { + "name": "tests/unit/compat/builtins.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1", + "format": 1 + }, + { + "name": "tests/unit/compat/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/compat/mock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da9e4399d51f4aa7e39d11a4c8adb3ea291252334eeebc6e5569777c717739da", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_cluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66d9f46f9b572b24f6465f43d2aebfb43f3fe2858ad528472559ba089dc2fb3c", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6aa0100e51bbe54b6e9edeb072b7de526542e55da1cede0d1ae5f4367ec89eb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d910be3c377edddb04f6f74c3e4908a9d6d32c71ec251cf74e9eaa6711b1bffe", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_vlan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9390907ec097add3aa2d936dd95f63d05bfac2b5b730ae12df50d14c5a18e0c1", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_nodes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b563b9adab2f4c7a67354fa2b7a2e3468cf68b041ba51c788e0e082e4b50b7ba", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_cluster_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae4c8e648a16dfa704964ef0f3782ea27adec2f1c0ceb5fca84ab86e888caffa", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_qos_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "189242c5691fba4c436403cbfeb512fdab01c8bd35b028d7262b4cdeca9c7376", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_account.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5002081bc3177a94e5b2911259138ba80b2cf03006c6333c78cc50731f89fbbe", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_initiators.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5cc8b59e5120ff8f6b51a9b2085d336f63c5b91d7d3f21db629176c92c2f011", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"655c454425b97c72bb924b5def11e8dc65dd9dc4cd40cf00df66ae85120ba40f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_template.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb1802b2cd87193966ccc7d8b0c6c94522d7954bfada73febb8aeae77367322c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "489f21207a0de4f7ab263096c0f2d2c674cb9a334b45edb76165f7a933b13c5e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_access_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4682bf1c6d258032a9a9b001254246a2993e006ab2aa32463e42bed5e192e09f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_elementsw_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a138bc7c455af917d85a69c4e010ae92cda34cff767fe7d0514806ab82d22b0", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules_utils/test_netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a40d8651793b9771d6f56d5e8b52772597a77e317002a9f9bf3400cffd014d60", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a2a08d11b2cf3859e796da8a7928461df41efdd14abbc7e4234a37da5ca19c4", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3734.yaml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "419f9e02843f2fc7b584c8d3a4160769b1939784dbc0f726c55daeca0bc6bef9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3324.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "784b39c5d9440affb1dbab3ba8769ec1e88e7570798448c238a77d32dbf6e505", + "format": 1 + }, + { + "name": "changelogs/fragments/20.9.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "56bed0aab9696af7068eb1bb743eb316ab23c3200ac6faa715a303e5f33f0973", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3196.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94573d6e6ddde5f8a053d72a7e49d87d13c4274f5ea5c24c6c0a95947215977b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3800.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6fc0ea3ba25f76222015eba223c4a88c7d36b52cb5d767a5c3a9374746532a5e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3733.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a1ce243b30c79588a96fac9c050487d9b9ea63208e9c30934b7af77cc24dfe4", + "format": 1 + }, + { + "name": "changelogs/fragments/2019.10.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b1a5ef7df5f1e6e66ddc013149aea0480eb79f911a0563e2e6d7d9af79d5572", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3174.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7cfc4addbf3343a3ce121f5de6cc2cc8244ad7b62a7429c2694543dabc2a8ccf", + "format": 1 + }, + { + "name": "changelogs/fragments/20.2.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c98764e792ed6c6d9cee6df80b9fff8f4fcadaf765c0aa0f0ed3dd5e3080fec", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3117.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "242f770eafb49994810a3263e23e1d342aeb36396819045c48f491810aab6908", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-3731.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f92782e45a47a3439f8a858c3f283879fdc070422109d5a9ab2fdaa7ca56293", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3310.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8132aa931d13a49ba1a3c0fee131c048c6767ce17b3d9cabafa7e34f3c7c239a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3235.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cddb1135b1c15ca3c8f130bcc439d73ac819c7a3e0472c9ff358c75405bd8cb3", + "format": 1 + }, + { + "name": "changelogs/fragments/20.8.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b13007f7b14dd35357ec0fb06b0e89cf5fee56036b0a6004dfb21c46010cb7c1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3188.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0efa05e4cca58b1bfe30a60673adc266e7598d841065486b5b29c7e7a8b29bf4", + "format": 1 + }, + { + "name": "changelogs/fragments/20.6.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6192b3cccdc7c1e1eb0d61a49dd20c6f234499b6dd9b52b2f974b673e99f7a47", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4416.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4224db573f34caeeb956c8728eb343a47bc2729d898001a4c6a671b780dae1bf", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70f470630a3fb893540ad9060634bfd0955e4a3371ab1a921e44bdc6b5ea1ba5", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ad8dbe639e83e6feef631362bf2d78cde3c51c093203c0de8113b0d1cbc7756d", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ada0df4adf6ff17cdb5493e6050ec750fa13347ea71a6122a7e139f65f842b50", + "format": 1 + }, + { + 
"name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5fef29bf470c1567ed5ba3e3d5f227d21db4d23455c4fd12628e3e3ad80ddd76", + "format": 1 + }, + { + "name": ".github/workflows/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "140dc9b99f730080720586330df5ee7ef8f5e74b5898738d2b269ac52bbe4666", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/feature_request.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c8be00f495b1a0e20d3e4c2bca809b9eda7d2ab92e838bfad951dfa37e7b3d2", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/bug_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b59987ccd30474cf321e36496cc8b30464bdd816c5b3860d659356bc3e2a2a7f", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1818f97ced0b9d61cd4d65742e14cb618a333be7f734c1fee8bb420323e5373d", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/elementsw/MANIFEST.json b/ansible_collections/netapp/elementsw/MANIFEST.json new file mode 100644 index 000000000..fda95d344 --- /dev/null +++ b/ansible_collections/netapp/elementsw/MANIFEST.json @@ -0,0 +1,34 @@ +{ + "collection_info": { + "namespace": "netapp", + "name": "elementsw", + "version": "21.7.0", + "authors": [ + "NetApp Ansible Team " + ], + "readme": "README.md", + "tags": [ + "storage", + "netapp", + "solidfire" + ], + "description": "Netapp ElementSW (Solidfire) Collection", + "license": [ + "GPL-2.0-or-later" + ], + "license_file": null, + 
"dependencies": {}, + "repository": "https://github.com/ansible-collections/netapp.elementsw", + "documentation": null, + "homepage": "https://netapp.io/configuration-management-and-automation/", + "issues": "https://github.com/ansible-collections/netapp.elementsw/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "472a7d73c3fe2719a7c500eadc92b8f89ca852d2c5aee2b71d7afb688c97dc8c", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/elementsw/README.md b/ansible_collections/netapp/elementsw/README.md new file mode 100644 index 000000000..96b62e64d --- /dev/null +++ b/ansible_collections/netapp/elementsw/README.md @@ -0,0 +1,133 @@ +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/netapp/elementsw/index.html) +![example workflow](https://github.com/ansible-collections/netapp.elementsw/actions/workflows/main.yml/badge.svg) +[![codecov](https://codecov.io/gh/ansible-collections/netapp.elementsw/branch/main/graph/badge.svg?token=weBYkksxSi)](https://codecov.io/gh/ansible-collections/netapp.elementsw) + + +netapp.elementSW + +NetApp ElementSW Collection + +Copyright (c) 2019 NetApp, Inc. All rights reserved. +Specifications subject to change without notice. + +# Installation +```bash +ansible-galaxy collection install netapp.elementsw +``` +To use Collection add the following to the top of your playbook, with out this you will be using Ansible 2.9 version of the module +``` +collections: + - netapp.elementsw +``` + +# Module documentation +https://docs.ansible.com/ansible/devel/collections/netapp/elementsw/ + +# Need help +Join our Slack Channel at [Netapp.io](http://netapp.io/slack) + +# Release Notes + +## 21.7.0 + +### Minor changes + - all modules - enable usage of Ansible module group defaults - for Ansible 2.12+. 
+ +## 21.6.1 +### Bug Fixes + - requirements.txt: point to the correct python dependency + +## 21.3.0 + +### New Options + - na_elementsw_qos_policy: explicitly define `minIOPS`, `maxIOPS`, `burstIOPS` as int. + +### Minor changes + - na_elementsw_info - add `cluster_nodes` and `cluster_drives`. + +### Bug Fixes + - na_elementsw_drive - latest SDK does not accept ``force_during_bin_sync`` and ``force_during_upgrade``. + - na_elementsw_qos_policy - loop would convert `minIOPS`, `maxIOPS`, `burstIOPS` to str, causing type mismatch issues in comparisons. + - na_elementsw_snapshot_schedule - change of interface in SDK ('ScheduleInfo' object has no attribute 'minutes') + +## 20.11.0 + +### Minor changes +- na_elementsw_snapshot_schedule - Add `retention` in examples. + +### Bug Fixes +- na_elementsw_drive - Object of type 'dict_values' is not JSON serializable. + +## 20.10.0 + +### New Modules +- na_elementsw_info: support for two subsets `cluster_accounts`, `node_config`. + +### New Options +- na_elementsw_cluster: `encryption` to enable encryption at rest. `order_number` and `serial_number` for demo purposes. +- na_elementsw_network_interfaces: restructure options, into 2 dictionaries `bond_1g` and `bond_10g`, so that there is no shared option. Disallow all older options. +- na_elementsw_network_interfaces: make all options not required, so that only bond_1g can be set for example. + +## 20.9.1 + +### Bug Fixes +- na_elementsw_node: improve error reporting when cluster name cannot be set because node is already active. +- na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo have been added back + +## 20.9.0 + +### New Modules +- na_elementsw_qos_policy: create, modify, rename, or delete QOS policy. + +### New Options +- na_elementsw_node: `cluster_name` to set the cluster name on new nodes. +- na_elementsw_node: `preset_only` to only set the cluster name before creating a cluster with na_elementsw_cluster. 
+- na_elementsw_volume: `qos_policy_name` to provide a QOS policy name or ID.
+
+### Bug Fixes
+- na_elementsw_node: fix check_mode so that no action is taken.
+
+## 20.8.0
+
+### New Options
+- na_elementsw_drive: add all drives in a cluster, allow for a list of nodes or a list of drives.
+
+### Bug Fixes
+- na_elementsw_access_group: fix check_mode so that no action is taken.
+- na_elementsw_admin_users: fix check_mode so that no action is taken.
+- na_elementsw_cluster: create cluster if it does not exist. Do not expect MVIP or SVIP to exist before create.
+- na_elementsw_cluster_snmp: double exception because of AttributeError.
+- na_elementsw_drive: node_id or drive_id were not handled properly when using numeric ids.
+- na_elementsw_initiators: volume_access_group_id was ignored. volume_access_groups was ignored and redundant.
+- na_elementsw_ldap: double exception because of AttributeError.
+- na_elementsw_snapshot_schedule: ignore schedules being deleted (idempotency), remove default values and fix documentation.
+- na_elementsw_vlan: AttributeError if VLAN already exists.
+- na_elementsw_vlan: fix check_mode so that no action is taken.
+- na_elementsw_vlan: change in attributes was ignored.
+- na_elementsw_volume: double exception because of AttributeError.
+- na_elementsw_volume: Argument '512emulation' in argument_spec is not a valid python identifier - renamed to enable512emulation.
+
+### Module documentation changes
+- use a three group format for `version_added`. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+- add type: str (or int, dict) where missing in documentation section.
+- add required: true where missing.
+- remove required: true for state and use present as default.
+
+## 20.6.0
+### Bug Fixes
+- galaxy.yml: fix repository and homepage links
+
+## 20.2.0
+### Bug Fixes
+- galaxy.yml: fix path to github repository.
+- netapp.py: report error in case of connection error rather than raising a generic exception by default.
+ +## 20.1.0 +### New Module +- na_elementsw_access_group_volumes: add/remove volumes to/from existing access group + +## 19.11.0 +## 19.10.0 +Changes in 19.10.0 and September collection releases compared to Ansible 2.9 +### Documentation Fixes: +- na_elementsw_drive: na_elementsw_drive was documented as na_element_drive diff --git a/ansible_collections/netapp/elementsw/changelogs/changelog.yaml b/ansible_collections/netapp/elementsw/changelogs/changelog.yaml new file mode 100644 index 000000000..97d921301 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/changelog.yaml @@ -0,0 +1,221 @@ +ancestor: null +releases: + 19.10.0: + changes: + minor_changes: + - refactor existing modules as a collection + fragments: + - 2019.10.0.yaml + release_date: '2019-11-14' + 2.7.0: + modules: + - description: NetApp Element Software Manage Access Groups + name: na_elementsw_access_group + namespace: '' + - description: NetApp Element Software Manage Accounts + name: na_elementsw_account + namespace: '' + - description: NetApp Element Software Manage Admin Users + name: na_elementsw_admin_users + namespace: '' + - description: NetApp Element Software Create Backups + name: na_elementsw_backup + namespace: '' + - description: NetApp Element Software Check connectivity to MVIP and SVIP. 
+ name: na_elementsw_check_connections + namespace: '' + - description: NetApp Element Software Create Cluster + name: na_elementsw_cluster + namespace: '' + - description: NetApp Element Software Manage Cluster Pair + name: na_elementsw_cluster_pair + namespace: '' + - description: NetApp Element Software Manage Node Drives + name: na_elementsw_drive + namespace: '' + - description: NetApp Element Software Manage ldap admin users + name: na_elementsw_ldap + namespace: '' + - description: NetApp Element Software Configure Node Network Interfaces + name: na_elementsw_network_interfaces + namespace: '' + - description: NetApp Element Software Node Operation + name: na_elementsw_node + namespace: '' + - description: NetApp Element Software Manage Snapshots + name: na_elementsw_snapshot + namespace: '' + - description: NetApp Element Software Restore Snapshot + name: na_elementsw_snapshot_restore + namespace: '' + - description: NetApp Element Software Snapshot Schedules + name: na_elementsw_snapshot_schedule + namespace: '' + - description: NetApp Element Software Manage VLAN + name: na_elementsw_vlan + namespace: '' + - description: NetApp Element Software Manage Volumes + name: na_elementsw_volume + namespace: '' + - description: NetApp Element Software Create Volume Clone + name: na_elementsw_volume_clone + namespace: '' + - description: NetApp Element Software Volume Pair + name: na_elementsw_volume_pair + namespace: '' + release_date: '2018-09-21' + 2.8.0: + modules: + - description: Configure Element SW Cluster + name: na_elementsw_cluster_config + namespace: '' + - description: Configure Element SW Cluster SNMP + name: na_elementsw_cluster_snmp + namespace: '' + - description: Manage Element SW initiators + name: na_elementsw_initiators + namespace: '' + release_date: '2019-04-11' + 20.1.0: + modules: + - description: NetApp Element Software Add/Remove Volumes to/from Access Group + name: na_elementsw_access_group_volumes + namespace: '' + release_date: 
'2020-01-08' + 20.10.0: + changes: + minor_changes: + - na_elementsw_cluster - add new options ``encryption``, ``order_number``, and + ``serial_number``. + - na_elementsw_network_interfaces - make all options not required, so that only + bond_1g can be set for example. + - na_elementsw_network_interfaces - restructure options into 2 dictionaries + ``bond_1g`` and ``bond_10g``, so that there is no shared option. Disallow + all older options. + fragments: + - DEVOPS-3117.yaml + - DEVOPS-3196.yaml + - DEVOPS-3235.yaml + modules: + - description: NetApp Element Software Info + name: na_elementsw_info + namespace: '' + release_date: '2020-10-08' + 20.11.0: + changes: + bugfixes: + - na_elementsw_drive - Object of type 'dict_values' is not JSON serializable. + minor_changes: + - na_elementsw_snapshot_schedule - Add ``retention`` in examples. + fragments: + - DEVOPS-3310.yml + - DEVOPS-3324.yaml + release_date: '2020-11-05' + 20.2.0: + changes: + bugfixes: + - galaxy.yml - fix path to github repository. + - netapp.py - report error in case of connection error rather than raising a + generic exception by default. + fragments: + - 20.2.0.yaml + release_date: '2020-02-05' + 20.6.0: + changes: + bugfixes: + - galaxy.yml - fix repository and homepage links. + fragments: + - 20.6.0.yaml + release_date: '2020-06-03' + 20.8.0: + changes: + bugfixes: + - na_elementsw_access_group - fix check_mode so that no action is taken. + - na_elementsw_admin_users - fix check_mode so that no action is taken. + - na_elementsw_cluster - create cluster if it does not exist. Do not expect + MVIP or SVIP to exist before create. + - na_elementsw_cluster_snmp - double exception because of AttributeError. + - na_elementsw_drive - node_id or drive_id were not handled properly when using + numeric ids. + - na_elementsw_initiators - volume_access_group_id was ignored. volume_access_groups + was ignored and redundant. + - na_elementsw_ldap - double exception because of AttributeError. 
+ - na_elementsw_snapshot_schedule - ignore schedules being deleted (idempotency), + remove default values and fix documentation. + - na_elementsw_vlan - AttributeError if VLAN already exists. + - na_elementsw_vlan - change in attributes was ignored. + - na_elementsw_vlan - fix check_mode so that no action is taken. + - na_elementsw_volume - Argument '512emulation' in argument_spec is not a valid + python identifier - renamed to enable512emulation. + - na_elementsw_volume - double exception because of AttributeError. + minor_changes: + - add "required:true" where missing. + - add "type:str" (or int, dict) where missing in documentation section. + - na_elementsw_drive - add all drives in a cluster, allow for a list of nodes + or a list of drives. + - remove "required:true" for state and use present as default. + - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same + thing for 2.8 and 2.9. + fragments: + - 20.8.0.yaml + release_date: '2020-08-05' + 20.9.0: + changes: + bugfixes: + - na_elementsw_node - fix check_mode so that no action is taken. + minor_changes: + - na_elementsw_node - ``cluster_name`` to set the cluster name on new nodes. + - na_elementsw_node - ``preset_only`` to only set the cluster name before creating + a cluster with na_elementsw_cluster. + - na_elementsw_volume - ``qos_policy_name`` to provide a QOS policy name or + ID. + fragments: + - 20.9.0.yaml + modules: + - description: NetApp Element Software create/modify/rename/delete QOS Policy + name: na_elementsw_qos_policy + namespace: '' + release_date: '2020-09-02' + 20.9.1: + changes: + bugfixes: + - na_elementsw_node - improve error reporting when cluster name cannot be set + because node is already active. 
+ - na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo + have been added back + fragments: + - DEVOPS-3174.yaml + - DEVOPS-3188.yaml + release_date: '2020-09-08' + 21.3.0: + changes: + bugfixes: + - na_elementsw_drive - latest SDK does not accept ``force_during_bin_sync`` + and ``force_during_upgrade``. + - na_elementsw_qos_policy - loop would convert `minIOPS`, `maxIOPS`, `burstIOPS` + to str, causing type mismatch issues in comparisons. + - na_elementsw_snapshot_schedule - change of interface in SDK ('ScheduleInfo' + object has no attribute 'minutes') + minor_changes: + - na_elementsw_info - add ``cluster_nodes`` and ``cluster_drives``. + - na_elementsw_qos_policy - explicitly define ``minIOPS``, ``maxIOPS``, ``burstIOPS`` + as int. + fragments: + - DEVOPS-3731.yaml + - DEVOPS-3733.yaml + - DEVOPS-3734.yaml + release_date: '2021-03-03' + 21.6.1: + changes: + bugfixes: + - requirements.txt - point to the correct python dependency + fragments: + - DEVOPS-3800.yaml + release_date: '2021-05-18' + 21.7.0: + changes: + minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. 
+ fragments: + - DEVOPS-4416.yaml + release_date: '2021-11-03' diff --git a/ansible_collections/netapp/elementsw/changelogs/config.yaml b/ansible_collections/netapp/elementsw/changelogs/config.yaml new file mode 100644 index 000000000..2d637df5c --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/config.yaml @@ -0,0 +1,32 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: true +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: NetApp ElementSW Collection +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml new file mode 100644 index 000000000..832b5f56f --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml @@ -0,0 +1,3 @@ +bugfixes: + - galaxy.yml - fix path to github repository. + - netapp.py - report error in case of connection error rather than raising a generic exception by default. 
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml new file mode 100644 index 000000000..fcd0d11ee --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml @@ -0,0 +1,2 @@ +bugfixes: + - galaxy.yml - fix repository and homepage links. diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml new file mode 100644 index 000000000..5c959531a --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml @@ -0,0 +1,21 @@ +minor_changes: + - na_elementsw_drive - add all drives in a cluster, allow for a list of nodes or a list of drives. + - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. + - add "type:str" (or int, dict) where missing in documentation section. + - add "required:true" where missing. + - remove "required:true" for state and use present as default. + +bugfixes: + - na_elementsw_access_group - fix check_mode so that no action is taken. + - na_elementsw_admin_users - fix check_mode so that no action is taken. + - na_elementsw_cluster - create cluster if it does not exist. Do not expect MVIP or SVIP to exist before create. + - na_elementsw_cluster_snmp - double exception because of AttributeError. + - na_elementsw_drive - node_id or drive_id were not handled properly when using numeric ids. + - na_elementsw_initiators - volume_access_group_id was ignored. volume_access_groups was ignored and redundant. + - na_elementsw_ldap - double exception because of AttributeError. + - na_elementsw_snapshot_schedule - ignore schedules being deleted (idempotency), remove default values and fix documentation. + - na_elementsw_vlan - AttributeError if VLAN already exists. + - na_elementsw_vlan - fix check_mode so that no action is taken. 
+ - na_elementsw_vlan - change in attributes was ignored. + - na_elementsw_volume - double exception because of AttributeError. + - na_elementsw_volume - Argument '512emulation' in argument_spec is not a valid python identifier - renamed to enable512emulation. diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml new file mode 100644 index 000000000..a406c9c2d --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml @@ -0,0 +1,7 @@ +minor_changes: + - na_elementsw_node - ``cluster_name`` to set the cluster name on new nodes. + - na_elementsw_node - ``preset_only`` to only set the cluster name before creating a cluster with na_elementsw_cluster. + - na_elementsw_volume - ``qos_policy_name`` to provide a QOS policy name or ID. + +bugfixes: + - na_elementsw_node - fix check_mode so that no action is taken. diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml new file mode 100644 index 000000000..5723daa11 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml @@ -0,0 +1,2 @@ +minor_changes: + - refactor existing modules as a collection diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml new file mode 100644 index 000000000..23a6cafa4 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_elementsw_cluster - add new options ``encryption``, ``order_number``, and ``serial_number``. 
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml new file mode 100644 index 000000000..01e754719 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_elementsw_node - improve error reporting when cluster name cannot be set because node is already active. diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml new file mode 100644 index 000000000..ad5d8bee7 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo have been added back \ No newline at end of file diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml new file mode 100644 index 000000000..21a70b02c --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_elementsw_network_interfaces - make all options not required, so that only bond_1g can be set for example. diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml new file mode 100644 index 000000000..8a2f82f34 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_elementsw_network_interfaces - restructure options into 2 dictionaries ``bond_1g`` and ``bond_10g``, so that there is no shared option. Disallow all older options. 
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml new file mode 100644 index 000000000..729e6d062 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml @@ -0,0 +1,2 @@ +minor_changes: + - na_elementsw_snapshot_schedule - Add ``retention`` in examples. diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml new file mode 100644 index 000000000..b87e308d8 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_elementsw_drive - Object of type 'dict_values' is not JSON serializable. diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3731.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3731.yaml new file mode 100644 index 000000000..a4e43ed45 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3731.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_elementsw_qos_policy - explicitly define ``minIOPS``, ``maxIOPS``, ``burstIOPS`` as int. +bugfixes: + - na_elementsw_qos_policy - loop would convert `minIOPS`, `maxIOPS`, `burstIOPS` to str, causing type mismatch issues in comparisons. diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3733.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3733.yaml new file mode 100644 index 000000000..7310f3b75 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3733.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_elementsw_info - add ``cluster_nodes`` and ``cluster_drives``. +bugfixes: + - na_elementsw_drive - latest SDK does not accept ``force_during_bin_sync`` and ``force_during_upgrade``. 
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3734.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3734.yaml new file mode 100644 index 000000000..08c5bf552 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3734.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_elementsw_snapshot_schedule - change of interface in SDK ('ScheduleInfo' object has no attribute 'minutes') diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3800.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3800.yaml new file mode 100644 index 000000000..b6e57d046 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3800.yaml @@ -0,0 +1,2 @@ +bugfixes: + - requirements.txt - point to the correct python dependency diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-4416.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-4416.yaml new file mode 100644 index 000000000..6b4b660a0 --- /dev/null +++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-4416.yaml @@ -0,0 +1,2 @@ +minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. 
diff --git a/ansible_collections/netapp/elementsw/meta/runtime.yml b/ansible_collections/netapp/elementsw/meta/runtime.yml new file mode 100644 index 000000000..05a30f02f --- /dev/null +++ b/ansible_collections/netapp/elementsw/meta/runtime.yml @@ -0,0 +1,28 @@ +--- +requires_ansible: ">=2.9.10" +action_groups: + netapp_elementsw: + - na_elementsw_access_group + - na_elementsw_access_group_volumes + - na_elementsw_account + - na_elementsw_admin_users + - na_elementsw_backup + - na_elementsw_check_connections + - na_elementsw_cluster_config + - na_elementsw_cluster_pair + - na_elementsw_cluster + - na_elementsw_cluster_snmp + - na_elementsw_drive + - na_elementsw_info + - na_elementsw_initiators + - na_elementsw_ldap + - na_elementsw_network_interfaces + - na_elementsw_node + - na_elementsw_qos_policy + - na_elementsw_snapshot + - na_elementsw_snapshot_restore + - na_elementsw_snapshot_schedule + - na_elementsw_vlan + - na_elementsw_volume_clone + - na_elementsw_volume_pair + - na_elementsw_volume diff --git a/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py new file mode 100644 index 000000000..229d03f7d --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, NetApp Ansible Team +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + - See respective platform section for more details +requirements: + - See respective platform section for more details +notes: + - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire +''' + + # Documentation fragment for SolidFire + SOLIDFIRE = r''' +options: + hostname: + required: true + 
description: + - The hostname or IP address of the SolidFire cluster. + - For na_elementsw_cluster, the Management IP (MIP) or hostname of the node to initiate the cluster creation from. + type: str + username: + required: true + description: + - Please ensure that the user has the adequate permissions. For more information, please read the official documentation + U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US). + aliases: ['user'] + type: str + password: + required: true + description: + - Password for the specified user. + aliases: ['pass'] + type: str + +requirements: + - The modules were developed with SolidFire 10.1 + - solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python' + +notes: + - The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform. + +''' diff --git a/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py new file mode 100644 index 000000000..4121bf8e7 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py @@ -0,0 +1,107 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2017, Sumit Kumar +# Copyright (c) 2017, Michael Price +# Copyright: (c) 2018, NetApp Ansible Team +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +''' +Common methods and constants +''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +HAS_SF_SDK = False +SF_BYTE_MAP = dict( + # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000. 
+ bytes=1, + b=1, + kb=1000, + mb=1000 ** 2, + gb=1000 ** 3, + tb=1000 ** 4, + pb=1000 ** 5, + eb=1000 ** 6, + zb=1000 ** 7, + yb=1000 ** 8 +) + +# uncomment this to log API calls +# import logging + +try: + from solidfire.factory import ElementFactory + import solidfire.common + HAS_SF_SDK = True +except ImportError: + HAS_SF_SDK = False + +COLLECTION_VERSION = "21.7.0" + + +def has_sf_sdk(): + return HAS_SF_SDK + + +def ontap_sf_host_argument_spec(): + + return dict( + hostname=dict(required=True, type='str'), + username=dict(required=True, type='str', aliases=['user']), + password=dict(required=True, type='str', aliases=['pass'], no_log=True) + ) + + +def create_sf_connection(module, hostname=None, port=None, raise_on_connection_error=False, timeout=None): + if hostname is None: + hostname = module.params['hostname'] + username = module.params['username'] + password = module.params['password'] + options = dict() + if port is not None: + options['port'] = port + if timeout is not None: + options['timeout'] = timeout + + if not HAS_SF_SDK: + module.fail_json(msg="the python SolidFire SDK module is required") + + try: + logging.basicConfig(filename='/tmp/elementsw_apis.log', level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s') + except NameError: + # logging was not imported + pass + + try: + return_val = ElementFactory.create(hostname, username, password, **options) + except (solidfire.common.ApiConnectionError, solidfire.common.ApiServerError) as exc: + if raise_on_connection_error: + raise exc + module.fail_json(msg=repr(exc)) + except Exception as exc: + raise Exception("Unable to create SF connection: %s" % repr(exc)) + return return_val diff --git a/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py new file mode 100644 index 000000000..2d8b92cfa --- /dev/null +++ 
b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py @@ -0,0 +1,206 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Copyright: (c) 2018, NetApp Ansible Team + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils._text import to_native + +HAS_SF_SDK = False +try: + import solidfire.common + HAS_SF_SDK = True +except ImportError: + HAS_SF_SDK = False + + +def has_sf_sdk(): + return HAS_SF_SDK + + +class NaElementSWModule(object): + ''' Support class for common or shared functions ''' + def __init__(self, elem): + self.elem_connect = elem + self.parameters = dict() + + def get_volume(self, volume_id): + """ + Return volume details if volume exists for given volume_id + + :param volume_id: volume ID + :type volume_id: int + :return: Volume dict if found, None if not found + :rtype: dict + """ + volume_list = self.elem_connect.list_volumes(volume_ids=[volume_id]) + for volume in volume_list.volumes: + if volume.volume_id == volume_id: + if str(volume.delete_time) == "": + return volume + return None + + def get_volume_id(self, vol_name, account_id): + """ + Return volume id from the given (valid) account_id if found + Return None if not found + + :param vol_name: Name of the volume + :type vol_name: str + :param account_id: Account ID + :type account_id: int + + :return: Volume ID of the first matching volume if found. None if not found. 
+ :rtype: int + """ + volume_list = self.elem_connect.list_volumes_for_account(account_id=account_id) + for volume in volume_list.volumes: + if volume.name == vol_name: + # return volume_id + if str(volume.delete_time) == "": + return volume.volume_id + return None + + def volume_id_exists(self, volume_id): + """ + Return volume_id if volume exists for given volume_id + + :param volume_id: volume ID + :type volume_id: int + :return: Volume ID if found, None if not found + :rtype: int + """ + volume_list = self.elem_connect.list_volumes(volume_ids=[volume_id]) + for volume in volume_list.volumes: + if volume.volume_id == volume_id: + if str(volume.delete_time) == "": + return volume.volume_id + return None + + def volume_exists(self, volume, account_id): + """ + Return volume_id if exists, None if not found + + :param volume: Volume ID or Name + :type volume: str + :param account_id: Account ID (valid) + :type account_id: int + :return: Volume ID if found, None if not found + """ + # If volume is an integer, get_by_id + if str(volume).isdigit(): + volume_id = int(volume) + try: + if self.volume_id_exists(volume_id): + return volume_id + except solidfire.common.ApiServerError: + # don't fail, continue and try get_by_name + pass + # get volume by name + volume_id = self.get_volume_id(volume, account_id) + return volume_id + + def get_snapshot(self, snapshot_id, volume_id): + """ + Return snapshot details if found + + :param snapshot_id: Snapshot ID or Name + :type snapshot_id: str + :param volume_id: Volume ID (valid) + :type volume_id: int + :return: Snapshot dict if found, None if not found + :rtype: dict + """ + # mandate src_volume_id although not needed by sdk + snapshot_list = self.elem_connect.list_snapshots( + volume_id=volume_id) + for snapshot in snapshot_list.snapshots: + # if actual id is provided + if str(snapshot_id).isdigit() and snapshot.snapshot_id == int(snapshot_id): + return snapshot + # if snapshot name is provided + elif snapshot.name == 
snapshot_id: + return snapshot + return None + + @staticmethod + def map_qos_obj_to_dict(qos_obj): + ''' Take a QOS object and return a key, normalize the key names + Interestingly, the APIs are using different ids for create and get + ''' + mappings = [ + ('burst_iops', 'burstIOPS'), + ('min_iops', 'minIOPS'), + ('max_iops', 'maxIOPS'), + ] + qos_dict = vars(qos_obj) + # Align names to create API and module interface + for read, send in mappings: + if read in qos_dict: + qos_dict[send] = qos_dict.pop(read) + return qos_dict + + def get_qos_policy(self, name): + """ + Get QOS Policy + :description: Get QOS Policy object for a given name + :return: object, error + Policy object converted to dict if found, else None + Error text if error, else None + :rtype: dict/None, str/None + """ + try: + qos_policy_list_obj = self.elem_connect.list_qos_policies() + except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc: + error = "Error getting list of qos policies: %s" % to_native(exc) + return None, error + + policy_dict = dict() + if hasattr(qos_policy_list_obj, 'qos_policies'): + for policy in qos_policy_list_obj.qos_policies: + # Check and get policy object for a given name + if str(policy.qos_policy_id) == name: + policy_dict = vars(policy) + elif policy.name == name: + policy_dict = vars(policy) + if 'qos' in policy_dict: + policy_dict['qos'] = self.map_qos_obj_to_dict(policy_dict['qos']) + + return policy_dict if policy_dict else None, None + + def account_exists(self, account): + """ + Return account_id if account exists for given account id or name + Raises an exception if account does not exist + + :param account: Account ID or Name + :type account: str + :return: Account ID if found, None if not found + """ + # If account is an integer, get_by_id + if account.isdigit(): + account_id = int(account) + try: + result = self.elem_connect.get_account_by_id(account_id=account_id) + if result.account.account_id == account_id: + return account_id 
+ except solidfire.common.ApiServerError: + # don't fail, continue and try get_by_name + pass + # get account by name, the method returns an Exception if account doesn't exist + result = self.elem_connect.get_account_by_name(username=account) + return result.account.account_id + + def set_element_attributes(self, source): + """ + Return telemetry attributes for the current execution + + :param source: name of the module + :type source: str + :return: a dict containing telemetry attributes + """ + attributes = {} + attributes['config-mgmt'] = 'ansible' + attributes['event-source'] = source + return attributes diff --git a/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py new file mode 100644 index 000000000..c2b02d3d2 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py @@ -0,0 +1,225 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2018, NetApp Ansible Team +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +''' Support class for NetApp ansible modules ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def cmp(a, b): + """ + Python 3 does not have a cmp function, this will do the cmp. + :param a: first object to check + :param b: second object to check + :return: + """ + # convert to lower case for string comparison. + if a is None: + return -1 + if type(a) is str and type(b) is str: + a = a.lower() + b = b.lower() + # if list has string element, convert string to lower case. 
+ if type(a) is list and type(b) is list: + a = [x.lower() if type(x) is str else x for x in a] + b = [x.lower() if type(x) is str else x for x in b] + a.sort() + b.sort() + return (a > b) - (a < b) + + +class NetAppModule(object): + ''' + Common class for NetApp modules + set of support functions to derive actions based + on the current state of the system, and a desired state + ''' + + def __init__(self): + self.log = list() + self.changed = False + self.parameters = {'name': 'not intialized'} + # self.debug = list() + + def set_parameters(self, ansible_params): + self.parameters = dict() + for param in ansible_params: + if ansible_params[param] is not None: + self.parameters[param] = ansible_params[param] + return self.parameters + + def get_cd_action(self, current, desired): + ''' takes a desired state and a current state, and return an action: + create, delete, None + eg: + is_present = 'absent' + some_object = self.get_object(source) + if some_object is not None: + is_present = 'present' + action = cd_action(current=is_present, desired = self.desired.state()) + ''' + if 'state' in desired: + desired_state = desired['state'] + else: + desired_state = 'present' + + if current is None and desired_state == 'absent': + return None + if current is not None and desired_state == 'present': + return None + # change in state + self.changed = True + if current is not None: + return 'delete' + return 'create' + + def compare_and_update_values(self, current, desired, keys_to_compare): + updated_values = dict() + is_changed = False + for key in keys_to_compare: + if key in current: + if key in desired and desired[key] is not None: + if current[key] != desired[key]: + updated_values[key] = desired[key] + is_changed = True + else: + updated_values[key] = current[key] + else: + updated_values[key] = current[key] + + return updated_values, is_changed + + @staticmethod + def check_keys(current, desired): + ''' TODO: raise an error if keys do not match + with the exception of: + 
new_name, state in desired + ''' + pass + + @staticmethod + def compare_lists(current, desired, get_list_diff): + ''' compares two lists and return a list of elements that are either the desired elements or elements that are + modified from the current state depending on the get_list_diff flag + :param: current: current item attribute in ONTAP + :param: desired: attributes from playbook + :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute + :return: list of attributes to be modified + :rtype: list + ''' + desired_diff_list = [item for item in desired if item not in current] # get what in desired and not in current + current_diff_list = [item for item in current if item not in desired] # get what in current but not in desired + + if desired_diff_list or current_diff_list: + # there are changes + if get_list_diff: + return desired_diff_list + else: + return desired + else: + return [] + + def get_modified_attributes(self, current, desired, get_list_diff=False, additional_keys=False): + ''' takes two dicts of attributes and return a dict of attributes that are + not in the current state + It is expected that all attributes of interest are listed in current and + desired. + The same assumption holds true for any nested directory. + TODO: This is actually not true for the ElementSW 'attributes' directory. + Practically it means you cannot add or remove a key in a modify. 
+ :param: current: current attributes in ONTAP + :param: desired: attributes from playbook + :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute + :return: dict of attributes to be modified + :rtype: dict + + NOTE: depending on the attribute, the caller may need to do a modify or a + different operation (eg move volume if the modified attribute is an + aggregate name) + ''' + # uncomment these 2 lines if needed + # self.log.append('current: %s' % repr(current)) + # self.log.append('desired: %s' % repr(desired)) + # if the object does not exist, we can't modify it + modified = dict() + if current is None: + return modified + + # error out if keys do not match + self.check_keys(current, desired) + + # collect changed attributes + for key, value in current.items(): + if key in desired and desired[key] is not None: + if type(value) is list: + modified_list = self.compare_lists(value, desired[key], get_list_diff) # get modified list from current and desired + if modified_list: + modified[key] = modified_list + elif type(value) is dict: + modified_dict = self.get_modified_attributes(value, desired[key], get_list_diff, additional_keys=True) + if modified_dict: + modified[key] = modified_dict + elif cmp(value, desired[key]) != 0: + modified[key] = desired[key] + if additional_keys: + for key, value in desired.items(): + if key not in current: + modified[key] = desired[key] + if modified: + self.changed = True + # Uncomment this line if needed + # self.log.append('modified: %s' % repr(modified)) + return modified + + def is_rename_action(self, source, target): + ''' takes a source and target object, and returns True + if a rename is required + eg: + source = self.get_object(source_name) + target = self.get_object(target_name) + action = is_rename_action(source, target) + :return: None for error, True for rename action, False otherwise + ''' + if source is None and target is None: + # error, do nothing + # cannot rename 
an non existent resource + # alternatively we could create B + return None + if source is not None and target is not None: + # error, do nothing + # idempotency (or) new_name_is_already_in_use + # alternatively we could delete B and rename A to B + return False + if source is None and target is not None: + # do nothing, maybe the rename was already done + return False + # source is not None and target is None: + # rename is in order + self.changed = True + return True diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py new file mode 100644 index 000000000..467ca415c --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py @@ -0,0 +1,397 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Element Software Access Group Manager +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_access_group + +short_description: NetApp Element Software Manage Access Groups +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, destroy, or update access groups on Element Software Cluster. + +options: + + state: + description: + - Whether the specified access group should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + from_name: + description: + - ID or Name of the access group to rename. + - Required to create a new access group called 'name' by renaming 'from_name'. 
+ version_added: 2.8.0 + type: str + + name: + description: + - Name for the access group for create, modify and delete operations. + required: True + aliases: + - src_access_group_id + type: str + + initiators: + description: + - List of initiators to include in the access group. If unspecified, the access group will start out without configured initiators. + type: list + elements: str + + volumes: + description: + - List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes. + - It accepts either volume_name or volume_id + type: list + elements: str + + account_id: + description: + - Account ID for the owner of this volume. + - It accepts either account_name or account_id + - if account_id is digit, it will consider as account_id + - If account_id is string, it will consider as account_name + version_added: 2.8.0 + type: str + + virtual_network_id: + description: + - The ID of the Element SW Software Cluster Virtual Network to associate the access group with. + type: int + + virtual_network_tags: + description: + - The tags of VLAN Virtual Network Tag to associate the access group with. + type: list + elements: str + + attributes: + description: List of Name/Value pairs in JSON object format. 
+ type: dict + +''' + +EXAMPLES = """ + - name: Create Access Group + na_elementsw_access_group: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + name: AnsibleAccessGroup + volumes: [7,8] + account_id: 1 + + - name: Modify Access Group + na_elementsw_access_group: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + name: AnsibleAccessGroup-Renamed + account_id: 1 + attributes: {"volumes": [1,2,3], "virtual_network_id": 12345} + + - name: Rename Access Group + na_elementsw_access_group: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + from_name: AnsibleAccessGroup + name: AnsibleAccessGroup-Renamed + + - name: Delete Access Group + na_elementsw_access_group: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + name: 1 +""" + + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + import solidfire.common +except ImportError: + HAS_SF_SDK = False + + +class ElementSWAccessGroup(object): + """ + Element Software Volume Access Group + """ + + def __init__(self): + + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + from_name=dict(required=False, type='str'), + name=dict(required=True, 
aliases=["src_access_group_id"], type='str'), + initiators=dict(required=False, type='list', elements='str'), + volumes=dict(required=False, type='list', elements='str'), + account_id=dict(required=False, type='str'), + virtual_network_id=dict(required=False, type='int'), + virtual_network_tags=dict(required=False, type='list', elements='str'), + attributes=dict(required=False, type='dict'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['account_id']) + ], + supports_check_mode=True + ) + + input_params = self.module.params + + # Set up state variables + self.state = input_params['state'] + self.from_name = input_params['from_name'] + self.access_group_name = input_params['name'] + self.initiators = input_params['initiators'] + self.volumes = input_params['volumes'] + self.account_id = input_params['account_id'] + self.virtual_network_id = input_params['virtual_network_id'] + self.virtual_network_tags = input_params['virtual_network_tags'] + self.attributes = input_params['attributes'] + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + self.elementsw_helper = NaElementSWModule(self.sfe) + + # add telemetry attributes + if self.attributes is not None: + self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_access_group')) + else: + self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_access_group') + + def get_access_group(self, name): + """ + Get Access Group + :description: Get Access Group object for a given name + + :return: object (Group object) + :rtype: object (Group object) + """ + access_groups_list = self.sfe.list_volume_access_groups() + group_obj = None + + for group in access_groups_list.volume_access_groups: + # Check and get access_group object for a given name + if 
str(group.volume_access_group_id) == name: + group_obj = group + elif group.name == name: + group_obj = group + + return group_obj + + def get_account_id(self): + # Validate account id + # Return account_id if found, None otherwise + try: + account_id = self.elementsw_helper.account_exists(self.account_id) + return account_id + except solidfire.common.ApiServerError: + return None + + def get_volume_ids(self): + # Validate volume_ids + # Return volume ids if found, fail if not found + volume_ids = [] + for volume in self.volumes: + volume_id = self.elementsw_helper.volume_exists(volume, self.account_id) + if volume_id: + volume_ids.append(volume_id) + else: + self.module.fail_json(msg='Specified volume %s does not exist' % volume) + return volume_ids + + def create_access_group(self): + """ + Create the Access Group + """ + try: + self.sfe.create_volume_access_group(name=self.access_group_name, + initiators=self.initiators, + volumes=self.volumes, + virtual_network_id=self.virtual_network_id, + virtual_network_tags=self.virtual_network_tags, + attributes=self.attributes) + except Exception as e: + self.module.fail_json(msg="Error creating volume access group %s: %s" % + (self.access_group_name, to_native(e)), exception=traceback.format_exc()) + + def delete_access_group(self): + """ + Delete the Access Group + """ + try: + self.sfe.delete_volume_access_group(volume_access_group_id=self.group_id) + + except Exception as e: + self.module.fail_json(msg="Error deleting volume access group %s: %s" % + (self.access_group_name, to_native(e)), + exception=traceback.format_exc()) + + def update_access_group(self): + """ + Update the Access Group if the access_group already exists + """ + try: + self.sfe.modify_volume_access_group(volume_access_group_id=self.group_id, + virtual_network_id=self.virtual_network_id, + virtual_network_tags=self.virtual_network_tags, + initiators=self.initiators, + volumes=self.volumes, + attributes=self.attributes) + except Exception as e: + 
self.module.fail_json(msg="Error updating volume access group %s: %s" % + (self.access_group_name, to_native(e)), exception=traceback.format_exc()) + + def rename_access_group(self): + """ + Rename the Access Group to the new name + """ + try: + self.sfe.modify_volume_access_group(volume_access_group_id=self.from_group_id, + virtual_network_id=self.virtual_network_id, + virtual_network_tags=self.virtual_network_tags, + name=self.access_group_name, + initiators=self.initiators, + volumes=self.volumes, + attributes=self.attributes) + except Exception as e: + self.module.fail_json(msg="Error updating volume access group %s: %s" % + (self.from_name, to_native(e)), exception=traceback.format_exc()) + + def apply(self): + """ + Process the access group operation on the Element Software Cluster + """ + changed = False + action = None + + input_account_id = self.account_id + if self.account_id is not None: + self.account_id = self.get_account_id() + if self.state == 'present' and self.volumes is not None: + if self.account_id: + self.volumes = self.get_volume_ids() + else: + self.module.fail_json(msg='Error: Specified account id "%s" does not exist.' % str(input_account_id)) + + group_detail = self.get_access_group(self.access_group_name) + + if group_detail is not None: + # If access group found + self.group_id = group_detail.volume_access_group_id + + if self.state == "absent": + action = 'delete' + changed = True + else: + # If state - present, check for any parameter of exising group needs modification. 
+ if self.volumes is not None and len(self.volumes) > 0: + # Compare the volume list + if not group_detail.volumes: + # If access group does not have any volume attached + action = 'update' + changed = True + else: + for volumeID in group_detail.volumes: + if volumeID not in self.volumes: + action = 'update' + changed = True + break + + elif self.initiators is not None and group_detail.initiators != self.initiators: + action = 'update' + changed = True + + elif self.virtual_network_id is not None or self.virtual_network_tags is not None: + action = 'update' + changed = True + + else: + # access_group does not exist + if self.state == "present" and self.from_name is not None: + group_detail = self.get_access_group(self.from_name) + if group_detail is not None: + # If resource pointed by from_name exists, rename the access_group to name + self.from_group_id = group_detail.volume_access_group_id + action = 'rename' + changed = True + else: + # If resource pointed by from_name does not exists, error out + self.module.fail_json(msg="Resource does not exist : %s" % self.from_name) + elif self.state == "present": + # If from_name is not defined, Create from scratch. 
+ action = 'create' + changed = True + + if changed and not self.module.check_mode: + if action == 'create': + self.create_access_group() + elif action == 'rename': + self.rename_access_group() + elif action == 'update': + self.update_access_group() + elif action == 'delete': + self.delete_access_group() + + self.module.exit_json(changed=changed) + + +def main(): + """ + Main function + """ + na_elementsw_access_group = ElementSWAccessGroup() + na_elementsw_access_group.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py new file mode 100644 index 000000000..af9053a13 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py @@ -0,0 +1,247 @@ +#!/usr/bin/python + +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Element Software Access Group Volumes +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_access_group_volumes + +short_description: NetApp Element Software Add/Remove Volumes to/from Access Group +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 20.1.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Add or remove volumes to/from access group on Element Software Cluster. + +options: + + state: + description: + - Whether the specified volumes should exist or not for this access group. 
+ choices: ['present', 'absent'] + default: present + type: str + + access_group: + description: + - Name or id for the access group to add volumes to, or remove volumes from + required: true + type: str + + volumes: + description: + - List of volumes to add/remove from/to the access group. + - It accepts either volume_name or volume_id + required: True + type: list + elements: str + + account_id: + description: + - Account ID for the owner of this volume. + - It accepts either account_name or account_id + - if account_id is numeric, look up for account_id first, then look up for account_name + - If account_id is not numeric, look up for account_name + required: true + type: str +''' + +EXAMPLES = """ + - name: Add Volumes to Access Group + na_elementsw_access_group: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + access_group: AnsibleAccessGroup + volumes: ['vol7','vol8','vol9'] + account_id: '1' + + - name: Remove Volumes from Access Group + na_elementsw_access_group: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + access_group: AnsibleAccessGroup + volumes: ['vol7','vol9'] + account_id: '1' +""" + + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + import solidfire.common +except ImportError: + HAS_SF_SDK = False + + +class ElementSWAccessGroupVolumes(object): + """ + Element Software Access Group Volumes + """ + + def __init__(self): + + self.argument_spec = 
netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + access_group=dict(required=True, type='str'), + volumes=dict(required=True, type='list', elements='str'), + account_id=dict(required=True, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + input_params = self.module.params + + # Set up state variables + self.state = input_params['state'] + self.access_group_name = input_params['access_group'] + self.volumes = input_params['volumes'] + self.account_id = input_params['account_id'] + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + self.elementsw_helper = NaElementSWModule(self.sfe) + + # add telemetry attributes + self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_access_group') + + def get_access_group(self, name): + """ + Get Access Group + :description: Get Access Group object for a given name + + :return: object (Group object) + :rtype: object (Group object) + """ + access_groups_list = self.sfe.list_volume_access_groups() + group_obj = None + + for group in access_groups_list.volume_access_groups: + # Check and get access_group object for a given name + if str(group.volume_access_group_id) == name: + group_obj = group + elif group.name == name: + group_obj = group + + return group_obj + + def get_account_id(self): + # Validate account id + # Return account_id if found, None otherwise + try: + account_id = self.elementsw_helper.account_exists(self.account_id) + return account_id + except solidfire.common.ApiServerError: + return None + + def get_volume_ids(self): + # Validate volume_ids + # Return volume ids if found, fail if not found + volume_ids = [] + for volume in self.volumes: + volume_id = 
self.elementsw_helper.volume_exists(volume, self.account_id) + if volume_id: + volume_ids.append(volume_id) + else: + self.module.fail_json(msg='Error: Specified volume %s does not exist' % volume) + return volume_ids + + def update_access_group(self, volumes): + """ + Update the Access Group if the access_group already exists + """ + try: + self.sfe.modify_volume_access_group(volume_access_group_id=self.group_id, + volumes=volumes) + except Exception as e: + self.module.fail_json(msg="Error updating volume access group %s: %s" % + (self.access_group_name, to_native(e)), exception=traceback.format_exc()) + + def apply(self): + """ + Process the volume add/remove operations for the access group on the Element Software Cluster + """ + changed = False + input_account_id = self.account_id + + if self.account_id is not None: + self.account_id = self.get_account_id() + if self.account_id is None: + self.module.fail_json(msg='Error: Specified account id "%s" does not exist.' % str(input_account_id)) + + # get volume data + self.volume_ids = self.get_volume_ids() + group_detail = self.get_access_group(self.access_group_name) + if group_detail is None: + self.module.fail_json(msg='Error: Specified access group "%s" does not exist for account id: %s.' 
% (self.access_group_name, str(input_account_id))) + self.group_id = group_detail.volume_access_group_id + volumes = group_detail.volumes + + # compare expected list of volumes to existing one + if self.state == "absent": + # remove volumes if present in access group + volumes = [vol for vol in group_detail.volumes if vol not in self.volume_ids] + else: + # add volumes if not already present + volumes = [vol for vol in self.volume_ids if vol not in group_detail.volumes] + volumes.extend(group_detail.volumes) + + # update if there is a change + if len(volumes) != len(group_detail.volumes): + if not self.module.check_mode: + self.update_access_group(volumes) + changed = True + + self.module.exit_json(changed=changed) + + +def main(): + """ + Main function + """ + na_elementsw_access_group_volumes = ElementSWAccessGroupVolumes() + na_elementsw_access_group_volumes.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py new file mode 100644 index 000000000..862753747 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py @@ -0,0 +1,340 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Element Software Account Manager +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_account + +short_description: NetApp Element Software Manage Accounts +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, destroy, or update accounts on Element SW + +options: + + state: + description: + 
- Whether the specified account should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + element_username: + description: + - Unique username for this account. (May be 1 to 64 characters in length). + required: true + aliases: + - account_id + type: str + + from_name: + description: + - ID or Name of the account to rename. + - Required to create an account called 'element_username' by renaming 'from_name'. + version_added: 2.8.0 + type: str + + initiator_secret: + description: + - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable. + - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret. + - If not specified, a random secret is created. + type: str + + target_secret: + description: + - CHAP secret to use for the target (mutual CHAP authentication). + - Should be 12-16 characters long and impenetrable. + - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret. + - If not specified, a random secret is created. + type: str + + attributes: + description: List of Name/Value pairs in JSON object format. + type: dict + + status: + description: + - Status of the account. 
+ type: str + +''' + +EXAMPLES = """ +- name: Create Account + na_elementsw_account: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + element_username: TenantA + +- name: Modify Account + na_elementsw_account: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + status: locked + element_username: TenantA + +- name: Rename Account + na_elementsw_account: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + element_username: TenantA_Renamed + from_name: TenantA + +- name: Rename and modify Account + na_elementsw_account: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + status: locked + element_username: TenantA_Renamed + from_name: TenantA + +- name: Delete Account + na_elementsw_account: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + element_username: TenantA_Renamed +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule + + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class ElementSWAccount(object): + """ + Element SW Account + """ + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + element_username=dict(required=True, aliases=["account_id"], type='str'), + from_name=dict(required=False, 
default=None), + initiator_secret=dict(required=False, type='str', no_log=True), + target_secret=dict(required=False, type='str', no_log=True), + attributes=dict(required=False, type='dict'), + status=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + params = self.module.params + + # set up state variables + self.state = params.get('state') + self.element_username = params.get('element_username') + self.from_name = params.get('from_name') + self.initiator_secret = params.get('initiator_secret') + self.target_secret = params.get('target_secret') + self.attributes = params.get('attributes') + self.status = params.get('status') + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the Element SW Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + self.elementsw_helper = NaElementSWModule(self.sfe) + + # add telemetry attributes + if self.attributes is not None: + self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_account')) + else: + self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_account') + + def get_account(self, username): + """ + Get Account + :description: Get Account object from account id or name + + :return: Details about the account. None if not found. 
+ :rtype: object (Account object) + """ + + account_list = self.sfe.list_accounts() + + for account in account_list.accounts: + # Check and get account object for a given name + if str(account.account_id) == username: + return account + elif account.username == username: + return account + return None + + def create_account(self): + """ + Create the Account + """ + try: + self.sfe.add_account(username=self.element_username, + initiator_secret=self.initiator_secret, + target_secret=self.target_secret, + attributes=self.attributes) + except Exception as e: + self.module.fail_json(msg='Error creating account %s: %s' % (self.element_username, to_native(e)), + exception=traceback.format_exc()) + + def delete_account(self): + """ + Delete the Account + """ + try: + self.sfe.remove_account(account_id=self.account_id) + + except Exception as e: + self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)), + exception=traceback.format_exc()) + + def rename_account(self): + """ + Rename the Account + """ + try: + self.sfe.modify_account(account_id=self.account_id, + username=self.element_username, + status=self.status, + initiator_secret=self.initiator_secret, + target_secret=self.target_secret, + attributes=self.attributes) + + except Exception as e: + self.module.fail_json(msg='Error renaming account %s: %s' % (self.account_id, to_native(e)), + exception=traceback.format_exc()) + + def update_account(self): + """ + Update the Account if account already exists + """ + try: + self.sfe.modify_account(account_id=self.account_id, + status=self.status, + initiator_secret=self.initiator_secret, + target_secret=self.target_secret, + attributes=self.attributes) + + except Exception as e: + self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)), + exception=traceback.format_exc()) + + def apply(self): + """ + Process the account operation on the Element OS Cluster + """ + changed = False + update_account = False + 
account_detail = self.get_account(self.element_username) + + if account_detail is None and self.state == 'present': + changed = True + + elif account_detail is not None: + # If account found + self.account_id = account_detail.account_id + + if self.state == 'absent': + changed = True + else: + # If state - present, check for any parameter of exising account needs modification. + if account_detail.username is not None and self.element_username is not None and \ + account_detail.username != self.element_username: + update_account = True + changed = True + elif account_detail.status is not None and self.status is not None \ + and account_detail.status != self.status: + update_account = True + changed = True + + elif account_detail.initiator_secret is not None and self.initiator_secret is not None \ + and account_detail.initiator_secret != self.initiator_secret: + update_account = True + changed = True + + elif account_detail.target_secret is not None and self.target_secret is not None \ + and account_detail.target_secret != self.target_secret: + update_account = True + changed = True + + elif account_detail.attributes is not None and self.attributes is not None \ + and account_detail.attributes != self.attributes: + update_account = True + changed = True + if changed: + if self.module.check_mode: + # Skipping the changes + pass + else: + if self.state == 'present': + if update_account: + self.update_account() + else: + if self.from_name is not None: + # If from_name is defined + account_exists = self.get_account(self.from_name) + if account_exists is not None: + # If resource pointed by from_name exists, rename the account to name + self.account_id = account_exists.account_id + self.rename_account() + else: + # If resource pointed by from_name does not exists, error out + self.module.fail_json(msg="Resource does not exist : %s" % self.from_name) + else: + # If from_name is not defined, create from scratch. 
+ self.create_account() + elif self.state == 'absent': + self.delete_account() + + self.module.exit_json(changed=changed) + + +def main(): + """ + Main function + """ + na_elementsw_account = ElementSWAccount() + na_elementsw_account.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py new file mode 100644 index 000000000..7ad46648a --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py @@ -0,0 +1,233 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_admin_users + +short_description: NetApp Element Software Manage Admin Users +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, destroy, or update admin users on SolidFire + +options: + + state: + description: + - Whether the specified account should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + element_username: + description: + - Unique username for this account. (May be 1 to 64 characters in length). + required: true + type: str + + element_password: + description: + - The password for the new admin account. 
class NetAppElementSWAdminUser(object):
    """
    Class to set, modify and delete admin users on an ElementSW box.
    """

    def __init__(self):
        """
        Initialize the NetAppElementSWAdminUser class.
        """
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            element_username=dict(required=True, type='str'),
            element_password=dict(required=False, type='str', no_log=True),
            acceptEula=dict(required=False, type='bool'),
            access=dict(required=False, type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        param = self.module.params
        # set up state variables
        self.state = param['state']
        self.element_username = param['element_username']
        self.element_password = param['element_password']
        self.acceptEula = param['acceptEula']
        self.access = param['access']

        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)

        self.elementsw_helper = NaElementSWModule(self.sfe)

        # add telemetry attributes
        self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_admin_users')

    def does_admin_user_exist(self):
        """
        Check whether an admin user with the requested username exists.

        :return: True if the user exists, False if it does not exist
        :rtype: bool
        """
        admins_list = self.sfe.list_cluster_admins()
        for admin in admins_list.cluster_admins:
            if admin.username == self.element_username:
                return True
        return False

    def get_admin_user(self):
        """
        Get the admin user object matching element_username.

        :return: the admin user object, or None if not found
        """
        admins_list = self.sfe.list_cluster_admins()
        for admin in admins_list.cluster_admins:
            if admin.username == self.element_username:
                return admin
        return None

    def modify_admin_user(self):
        """
        Modify an admin user.

        A modification is triggered when the requested access list contains an
        entry the user does not already have, or when a password is supplied.
        There is no way to compare a new password with the existing one, so a
        provided password always results in a modification (as documented in
        the module DOCUMENTATION).

        :return: True if the user was modified, False otherwise
        :rtype: bool
        """
        changed = False
        admin_user = self.get_admin_user()
        if self.access is not None and len(self.access) > 0:
            for access in self.access:
                if access not in admin_user.access:
                    changed = True
        # Bug fix: a supplied password was previously ignored unless the access
        # list also changed, contradicting the documented behavior that setting
        # the password always resets it.
        if self.element_password is not None:
            changed = True
        if changed and not self.module.check_mode:
            self.sfe.modify_cluster_admin(cluster_admin_id=admin_user.cluster_admin_id,
                                          access=self.access,
                                          password=self.element_password,
                                          attributes=self.attributes)

        return changed

    def add_admin_user(self):
        """
        Add a new admin user to the element cluster.

        :return: nothing
        """
        self.sfe.add_cluster_admin(username=self.element_username,
                                   password=self.element_password,
                                   access=self.access,
                                   accept_eula=self.acceptEula,
                                   attributes=self.attributes)

    def delete_admin_user(self):
        """
        Delete an existing admin user from the element cluster.

        :return: nothing
        """
        admin_user = self.get_admin_user()
        self.sfe.remove_cluster_admin(cluster_admin_id=admin_user.cluster_admin_id)

    def apply(self):
        """
        Determine which method to call to add, delete or modify admin users.

        :return: nothing; exits the module with the 'changed' status
        """
        changed = False
        if self.state == "present":
            if self.does_admin_user_exist():
                changed = self.modify_admin_user()
            else:
                if not self.module.check_mode:
                    self.add_admin_user()
                changed = True
        else:
            if self.does_admin_user_exist():
                if not self.module.check_mode:
                    self.delete_admin_user()
                changed = True

        self.module.exit_json(changed=changed)


def main():
    v = NetAppElementSWAdminUser()
    v.apply()


if __name__ == '__main__':
    main()
b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Element Software Backup Manager +""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' + +module: na_elementsw_backup + +short_description: NetApp Element Software Create Backups +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create backup + +options: + + src_volume_id: + description: + - ID of the backup source volume. + required: true + aliases: + - volume_id + type: str + + dest_hostname: + description: + - hostname for the backup source cluster + - will be set equal to hostname if not specified + required: false + type: str + + dest_username: + description: + - username for the backup destination cluster + - will be set equal to username if not specified + required: false + type: str + + dest_password: + description: + - password for the backup destination cluster + - will be set equal to password if not specified + required: false + type: str + + dest_volume_id: + description: + - ID of the backup destination volume + required: true + type: str + + format: + description: + - Backup format to use + choices: ['native','uncompressed'] + required: false + default: 'native' + type: str + + script: + description: + - the backup script to be executed + required: false + type: str + + script_parameters: + description: + - the backup script parameters + required: false + type: dict + +''' + +EXAMPLES = """ +na_elementsw_backup: + hostname: "{{ source_cluster_hostname }}" + username: "{{ source_cluster_username }}" + password: "{{ source_cluster_password }}" + 
class ElementSWBackup(object):
    ''' class to handle backup operations '''

    def __init__(self):
        """
        Setup Ansible parameters and SolidFire connections to the source and
        destination clusters.
        """
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()

        self.argument_spec.update(dict(
            src_volume_id=dict(aliases=['volume_id'], required=True, type='str'),
            dest_hostname=dict(required=False, type='str'),
            dest_username=dict(required=False, type='str'),
            dest_password=dict(required=False, type='str', no_log=True),
            dest_volume_id=dict(required=True, type='str'),
            format=dict(required=False, choices=['native', 'uncompressed'], default='native'),
            script=dict(required=False, type='str'),
            script_parameters=dict(required=False, type='dict')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_together=[['script', 'script_parameters']],
            supports_check_mode=True
        )
        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")

        # If destination cluster details are not specified, set the destination to be the same as the source
        if self.module.params["dest_hostname"] is None:
            self.module.params["dest_hostname"] = self.module.params["hostname"]
        if self.module.params["dest_username"] is None:
            self.module.params["dest_username"] = self.module.params["username"]
        if self.module.params["dest_password"] is None:
            self.module.params["dest_password"] = self.module.params["password"]

        params = self.module.params

        # establish a connection to both source and destination elementsw clusters
        self.src_connection = netapp_utils.create_sf_connection(self.module)
        self.module.params["username"] = params["dest_username"]
        self.module.params["password"] = params["dest_password"]
        self.module.params["hostname"] = params["dest_hostname"]
        self.dest_connection = netapp_utils.create_sf_connection(self.module)

        self.elementsw_helper = NaElementSWModule(self.src_connection)

        # add telemetry attributes
        self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_backup')

    def apply(self):
        """
        Apply backup creation logic.

        Bug fix: the module declares supports_check_mode=True but previously
        started the real backup even in check mode; the backup is now skipped
        when running in check mode.
        """
        if not self.module.check_mode:
            self.create_backup()
        self.module.exit_json(changed=True)

    def create_backup(self):
        """
        Create a backup by starting a bulk volume write on the destination
        cluster, then a bulk volume read on the source cluster, and polling
        until the transfer job completes.
        """

        # Start volume write on destination cluster

        try:
            write_obj = self.dest_connection.start_bulk_volume_write(volume_id=self.module.params["dest_volume_id"],
                                                                     format=self.module.params["format"],
                                                                     attributes=self.attributes)
            write_key = write_obj.key
        except solidfire.common.ApiServerError as err:
            self.module.fail_json(msg="Error starting bulk write on destination cluster", exception=to_native(err))

        # Set script parameters if not passed by user
        # These parameters are equivalent to the options used when a backup is executed via the GUI

        if self.module.params["script"] is None and self.module.params["script_parameters"] is None:

            self.module.params["script"] = 'bv_internal.py'
            self.module.params["script_parameters"] = {"write": {
                "mvip": self.module.params["dest_hostname"],
                "username": self.module.params["dest_username"],
                "password": self.module.params["dest_password"],
                "key": write_key,
                "endpoint": "solidfire",
                "format": self.module.params["format"]},
                "range": {"lba": 0, "blocks": 244224}}

        # Start volume read on source cluster

        try:
            read_obj = self.src_connection.start_bulk_volume_read(self.module.params["src_volume_id"],
                                                                  self.module.params["format"],
                                                                  script=self.module.params["script"],
                                                                  script_parameters=self.module.params["script_parameters"],
                                                                  attributes=self.attributes)
        except solidfire.common.ApiServerError as err:
            self.module.fail_json(msg="Error starting bulk read on source cluster", exception=to_native(err))

        # Poll job status until it has completed
        # SF will automatically timeout if the job is not successful after certain amount of time

        completed = False
        while completed is not True:
            # Sleep between polling iterations to reduce api load
            time.sleep(2)
            try:
                result = self.src_connection.get_async_result(read_obj.async_handle, True)
            except solidfire.common.ApiServerError as err:
                self.module.fail_json(msg="Unable to check backup job status", exception=to_native(err))

            if result["status"] != 'running':
                completed = True
        if 'error' in result:
            self.module.fail_json(msg=result['error']['message'])


def main():
    """ Run backup operation"""
    vol_obj = ElementSWBackup()
    vol_obj.apply()


if __name__ == '__main__':
    main()
+short_description: NetApp Element Software Check connectivity to MVIP and SVIP. +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Used to test the management connection to the cluster. +- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity. + +options: + + skip: + description: + - Skip checking connection to SVIP or MVIP. + choices: ['svip', 'mvip'] + type: str + + mvip: + description: + - Optionally, use to test connection of a different MVIP. + - This is not needed to test the connection to the target cluster. + type: str + + svip: + description: + - Optionally, use to test connection of a different SVIP. + - This is not needed to test the connection to the target cluster. + type: str + +''' + + +EXAMPLES = """ + - name: Check connections to MVIP and SVIP + na_elementsw_check_connections: + hostname: "{{ solidfire_hostname }}" + username: "{{ solidfire_username }}" + password: "{{ solidfire_password }}" +""" + +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule + + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class NaElementSWConnection(object): + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']), + mvip=dict(required=False, type='str', default=None), + svip=dict(required=False, type='str', default=None) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('skip', 'svip', ['mvip']), + ('skip', 'mvip', ['svip']) + ], + supports_check_mode=True + ) + + self.na_helper = 
NetAppModule() + self.parameters = self.module.params.copy() + self.msg = "" + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the ElementSW Python SDK") + else: + self.elem = netapp_utils.create_sf_connection(self.module, port=442) + + def check_mvip_connection(self): + """ + Check connection to MVIP + + :return: true if connection was successful, false otherwise. + :rtype: bool + """ + try: + test = self.elem.test_connect_mvip(mvip=self.parameters['mvip']) + # Todo - Log details about the test + return test.details.connected + + except Exception as e: + self.msg += 'Error checking connection to MVIP: %s' % to_native(e) + return False + + def check_svip_connection(self): + """ + Check connection to SVIP + + :return: true if connection was successful, false otherwise. + :rtype: bool + """ + try: + test = self.elem.test_connect_svip(svip=self.parameters['svip']) + # Todo - Log details about the test + return test.details.connected + except Exception as e: + self.msg += 'Error checking connection to SVIP: %s' % to_native(e) + return False + + def apply(self): + passed = False + if self.parameters.get('skip') is None: + # Set failed and msg + passed = self.check_mvip_connection() + # check if both connections have passed + passed &= self.check_svip_connection() + elif self.parameters['skip'] == 'mvip': + passed |= self.check_svip_connection() + elif self.parameters['skip'] == 'svip': + passed |= self.check_mvip_connection() + if not passed: + self.module.fail_json(msg=self.msg) + else: + self.module.exit_json() + + +def main(): + connect_obj = NaElementSWConnection() + connect_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py new file mode 100644 index 000000000..ede60cae3 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py @@ -0,0 
+1,372 @@ +#!/usr/bin/python +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +Element Software Initialize Cluster +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_cluster + +short_description: NetApp Element Software Create Cluster +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Initialize Element Software node ownership to form a cluster. + - If the cluster does not exist, username/password are still required but ignored for initial creation. + - username/password are used as the node credentials to see if the cluster already exists. + - username/password can also be used to set the cluster credentials. + - If the cluster already exists, no error is returned, but changed is set to false. + - Cluster modifications are not supported and are ignored. + +options: + management_virtual_ip: + description: + - Floating (virtual) IP address for the cluster on the management network. + required: true + type: str + + storage_virtual_ip: + description: + - Floating (virtual) IP address for the cluster on the storage (iSCSI) network. + required: true + type: str + + replica_count: + description: + - Number of replicas of each piece of data to store in the cluster. + default: 2 + type: int + + cluster_admin_username: + description: + - Username for the cluster admin. + - If not provided, default to username. + type: str + + cluster_admin_password: + description: + - Initial password for the cluster admin account. + - If not provided, default to password. + type: str + + accept_eula: + description: + - Required to indicate your acceptance of the End User License Agreement when creating this cluster. 
+ - To accept the EULA, set this parameter to true. + type: bool + + nodes: + description: + - Storage IP (SIP) addresses of the initial set of nodes making up the cluster. + - nodes IP must be in the list. + required: true + type: list + elements: str + + attributes: + description: + - List of name-value pairs in JSON object format. + type: dict + + timeout: + description: + - Time to wait for cluster creation to complete. + default: 100 + type: int + version_added: 20.8.0 + + fail_if_cluster_already_exists_with_larger_ensemble: + description: + - If the cluster exists, the default is to verify that I(nodes) is a superset of the existing ensemble. + - A superset is accepted because some nodes may have a different role. + - But the module reports an error if the existing ensemble contains a node not listed in I(nodes). + - This checker is disabled when this option is set to false. + default: true + type: bool + version_added: 20.8.0 + + encryption: + description: to enable or disable encryption at rest + type: bool + version_added: 20.10.0 + + order_number: + description: (experimental) order number as provided by NetApp + type: str + version_added: 20.10.0 + + serial_number: + description: (experimental) serial number as provided by NetApp + type: str + version_added: 20.10.0 +''' + +EXAMPLES = """ + + - name: Initialize new cluster + tags: + - elementsw_cluster + na_elementsw_cluster: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + management_virtual_ip: 10.226.108.32 + storage_virtual_ip: 10.226.109.68 + replica_count: 2 + accept_eula: true + nodes: + - 10.226.109.72 + - 10.226.109.74 +""" + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils 
class ElementSWCluster(object):
    """
    Element Software Initialize node with ownership for cluster formation
    """

    def __init__(self):
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            management_virtual_ip=dict(required=True, type='str'),
            storage_virtual_ip=dict(required=True, type='str'),
            replica_count=dict(required=False, type='int', default=2),
            cluster_admin_username=dict(required=False, type='str'),
            cluster_admin_password=dict(required=False, type='str', no_log=True),
            accept_eula=dict(required=False, type='bool'),
            nodes=dict(required=True, type='list', elements='str'),
            attributes=dict(required=False, type='dict', default=None),
            timeout=dict(required=False, type='int', default=100),
            fail_if_cluster_already_exists_with_larger_ensemble=dict(required=False, type='bool', default=True),
            encryption=dict(required=False, type='bool'),
            order_number=dict(required=False, type='str'),
            serial_number=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        input_params = self.module.params

        self.management_virtual_ip = input_params['management_virtual_ip']
        self.storage_virtual_ip = input_params['storage_virtual_ip']
        self.replica_count = input_params['replica_count']
        self.accept_eula = input_params.get('accept_eula')
        self.attributes = input_params.get('attributes')
        self.nodes = input_params['nodes']
        # cluster admin credentials default to the connection credentials
        self.cluster_admin_username = input_params['username'] if input_params.get('cluster_admin_username') is None else input_params['cluster_admin_username']
        self.cluster_admin_password = input_params['password'] if input_params.get('cluster_admin_password') is None else input_params['cluster_admin_password']
        self.fail_if_cluster_already_exists_with_larger_ensemble = input_params['fail_if_cluster_already_exists_with_larger_ensemble']
        self.encryption = input_params['encryption']
        self.order_number = input_params['order_number']
        self.serial_number = input_params['serial_number']
        self.debug = list()

        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")

        # 442 for node APIs, 443 (default) for cluster APIs
        for role, port in [('node', 442), ('cluster', 443)]:
            try:
                # even though username/password should be optional, create_sf_connection fails if not set
                conn = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, port=port, timeout=input_params['timeout'])
                if role == 'node':
                    self.sfe_node = conn
                else:
                    self.sfe_cluster = conn
            except netapp_utils.solidfire.common.ApiConnectionError as exc:
                if str(exc) == "Bad Credentials":
                    msg = 'Most likely the cluster is already created.'
                    # Bug fix: the conditional expression must be parenthesized.  The
                    # '%' operator binds tighter than 'if/else', so the original
                    # "'...%s...' % 'node' if port == 442 else 'cluster'" appended the
                    # bare word 'cluster' (instead of the full sentence) for port 443.
                    msg += ' Make sure to use valid %s credentials for username and password.' % ('node' if port == 442 else 'cluster')
                    msg += ' Even though credentials are not required for the first create, they are needed to check whether the cluster already exists.'
                    msg += ' Cluster reported: %s' % repr(exc)
                else:
                    msg = 'Failed to create connection: %s' % repr(exc)
                self.module.fail_json(msg=msg)
            except Exception as exc:
                self.module.fail_json(msg='Failed to connect: %s' % repr(exc))

        self.elementsw_helper = NaElementSWModule(self.sfe_cluster)

        # add telemetry attributes
        if self.attributes is not None:
            self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_cluster'))
        else:
            self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_cluster')

    def get_node_cluster_info(self):
        """
        Get Cluster Info - using node API.
        Returns the cluster config object, or None (with the error recorded in
        self.debug) when the node does not report a cluster.
        """
        try:
            info = self.sfe_node.get_config()
            self.debug.append(repr(info.config.cluster))
            return info.config.cluster
        except Exception as exc:
            self.debug.append("port: %s, %s" % (str(self.sfe_node._port), repr(exc)))
            return None

    def check_cluster_exists(self):
        """
        Validate if cluster exists with list of nodes.
        Error out if something is found but with extra nodes in the ensemble
        (unless fail_if_cluster_already_exists_with_larger_ensemble is False).

        :return: True if a cluster is found, False if not found
        :rtype: bool
        """
        info = self.get_node_cluster_info()
        if info is None:
            return False
        ensemble = getattr(info, 'ensemble', None)
        if not ensemble:
            return False
        # format is 'id:IP'
        nodes = [x.split(':', 1)[1] for x in ensemble]
        current_ensemble_nodes = set(nodes) if ensemble else set()
        requested_nodes = set(self.nodes) if self.nodes else set()
        extra_ensemble_nodes = current_ensemble_nodes - requested_nodes
        # TODO: the cluster may have more nodes than what is reported in ensemble:
        # nodes_not_in_ensemble = requested_nodes - current_ensemble_nodes
        # So it's OK to find some missing nodes, but not very deterministic.
        # eg some kind of backup nodes could be in nodes_not_in_ensemble.
        if extra_ensemble_nodes and self.fail_if_cluster_already_exists_with_larger_ensemble:
            msg = 'Error: found existing cluster with more nodes in ensemble.  Cluster: %s, extra nodes: %s' %\
                  (getattr(info, 'cluster', 'not found'), extra_ensemble_nodes)
            msg += '. Cluster info: %s' % repr(info)
            self.module.fail_json(msg=msg)
        if extra_ensemble_nodes:
            self.debug.append("Extra ensemble nodes: %s" % extra_ensemble_nodes)
        nodes_not_in_ensemble = requested_nodes - current_ensemble_nodes
        if nodes_not_in_ensemble:
            self.debug.append("Extra requested nodes not in ensemble: %s" % nodes_not_in_ensemble)
        return True

    def create_cluster_api(self, options):
        ''' Call send_request directly rather than using the SDK if new fields are present
            The new SDK will support these in version 1.17 (Nov or Feb)
        '''
        extra_options = ['enableSoftwareEncryptionAtRest', 'orderNumber', 'serialNumber']
        if not any((item in options for item in extra_options)):
            # use SDK
            return self.sfe_cluster.create_cluster(**options)

        # call directly the API as the SDK is not updated yet
        params = {
            "mvip": options['mvip'],
            "svip": options['svip'],
            "repCount": options['rep_count'],
            "username": options['username'],
            "password": options['password'],
            "nodes": options['nodes'],
        }
        if options['accept_eula'] is not None:
            params["acceptEula"] = options['accept_eula']
        if options['attributes'] is not None:
            params["attributes"] = options['attributes']
        for option in extra_options:
            if options.get(option):
                params[option] = options[option]

        # There is no adaptor.
        return self.sfe_cluster.send_request(
            'CreateCluster',
            netapp_utils.solidfire.CreateClusterResult,
            params,
            since=None
        )

    def create_cluster(self):
        """
        Create Cluster.

        :return: 'created' on success, or 'already_exists: ...' when the
                 cluster was created concurrently.
        """
        options = {
            'mvip': self.management_virtual_ip,
            'svip': self.storage_virtual_ip,
            'rep_count': self.replica_count,
            'accept_eula': self.accept_eula,
            'nodes': self.nodes,
            'attributes': self.attributes,
            'username': self.cluster_admin_username,
            'password': self.cluster_admin_password
        }
        if self.encryption is not None:
            options['enableSoftwareEncryptionAtRest'] = self.encryption
        if self.order_number is not None:
            options['orderNumber'] = self.order_number
        if self.serial_number is not None:
            options['serialNumber'] = self.serial_number

        return_msg = 'created'
        try:
            # does not work as node even though documentation says otherwise
            # running as node, this error is reported: 500 xUnknownAPIMethod method=CreateCluster
            self.create_cluster_api(options)
        except netapp_utils.solidfire.common.ApiServerError as exc:
            # not sure how this can happen, but the cluster may already exists
            if 'xClusterAlreadyCreated' not in str(exc.message):
                self.module.fail_json(msg='Error creating cluster %s' % to_native(exc), exception=traceback.format_exc())
            return_msg = 'already_exists: %s' % str(exc.message)
        except Exception as exc:
            self.module.fail_json(msg='Error creating cluster %s' % to_native(exc), exception=traceback.format_exc())
        return return_msg

    def apply(self):
        """
        Check connection and initialize node with cluster ownership
        """
        changed = False
        result_message = None
        exists = self.check_cluster_exists()
        if exists:
            result_message = "cluster already exists"
        else:
            changed = True
            if not self.module.check_mode:
                result_message = self.create_cluster()
                if result_message.startswith('already_exists:'):
                    changed = False
        self.module.exit_json(changed=changed, msg=result_message, debug=self.debug)


def main():
    """
    Main function
    """
    na_elementsw_cluster = ElementSWCluster()
    na_elementsw_cluster.apply()


if __name__ == '__main__':
    main()
+ +options: + modify_cluster_full_threshold: + description: + - The capacity level at which the cluster generates an event + - Requires a stage3_block_threshold_percent or + - max_metadata_over_provision_factor or + - stage2_aware_threshold + suboptions: + stage3_block_threshold_percent: + description: + - The percentage below the "Error" threshold that triggers a cluster "Warning" alert + type: int + max_metadata_over_provision_factor: + description: + - The number of times metadata space can be overprovisioned relative to the amount of space available + type: int + stage2_aware_threshold: + description: + - The number of nodes of capacity remaining in the cluster before the system triggers a notification + type: int + type: dict + + encryption_at_rest: + description: + - enable or disable the Advanced Encryption Standard (AES) 256-bit encryption at rest on the cluster + choices: ['present', 'absent'] + type: str + + set_ntp_info: + description: + - configure NTP on cluster node + - Requires a list of one or more ntp_servers + suboptions: + ntp_servers: + description: + - list of NTP servers to add to each nodes NTP configuration + type: list + elements: str + broadcastclient: + type: bool + default: False + description: + - Enables every node in the cluster as a broadcast client + type: dict + + enable_virtual_volumes: + type: bool + default: True + description: + - Enable the NetApp SolidFire VVols cluster feature +''' + +EXAMPLES = """ + + - name: Configure cluster + tags: + - elementsw_cluster_config + na_elementsw_cluster_config: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + modify_cluster_full_threshold: + stage2_aware_threshold: 2 + stage3_block_threshold_percent: 10 + max_metadata_over_provision_factor: 2 + encryption_at_rest: absent + set_ntp_info: + broadcastclient: False + ntp_servers: + - 1.1.1.1 + - 2.2.2.2 + enable_virtual_volumes: True +""" + +RETURN = """ + +msg: + 
def get_ntp_details(self):
    """
    Fetch the cluster's current NTP configuration.

    :return: the NTP info object reported by the Element API
             (servers list plus broadcastclient flag).
    """
    return self.sfe.get_ntp_info()
def set_encryption_at_rest(self, state=None):
    """
    Enable or disable AES-256 encryption at rest on the cluster.

    :param state: 'present' to enable, 'absent' to disable; any other
                  value (including None) is a no-op, matching the
                  module's choices validation.
    Fails the module if the API call raises.
    """
    # Map the desired state onto the verb used in the error message and
    # the SDK method to invoke.
    dispatch = {
        'present': ('enable', 'enable_encryption_at_rest'),
        'absent': ('disable', 'disable_encryption_at_rest'),
    }
    if state not in dispatch:
        return
    encryption_state, method_name = dispatch[state]
    try:
        getattr(self.sfe, method_name)()
    except Exception as exception_object:
        self.module.fail_json(msg='Failed to %s rest encryption %s' % (encryption_state,
                                                                       to_native(exception_object)),
                              exception=traceback.format_exc())
self.sfe.modify_cluster_full_threshold(stage2_aware_threshold=stage2_aware_threshold, + stage3_block_threshold_percent=stage3_block_threshold_percent, + max_metadata_over_provision_factor=max_metadata_over_provision_factor) + except Exception as exception_object: + self.module.fail_json(msg='Failed to modify cluster full threshold %s' % (to_native(exception_object)), + exception=traceback.format_exc()) + + def apply(self): + """ + Cluster configuration + """ + changed = False + result_message = None + + if self.parameters.get('modify_cluster_full_threshold') is not None: + # get cluster full threshold + cluster_full_threshold_details = self.get_cluster_full_threshold_status() + # maxMetadataOverProvisionFactor + current_mmopf = cluster_full_threshold_details.max_metadata_over_provision_factor + # stage3BlockThresholdPercent + current_s3btp = cluster_full_threshold_details.stage3_block_threshold_percent + # stage2AwareThreshold + current_s2at = cluster_full_threshold_details.stage2_aware_threshold + + # is cluster full threshold state change required? 
+ if self.parameters.get("modify_cluster_full_threshold")['max_metadata_over_provision_factor'] is not None and \ + current_mmopf != self.parameters['modify_cluster_full_threshold']['max_metadata_over_provision_factor'] or \ + self.parameters.get("modify_cluster_full_threshold")['stage3_block_threshold_percent'] is not None and \ + current_s3btp != self.parameters['modify_cluster_full_threshold']['stage3_block_threshold_percent'] or \ + self.parameters.get("modify_cluster_full_threshold")['stage2_aware_threshold'] is not None and \ + current_s2at != self.parameters['modify_cluster_full_threshold']['stage2_aware_threshold']: + changed = True + self.set_cluster_full_threshold(self.parameters['modify_cluster_full_threshold']['stage2_aware_threshold'], + self.parameters['modify_cluster_full_threshold']['stage3_block_threshold_percent'], + self.parameters['modify_cluster_full_threshold']['max_metadata_over_provision_factor']) + + if self.parameters.get('encryption_at_rest') is not None: + # get all cluster info + cluster_info = self.get_cluster_details() + # register rest state + current_encryption_at_rest_state = cluster_info.cluster_info.encryption_at_rest_state + + # is encryption state change required? + if current_encryption_at_rest_state == 'disabled' and self.parameters['encryption_at_rest'] == 'present' or \ + current_encryption_at_rest_state == 'enabled' and self.parameters['encryption_at_rest'] == 'absent': + changed = True + self.set_encryption_at_rest(self.parameters['encryption_at_rest']) + + if self.parameters.get('set_ntp_info') is not None: + # get all ntp details + ntp_details = self.get_ntp_details() + # register list of ntp servers + ntp_servers = ntp_details.servers + # broadcastclient + broadcast_client = ntp_details.broadcastclient + + # has either the broadcastclient or the ntp server list changed? 
+ + if self.parameters.get('set_ntp_info')['broadcastclient'] != broadcast_client or \ + self.cmp(self.parameters.get('set_ntp_info')['ntp_servers'], ntp_servers) != 0: + changed = True + self.setup_ntp_info(self.parameters.get('set_ntp_info')['ntp_servers'], + self.parameters.get('set_ntp_info')['broadcastclient']) + + if self.parameters.get('enable_virtual_volumes') is not None: + # check vvols status + current_vvols_status = self.get_vvols_status() + + # has the vvols state changed? + if current_vvols_status is False and self.parameters.get('enable_virtual_volumes') is True: + changed = True + self.enable_feature('vvols') + elif current_vvols_status is True and self.parameters.get('enable_virtual_volumes') is not True: + # vvols, once enabled, cannot be disabled + self.module.fail_json(msg='Error disabling vvols: this feature cannot be undone') + + if self.module.check_mode is True: + result_message = "Check mode, skipping changes" + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + """ + Main function + """ + na_elementsw_cluster_config = ElementSWClusterConfig() + na_elementsw_cluster_config.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py new file mode 100644 index 000000000..af064e214 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_cluster_pair + +short_description: NetApp Element Software Manage Cluster 
Pair +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete cluster pair + +options: + + state: + description: + - Whether the specified cluster pair should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + dest_mvip: + description: + - Destination IP address of the cluster to be paired. + required: true + type: str + + dest_username: + description: + - Destination username for the cluster to be paired. + - Optional if this is same as source cluster username. + type: str + + dest_password: + description: + - Destination password for the cluster to be paired. + - Optional if this is same as source cluster password. + type: str + +''' + +EXAMPLES = """ + - name: Create cluster pair + na_elementsw_cluster_pair: + hostname: "{{ src_hostname }}" + username: "{{ src_username }}" + password: "{{ src_password }}" + state: present + dest_mvip: "{{ dest_hostname }}" + + - name: Delete cluster pair + na_elementsw_cluster_pair: + hostname: "{{ src_hostname }}" + username: "{{ src_username }}" + password: "{{ src_password }}" + state: absent + dest_mvip: "{{ dest_hostname }}" + dest_username: "{{ dest_username }}" + dest_password: "{{ dest_password }}" + +""" + +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + import solidfire.common +except ImportError: + HAS_SF_SDK = False + + +class ElementSWClusterPair(object): + """ class to handle cluster pairing operations """ + + def __init__(self): + """ + Setup Ansible 
def check_if_already_paired(self, paired_clusters, hostname):
    """
    Look up an existing pairing by management virtual IP.

    :param paired_clusters: result of list_cluster_pairs(), exposing
                            a .cluster_pairs iterable
    :param hostname: MVIP of the peer cluster to look for
    :return: the matching cluster_pair_id, or None if this cluster is
             not paired with ``hostname``.
    """
    matches = (pair.cluster_pair_id
               for pair in paired_clusters.cluster_pairs
               if pair.mvip == hostname)
    return next(matches, None)
def unpair_clusters(self, pair_id_source, pair_id_dest):
    """
    Delete the cluster pair on both ends.

    Removes the pairing record first on the source cluster, then on the
    destination cluster; fails the module if either removal raises an
    API error.

    :param pair_id_source: cluster_pair_id as known by the source cluster
    :param pair_id_dest: cluster_pair_id as known by the destination cluster
    """
    try:
        # Source first, then destination — same order as the original
        # implementation, so a failure on the source leaves both intact.
        for connection, pair_id in ((self.elem, pair_id_source),
                                    (self.dest_elem, pair_id_dest)):
            connection.remove_cluster_pair(cluster_pair_id=pair_id)
    except solidfire.common.ApiServerError as err:
        self.module.fail_json(msg="Error unpairing cluster %s and %s"
                              % (self.parameters['hostname'],
                                 self.parameters['dest_mvip']),
                              exception=to_native(err))
b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py @@ -0,0 +1,365 @@ +#!/usr/bin/python +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +Element Software Configure SNMP +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_elementsw_cluster_snmp + +short_description: Configure Element SW Cluster SNMP +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Configure Element Software cluster SNMP. + +options: + + state: + description: + - This module enables you to enable SNMP on cluster nodes. When you enable SNMP, \ + the action applies to all nodes in the cluster, and the values that are passed replace, \ + in whole, all values set in any previous call to this module. + choices: ['present', 'absent'] + default: present + type: str + + snmp_v3_enabled: + description: + - Which version of SNMP has to be enabled. + type: bool + + networks: + description: + - List of networks and what type of access they have to the SNMP servers running on the cluster nodes. + - This parameter is required if SNMP v3 is disabled. + suboptions: + access: + description: + - ro for read-only access. + - rw for read-write access. + - rosys for read-only access to a restricted set of system information. + choices: ['ro', 'rw', 'rosys'] + type: str + cidr: + description: + - A CIDR network mask. This network mask must be an integer greater than or equal to 0, \ + and less than or equal to 32. It must also not be equal to 31. + type: int + community: + description: + - SNMP community string. 
+ type: str + network: + description: + - This parameter along with the cidr variable is used to control which network the access and \ + community string apply to. + - The special value of 'default' is used to specify an entry that applies to all networks. + - The cidr mask is ignored when network value is either a host name or default. + type: str + type: dict + + usm_users: + description: + - List of users and the type of access they have to the SNMP servers running on the cluster nodes. + - This parameter is required if SNMP v3 is enabled. + suboptions: + access: + description: + - rouser for read-only access. + - rwuser for read-write access. + - rosys for read-only access to a restricted set of system information. + choices: ['rouser', 'rwuser', 'rosys'] + type: str + name: + description: + - The name of the user. Must contain at least one character, but no more than 32 characters. + - Blank spaces are not allowed. + type: str + password: + description: + - The password of the user. Must be between 8 and 255 characters long (inclusive). + - Blank spaces are not allowed. + - Required if 'secLevel' is 'auth' or 'priv.' + type: str + passphrase: + description: + - The passphrase of the user. Must be between 8 and 255 characters long (inclusive). + - Blank spaces are not allowed. + - Required if 'secLevel' is 'priv.' + type: str + secLevel: + description: + - To define the security level of a user. 
+ choices: ['noauth', 'auth', 'priv'] + type: str + type: dict + +''' + +EXAMPLES = """ + + - name: configure SnmpNetwork + tags: + - elementsw_cluster_snmp + na_elementsw_cluster_snmp: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + snmp_v3_enabled: True + usm_users: + access: rouser + name: testuser + password: ChangeMe123 + passphrase: ChangeMe123 + secLevel: auth + networks: + access: ro + cidr: 24 + community: TestNetwork + network: 192.168.0.1 + + - name: Disable SnmpNetwork + tags: + - elementsw_cluster_snmp + na_elementsw_cluster_snmp: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + +""" + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class ElementSWClusterSnmp(object): + """ + Element Software Configure Element SW Cluster SnmpNetwork + """ + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + + self.argument_spec.update(dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + snmp_v3_enabled=dict(type='bool'), + networks=dict( + type='dict', + options=dict( + access=dict(type='str', choices=['ro', 'rw', 'rosys']), + cidr=dict(type='int', default=None), + community=dict(type='str', default=None), + network=dict(type='str', default=None) + ) + ), + usm_users=dict( + type='dict', + options=dict( + access=dict(type='str', choices=['rouser', 'rwuser', 'rosys']), + name=dict(type='str', default=None), + password=dict(type='str', 
def enable_snmp(self):
    """
    Turn on the SNMP service for every node in the cluster.

    Passes through the requested SNMP protocol version (v3 vs v2) from
    the module parameters; fails the module if the API call raises.
    """
    v3_requested = self.parameters.get('snmp_v3_enabled')
    try:
        self.sfe.enable_snmp(snmp_v3_enabled=v3_requested)
    except Exception as exception_object:
        self.module.fail_json(msg='Error enabling snmp feature %s' % to_native(exception_object),
                              exception=traceback.format_exc())
def configure_snmp(self, actual_networks, actual_usm_users):
    """
    Push the SNMP access configuration to the cluster.

    Wraps each argument in a single-element list, as set_snmp_acl
    expects list-valued networks/usm_users; fails the module if the
    API call raises.

    :param actual_networks: v2 network ACL entry (dict) or None
    :param actual_usm_users: v3 USM user entry (dict) or None
    """
    acl_kwargs = {
        'networks': [actual_networks],
        'usm_users': [actual_usm_users],
    }
    try:
        self.sfe.set_snmp_acl(**acl_kwargs)
    except Exception as exception_object:
        self.module.fail_json(msg='Error Configuring snmp feature %s' % to_native(exception_object),
                              exception=traceback.format_exc())
snmp_network.cidr != self.cidr or \ + snmp_network.community != self.community or snmp_network.network != self.network: + update_required = True + changed = True + + else: + if self.parameters.get('state') == 'present': + changed = True + + result_message = "" + + if changed: + if self.module.check_mode is True: + result_message = "Check mode, skipping changes" + + else: + if self.parameters.get('state') == "present": + # IF snmp is not enabled, then enable and configure snmp + if self.parameters.get('snmp_v3_enabled') is True: + # IF SNMP is enabled with version 3 + usm_users = {'access': self.access_usm, + 'name': self.name, + 'password': self.password, + 'passphrase': self.passphrase, + 'secLevel': self.secLevel} + networks = None + else: + # IF SNMP is enabled with version 2 + usm_users = None + networks = {'access': self.access_network, + 'cidr': self.cidr, + 'community': self.community, + 'network': self.network} + + if is_snmp_enabled is False or version_change is True: + # Enable and configure snmp + self.enable_snmp() + self.configure_snmp(networks, usm_users) + result_message = "SNMP is enabled and configured" + + elif update_required is True: + # If snmp is already enabled, update the configuration if required + self.configure_snmp(networks, usm_users) + result_message = "SNMP is configured" + + elif is_snmp_enabled is True and self.parameters.get('state') == "absent": + # If snmp is enabled and state is absent, disable snmp + self.disable_snmp() + result_message = "SNMP is disabled" + + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + """ + Main function + """ + na_elementsw_cluster_snmp = ElementSWClusterSnmp() + na_elementsw_cluster_snmp.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py new file mode 100644 index 000000000..f0fd7e38b --- /dev/null +++ 
b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py @@ -0,0 +1,368 @@ +#!/usr/bin/python +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +Element Software Node Drives +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_drive + +short_description: NetApp Element Software Manage Node Drives +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Add, Erase or Remove drive for nodes on Element Software Cluster. + +options: + drive_ids: + description: + - List of Drive IDs or Serial Names of Node drives. + - If not specified, add and remove action will be performed on all drives of node_id + type: list + elements: str + aliases: ['drive_id'] + + state: + description: + - Element SW Storage Drive operation state. + - present - To add drive of node to participate in cluster data storage. + - absent - To remove the drive from being part of active cluster. + - clean - Clean-up any residual data persistent on a *removed* drive in a secured method. + choices: ['present', 'absent', 'clean'] + default: 'present' + type: str + + node_ids: + description: + - List of IDs or Names of cluster nodes. + - If node_ids and drive_ids are not specified, all available drives in the cluster are added if state is present. + - If node_ids and drive_ids are not specified, all active drives in the cluster are removed if state is absent. + required: false + type: list + elements: str + aliases: ['node_id'] + + force_during_upgrade: + description: + - Flag to force drive operation during upgrade. 
+ - Not supported with latest version of SolidFire SDK (1.7.0.152) + type: 'bool' + + force_during_bin_sync: + description: + - Flag to force during a bin sync operation. + - Not supported with latest version of SolidFire SDK (1.7.0.152) + type: 'bool' +''' + +EXAMPLES = """ + - name: Add drive with status available to cluster + tags: + - elementsw_add_drive + na_elementsw_drive: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + drive_ids: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J3221807 + force_during_upgrade: false + force_during_bin_sync: false + node_ids: sf4805-meg-03 + + - name: Remove active drive from cluster + tags: + - elementsw_remove_drive + na_elementsw_drive: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + force_during_upgrade: false + drive_ids: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J321208 + + - name: Secure Erase drive + tags: + - elemensw_clean_drive + na_elementsw_drive: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: clean + drive_ids: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J432109 + node_ids: sf4805-meg-03 + + - name: Add all the drives of all nodes to cluster + tags: + - elementsw_add_node + na_elementsw_drive: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + force_during_upgrade: false + force_during_bin_sync: false + +""" + + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class ElementSWDrive(object): + """ + Element Software 
Storage Drive operations + """ + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent', 'clean'], default='present'), + drive_ids=dict(required=False, type='list', elements='str', aliases=['drive_id']), + node_ids=dict(required=False, type='list', elements='str', aliases=['node_id']), + force_during_upgrade=dict(required=False, type='bool'), + force_during_bin_sync=dict(required=False, type='bool') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + input_params = self.module.params + + self.state = input_params['state'] + self.drive_ids = input_params['drive_ids'] + self.node_ids = input_params['node_ids'] + self.force_during_upgrade = input_params['force_during_upgrade'] + self.force_during_bin_sync = input_params['force_during_bin_sync'] + self.list_nodes = None + self.debug = list() + + if HAS_SF_SDK is False: + self.module.fail_json( + msg="Unable to import the SolidFire Python SDK") + else: + # increase timeout, as removing a disk takes some time + self.sfe = netapp_utils.create_sf_connection(module=self.module, timeout=120) + + def get_node_id(self, node_id): + """ + Get Node ID + :description: Find and retrieve node_id from the active cluster + + :return: node_id (None if not found) + :rtype: node_id + """ + if self.list_nodes is None: + self.list_nodes = self.sfe.list_active_nodes() + for current_node in self.list_nodes.nodes: + if node_id == str(current_node.node_id): + return current_node.node_id + elif node_id == current_node.name: + return current_node.node_id + self.module.fail_json(msg='unable to find node for node_id=%s' % node_id) + + def get_drives_listby_status(self, node_num_ids): + """ + Capture list of drives based on status for a given node_id + :description: Capture list of active, failed and available drives from a given node_id + + :return: None + """ + 
self.active_drives = dict() + self.available_drives = dict() + self.other_drives = dict() + self.all_drives = self.sfe.list_drives() + + for drive in self.all_drives.drives: + # get all drives if no node is given, or match the node_ids + if node_num_ids is None or drive.node_id in node_num_ids: + if drive.status in ['active', 'failed']: + self.active_drives[drive.serial] = drive.drive_id + elif drive.status == "available": + self.available_drives[drive.serial] = drive.drive_id + else: + self.other_drives[drive.serial] = (drive.drive_id, drive.status) + + self.debug.append('available: %s' % self.available_drives) + self.debug.append('active: %s' % self.active_drives) + self.debug.append('other: %s' % self.other_drives) + + def get_drive_id(self, drive_id, node_num_ids): + """ + Get Drive ID + :description: Find and retrieve drive_id from the active cluster + Assumes self.all_drives is already populated + + :return: node_id (None if not found) + :rtype: node_id + """ + for drive in self.all_drives.drives: + if drive_id == str(drive.drive_id): + break + if drive_id == drive.serial: + break + else: + self.module.fail_json(msg='unable to find drive for drive_id=%s. Debug=%s' % (drive_id, self.debug)) + if node_num_ids and drive.node_id not in node_num_ids: + self.module.fail_json(msg='drive for drive_id=%s belongs to another node, with node_id=%d. 
Debug=%s' % (drive_id, drive.node_id, self.debug)) + return drive.drive_id, drive.status + + def get_active_drives(self, drives): + """ + return a list of active drives + if drives is specified, only [] or a subset of disks in drives are returned + else all available drives for this node or cluster are returned + """ + if drives is None: + return list(self.active_drives.values()) + return [drive_id for drive_id, status in drives if status in ['active', 'failed']] + + def get_available_drives(self, drives, action): + """ + return a list of available drives (not active) + if drives is specified, only [] or a subset of disks in drives are returned + else all available drives for this node or cluster are returned + """ + if drives is None: + return list(self.available_drives.values()) + action_list = list() + for drive_id, drive_status in drives: + if drive_status == 'available': + action_list.append(drive_id) + elif drive_status in ['active', 'failed']: + # already added + pass + elif drive_status == 'erasing' and action == 'erase': + # already erasing + pass + elif drive_status == 'removing': + self.module.fail_json(msg='Error - cannot %s drive while it is being removed. Debug: %s' % (action, self.debug)) + elif drive_status == 'erasing' and action == 'add': + self.module.fail_json(msg='Error - cannot %s drive while it is being erased. Debug: %s' % (action, self.debug)) + else: + self.module.fail_json(msg='Error - cannot %s drive while it is in %s state. 
Debug: %s' % (action, drive_status, self.debug)) + return action_list + + def add_drive(self, drives=None): + """ + Add Drive available for Cluster storage expansion + """ + kwargs = dict() + if self.force_during_upgrade is not None: + kwargs['force_during_upgrade'] = self.force_during_upgrade + if self.force_during_bin_sync is not None: + kwargs['force_during_bin_sync'] = self.force_during_bin_sync + try: + self.sfe.add_drives(drives, **kwargs) + except Exception as exception_object: + self.module.fail_json(msg='Error adding drive%s: %s: %s' % + ('s' if len(drives) > 1 else '', + str(drives), + to_native(exception_object)), + exception=traceback.format_exc()) + + def remove_drive(self, drives=None): + """ + Remove Drive active in Cluster + """ + kwargs = dict() + if self.force_during_upgrade is not None: + kwargs['force_during_upgrade'] = self.force_during_upgrade + try: + self.sfe.remove_drives(drives, **kwargs) + except Exception as exception_object: + self.module.fail_json(msg='Error removing drive%s: %s: %s' % + ('s' if len(drives) > 1 else '', + str(drives), + to_native(exception_object)), + exception=traceback.format_exc()) + + def secure_erase(self, drives=None): + """ + Secure Erase any residual data existing on a drive + """ + try: + self.sfe.secure_erase_drives(drives) + except Exception as exception_object: + self.module.fail_json(msg='Error cleaning data from drive%s: %s: %s' % + ('s' if len(drives) > 1 else '', + str(drives), + to_native(exception_object)), + exception=traceback.format_exc()) + + def apply(self): + """ + Check, process and initiate Drive operation + """ + changed = False + + action_list = [] + node_num_ids = None + drives = None + if self.node_ids: + node_num_ids = [self.get_node_id(node_id) for node_id in self.node_ids] + + self.get_drives_listby_status(node_num_ids) + if self.drive_ids: + drives = [self.get_drive_id(drive_id, node_num_ids) for drive_id in self.drive_ids] + + if self.state == "present": + action_list = 
self.get_available_drives(drives, 'add') + elif self.state == "absent": + action_list = self.get_active_drives(drives) + elif self.state == "clean": + action_list = self.get_available_drives(drives, 'erase') + + if len(action_list) > 0: + changed = True + if not self.module.check_mode and changed: + if self.state == "present": + self.add_drive(action_list) + elif self.state == "absent": + self.remove_drive(action_list) + elif self.state == "clean": + self.secure_erase(action_list) + + self.module.exit_json(changed=changed) + + +def main(): + """ + Main function + """ + + na_elementsw_drive = ElementSWDrive() + na_elementsw_drive.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py new file mode 100644 index 000000000..fde928784 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py @@ -0,0 +1,272 @@ +#!/usr/bin/python +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +Element Software Info +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_info +short_description: NetApp Element Software Info +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 20.10.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Collect cluster and node information. + - Use a MVIP as hostname for cluster and node scope. + - Use a MIP as hostname for node scope. 
+ - When using MIPs, cluster APIs are expected to fail with 'xUnknownAPIMethod method=ListAccounts' + +options: + gather_subsets: + description: + - list of subsets to gather from target cluster or node + - supported values + - node_config, cluster_accounts, cluster_nodes, cluster_drives. + - additional values + - all - for all subsets, + - all_clusters - all subsets at cluster scope, + - all_nodes - all subsets at node scope + type: list + elements: str + default: ['all'] + aliases: ['gather_subset'] + + filter: + description: + - When a list of records is returned, this can be used to limit the records to be returned. + - If more than one key is used, all keys must match. + type: dict + + fail_on_error: + description: + - by default, errors are not fatal when collecting a subset. The subset will show on error in the info output. + - if set to True, the module fails on the first error. + type: bool + default: false + + fail_on_key_not_found: + description: + - force an error when filter is used and a key is not present in records. + type: bool + default: true + + fail_on_record_not_found: + description: + - force an error when filter is used and no record is matched. 
+ type: bool + default: false +''' + +EXAMPLES = """ + + - name: get all available subsets + na_elementsw_info: + hostname: "{{ elementsw_mvip }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + gather_subsets: all + register: result + + - name: collect data for elementsw accounts using a filter + na_elementsw_info: + hostname: "{{ elementsw_mvip }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + gather_subsets: 'cluster_accounts' + filter: + username: "{{ username_to_find }}" + register: result +""" + +RETURN = """ + +info: + description: + - a dictionary of collected subsets + - each subset if in JSON format + returned: success + type: dict + +debug: + description: + - a list of detailed error messages if some subsets cannot be collected + returned: success + type: list + +""" +from ansible.module_utils.basic import AnsibleModule + +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class ElementSWInfo(object): + ''' + Element Software Initialize node with ownership for cluster formation + ''' + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + gather_subsets=dict(type='list', elements='str', aliases=['gather_subset'], default='all'), + filter=dict(type='dict'), + fail_on_error=dict(type='bool', default=False), + fail_on_key_not_found=dict(type='bool', default=True), + fail_on_record_not_found=dict(type='bool', default=False), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.debug = list() + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python 
SDK") + + # 442 for node APIs, 443 (default) for cluster APIs + for role, port in [('node', 442), ('cluster', 443)]: + try: + conn = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, port=port) + if role == 'node': + self.sfe_node = conn + else: + self.sfe_cluster = conn + except netapp_utils.solidfire.common.ApiConnectionError as exc: + if str(exc) == "Bad Credentials": + msg = ' Make sure to use valid %s credentials for username and password.' % 'node' if port == 442 else 'cluster' + msg += '%s reported: %s' % ('Node' if port == 442 else 'Cluster', repr(exc)) + else: + msg = 'Failed to create connection for %s:%d - %s' % (self.parameters['hostname'], port, repr(exc)) + self.module.fail_json(msg=msg) + except Exception as exc: + self.module.fail_json(msg='Failed to connect for %s:%d - %s' % (self.parameters['hostname'], port, repr(exc))) + + # TODO: add new node methods here + self.node_methods = dict( + node_config=self.sfe_node.get_config, + ) + # TODO: add new cluster methods here + self.cluster_methods = dict( + cluster_accounts=self.sfe_cluster.list_accounts, + cluster_drives=self.sfe_cluster.list_drives, + cluster_nodes=self.sfe_cluster.list_all_nodes + ) + self.methods = dict(self.node_methods) + self.methods.update(self.cluster_methods) + + # add telemetry attributes - does not matter if we are using cluster or node here + # TODO: most if not all get and list APIs do not have an attributes parameter + + def get_info(self, name): + ''' + Get Element Info + run a cluster or node list method + return output as json + ''' + info = None + if name not in self.methods: + msg = 'Error: unknown subset %s.' 
% name + msg += ' Known_subsets: %s' % ', '.join(self.methods.keys()) + self.module.fail_json(msg=msg, debug=self.debug) + try: + info = self.methods[name]() + return info.to_json() + except netapp_utils.solidfire.common.ApiServerError as exc: + # the new SDK rearranged the fields in a different order + if all(x in str(exc) for x in ('err_json', '500', 'xUnknownAPIMethod', 'method=')): + info = 'Error (API not in scope?)' + else: + info = 'Error' + msg = '%s for subset: %s: %s' % (info, name, repr(exc)) + if self.parameters['fail_on_error']: + self.module.fail_json(msg=msg) + self.debug.append(msg) + return info + + def filter_list_of_dict_by_key(self, records, key, value): + matched = list() + for record in records: + if key in record and record[key] == value: + matched.append(record) + if key not in record and self.parameters['fail_on_key_not_found']: + msg = 'Error: key %s not found in %s' % (key, repr(record)) + self.module.fail_json(msg=msg) + return matched + + def filter_records(self, records, filter_dict): + + if isinstance(records, dict): + if len(records) == 1: + key, value = list(records.items())[0] + return dict({key: self.filter_records(value, filter_dict)}) + if not isinstance(records, list): + return records + matched = records + for key, value in filter_dict.items(): + matched = self.filter_list_of_dict_by_key(matched, key, value) + if self.parameters['fail_on_record_not_found'] and len(matched) == 0: + msg = 'Error: no match for %s out of %d records' % (repr(self.parameters['filter']), len(records)) + self.debug.append('Unmatched records: %s' % repr(records)) + self.module.fail_json(msg=msg, debug=self.debug) + return matched + + def get_and_filter_info(self, name): + ''' + Get data + If filter is present, only return the records that are matched + return output as json + ''' + records = self.get_info(name) + if self.parameters.get('filter') is None: + return records + matched = self.filter_records(records, self.parameters.get('filter')) + return 
matched + + def apply(self): + ''' + Check connection and initialize node with cluster ownership + ''' + changed = False + info = dict() + my_subsets = ('all', 'all_clusters', 'all_nodes') + if any(x in self.parameters['gather_subsets'] for x in my_subsets) and len(self.parameters['gather_subsets']) > 1: + msg = 'When any of %s is used, no other subset is allowed' % repr(my_subsets) + self.module.fail_json(msg=msg) + if 'all' in self.parameters['gather_subsets']: + self.parameters['gather_subsets'] = self.methods.keys() + if 'all_clusters' in self.parameters['gather_subsets']: + self.parameters['gather_subsets'] = self.cluster_methods.keys() + if 'all_nodes' in self.parameters['gather_subsets']: + self.parameters['gather_subsets'] = self.node_methods.keys() + for name in self.parameters['gather_subsets']: + info[name] = self.get_and_filter_info(name) + self.module.exit_json(changed=changed, info=info, debug=self.debug) + + +def main(): + ''' + Main function + ''' + na_elementsw_cluster = ElementSWInfo() + na_elementsw_cluster.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py new file mode 100644 index 000000000..9bef345b4 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +Element Software manage initiators +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_elementsw_initiators + +short_description: Manage Element SW initiators +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire 
+version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Manage Element Software initiators that allow external clients access to volumes. + +options: + initiators: + description: A list of objects containing characteristics of each initiator. + suboptions: + name: + description: The name of the initiator. + type: str + required: true + + alias: + description: The friendly name assigned to this initiator. + type: str + + initiator_id: + description: The numeric ID of the initiator. + type: int + + volume_access_group_id: + description: volumeAccessGroupID to which this initiator belongs. + type: int + + attributes: + description: A set of JSON attributes to assign to this initiator. + type: dict + type: list + elements: dict + + state: + description: + - Whether the specified initiator should exist or not. + choices: ['present', 'absent'] + default: present + type: str +''' + +EXAMPLES = """ + + - name: Manage initiators + tags: + - na_elementsw_initiators + na_elementsw_initiators: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + initiators: + - name: a + alias: a1 + initiator_id: 1 + volume_access_group_id: 1 + attributes: {"key": "value"} + - name: b + alias: b2 + initiator_id: 2 + volume_access_group_id: 2 + state: present +""" + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule +HAS_SF_SDK = netapp_utils.has_sf_sdk() +if HAS_SF_SDK: + from solidfire.models import ModifyInitiator + + +class 
ElementSWInitiators(object): + """ + Element Software Manage Element SW initiators + """ + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + + self.argument_spec.update(dict( + initiators=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', required=True), + alias=dict(type='str', default=None), + initiator_id=dict(type='int', default=None), + volume_access_group_id=dict(type='int', default=None), + attributes=dict(type='dict', default=None), + ) + ), + state=dict(choices=['present', 'absent'], default='present'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.debug = list() + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + self.elementsw_helper = NaElementSWModule(self.sfe) + + # iterate over each user-provided initiator + for initiator in self.parameters.get('initiators'): + # add telemetry attributes + if 'attributes' in initiator and initiator['attributes']: + initiator['attributes'].update(self.elementsw_helper.set_element_attributes(source='na_elementsw_initiators')) + else: + initiator['attributes'] = self.elementsw_helper.set_element_attributes(source='na_elementsw_initiators') + + def compare_initiators(self, user_initiator, existing_initiator): + """ + compare user input initiator with existing dict + :return: True if matched, False otherwise + """ + if user_initiator is None or existing_initiator is None: + return False + changed = False + for param in user_initiator: + # lookup initiator_name instead of name + if param == 'name': + if user_initiator['name'] == existing_initiator['initiator_name']: + pass + elif param == 'initiator_id': + # can't change the key + pass + elif 
user_initiator[param] == existing_initiator[param]: + pass + else: + self.debug.append('Initiator: %s. Changed: %s from: %s to %s' % + (user_initiator['name'], param, str(existing_initiator[param]), str(user_initiator[param]))) + changed = True + return changed + + def initiator_to_dict(self, initiator_obj): + """ + converts initiator class object to dict + :return: reconstructed initiator dict + """ + known_params = ['initiator_name', + 'alias', + 'initiator_id', + 'volume_access_groups', + 'volume_access_group_id', + 'attributes'] + initiator_dict = {} + + # missing parameter cause error + # so assign defaults + for param in known_params: + initiator_dict[param] = getattr(initiator_obj, param, None) + if initiator_dict['volume_access_groups'] is not None: + if len(initiator_dict['volume_access_groups']) == 1: + initiator_dict['volume_access_group_id'] = initiator_dict['volume_access_groups'][0] + elif len(initiator_dict['volume_access_groups']) > 1: + self.module.fail_json(msg="Only 1 access group is supported, found: %s" % repr(initiator_obj)) + del initiator_dict['volume_access_groups'] + return initiator_dict + + def find_initiator(self, id=None, name=None): + """ + find a specific initiator + :return: initiator dict + """ + initiator_details = None + if self.all_existing_initiators is None: + return initiator_details + for initiator in self.all_existing_initiators: + # if name is provided or + # if id is provided + if name is not None: + if initiator.initiator_name == name: + initiator_details = self.initiator_to_dict(initiator) + elif id is not None: + if initiator.initiator_id == id: + initiator_details = self.initiator_to_dict(initiator) + else: + # if neither id nor name provided + # return everything + initiator_details = self.all_existing_initiators + return initiator_details + + @staticmethod + def rename_key(obj, old_name, new_name): + obj[new_name] = obj.pop(old_name) + + def create_initiator(self, initiator): + """ + create initiator + """ + # SF 
SDK is using camelCase for this one + self.rename_key(initiator, 'volume_access_group_id', 'volumeAccessGroupID') + # create_initiators needs an array + initiator_list = [initiator] + try: + self.sfe.create_initiators(initiator_list) + except Exception as exception_object: + self.module.fail_json(msg='Error creating initiator %s' % (to_native(exception_object)), + exception=traceback.format_exc()) + + def delete_initiator(self, initiator): + """ + delete initiator + """ + # delete_initiators needs an array + initiator_id_array = [initiator] + try: + self.sfe.delete_initiators(initiator_id_array) + except Exception as exception_object: + self.module.fail_json(msg='Error deleting initiator %s' % (to_native(exception_object)), + exception=traceback.format_exc()) + + def modify_initiator(self, initiator, existing_initiator): + """ + modify initiator + """ + # create the new initiator dict + # by merging old and new values + merged_initiator = existing_initiator.copy() + # can't change the key + del initiator['initiator_id'] + merged_initiator.update(initiator) + + # we MUST create an object before sending + # the new initiator to modify_initiator + initiator_object = ModifyInitiator(initiator_id=merged_initiator['initiator_id'], + alias=merged_initiator['alias'], + volume_access_group_id=merged_initiator['volume_access_group_id'], + attributes=merged_initiator['attributes']) + initiator_list = [initiator_object] + try: + self.sfe.modify_initiators(initiators=initiator_list) + except Exception as exception_object: + self.module.fail_json(msg='Error modifying initiator: %s' % (to_native(exception_object)), + exception=traceback.format_exc()) + + def apply(self): + """ + configure initiators + """ + changed = False + result_message = None + + # get all user provided initiators + input_initiators = self.parameters.get('initiators') + + # get all initiators + # store in a cache variable + self.all_existing_initiators = self.sfe.list_initiators().initiators + + # iterate 
over each user-provided initiator + for in_initiator in input_initiators: + if self.parameters.get('state') == 'present': + # check if initiator_id is provided and exists + if 'initiator_id' in in_initiator and in_initiator['initiator_id'] is not None and \ + self.find_initiator(id=in_initiator['initiator_id']) is not None: + if self.compare_initiators(in_initiator, self.find_initiator(id=in_initiator['initiator_id'])): + changed = True + result_message = 'modifying initiator(s)' + self.modify_initiator(in_initiator, self.find_initiator(id=in_initiator['initiator_id'])) + # otherwise check if name is provided and exists + elif 'name' in in_initiator and in_initiator['name'] is not None and self.find_initiator(name=in_initiator['name']) is not None: + if self.compare_initiators(in_initiator, self.find_initiator(name=in_initiator['name'])): + changed = True + result_message = 'modifying initiator(s)' + self.modify_initiator(in_initiator, self.find_initiator(name=in_initiator['name'])) + # this is a create op if initiator doesn't exist + else: + changed = True + result_message = 'creating initiator(s)' + self.create_initiator(in_initiator) + elif self.parameters.get('state') == 'absent': + # delete_initiators only processes ids + # so pass ids of initiators to method + if 'name' in in_initiator and in_initiator['name'] is not None and \ + self.find_initiator(name=in_initiator['name']) is not None: + changed = True + result_message = 'deleting initiator(s)' + self.delete_initiator(self.find_initiator(name=in_initiator['name'])['initiator_id']) + elif 'initiator_id' in in_initiator and in_initiator['initiator_id'] is not None and \ + self.find_initiator(id=in_initiator['initiator_id']) is not None: + changed = True + result_message = 'deleting initiator(s)' + self.delete_initiator(in_initiator['initiator_id']) + if self.module.check_mode is True: + result_message = "Check mode, skipping changes" + if self.debug: + result_message += ". 
%s" % self.debug + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + """ + Main function + """ + na_elementsw_initiators = ElementSWInitiators() + na_elementsw_initiators.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py new file mode 100644 index 000000000..a71ddf564 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py @@ -0,0 +1,254 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_ldap + +short_description: NetApp Element Software Manage ldap admin users +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Enable, disable ldap, and add ldap users + +options: + + state: + description: + - Whether the specified volume should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + authType: + description: + - Identifies which user authentication method to use. 
+        choices: ['DirectBind', 'SearchAndBind']
+        type: str
+
+    groupSearchBaseDn:
+        description:
+        - The base DN of the tree to start the group search (will do a subtree search from here)
+        type: str
+
+    groupSearchType:
+        description:
+        - Controls the default group search filter used
+        choices: ['NoGroup', 'ActiveDirectory', 'MemberDN']
+        type: str
+
+    serverURIs:
+        description:
+        - A comma-separated list of LDAP server URIs
+        type: str
+
+    userSearchBaseDN:
+        description:
+        - The base DN of the tree to start the search (will do a subtree search from here)
+        type: str
+
+    searchBindDN:
+        description:
+        - A fully qualified DN to log in with to perform an LDAP search for the user (needs read access to the LDAP directory).
+        type: str
+
+    searchBindPassword:
+        description:
+        - The password for the searchBindDN account used for searching
+        type: str
+
+    userSearchFilter:
+        description:
+        - the LDAP Filter to use
+        type: str
+
+    userDNTemplate:
+        description:
+        - A string that is used to form a fully qualified user DN.
+ type: str + + groupSearchCustomFilter: + description: + - For use with the CustomFilter Search type + type: str +''' + +EXAMPLES = """ + - name: disable ldap authentication + na_elementsw_ldap: + state: absent + username: "{{ admin username }}" + password: "{{ admin password }}" + hostname: "{{ hostname }}" + + - name: Enable ldap authentication + na_elementsw_ldap: + state: present + username: "{{ admin username }}" + password: "{{ admin password }}" + hostname: "{{ hostname }}" + authType: DirectBind + serverURIs: ldap://svmdurlabesx01spd_ldapclnt + groupSearchType: MemberDN + userDNTemplate: uid=%USERNAME%,cn=users,cn=accounts,dc=corp,dc="{{ company name }}",dc=com + + +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + import solidfire.common +except Exception: + HAS_SF_SDK = False + + +class NetappElementLdap(object): + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + authType=dict(type='str', choices=['DirectBind', 'SearchAndBind']), + groupSearchBaseDn=dict(type='str'), + groupSearchType=dict(type='str', choices=['NoGroup', 'ActiveDirectory', 'MemberDN']), + serverURIs=dict(type='str'), + userSearchBaseDN=dict(type='str'), + searchBindDN=dict(type='str'), + searchBindPassword=dict(type='str', no_log=True), + userSearchFilter=dict(type='str'), + userDNTemplate=dict(type='str'), + groupSearchCustomFilter=dict(type='str'), + ) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + ) + + param = self.module.params + + # set up state variables + self.state = param['state'] + self.authType = param['authType'] + 
self.groupSearchBaseDn = param['groupSearchBaseDn'] + self.groupSearchType = param['groupSearchType'] + self.serverURIs = param['serverURIs'] + if self.serverURIs is not None: + self.serverURIs = self.serverURIs.split(',') + self.userSearchBaseDN = param['userSearchBaseDN'] + self.searchBindDN = param['searchBindDN'] + self.searchBindPassword = param['searchBindPassword'] + self.userSearchFilter = param['userSearchFilter'] + self.userDNTemplate = param['userDNTemplate'] + self.groupSearchCustomFilter = param['groupSearchCustomFilter'] + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + def get_ldap_configuration(self): + """ + Return ldap configuration if found + + :return: Details about the ldap configuration. None if not found. + :rtype: solidfire.models.GetLdapConfigurationResult + """ + ldap_config = self.sfe.get_ldap_configuration() + return ldap_config + + def enable_ldap(self): + """ + Enable LDAP + :return: nothing + """ + try: + self.sfe.enable_ldap_authentication(self.serverURIs, auth_type=self.authType, + group_search_base_dn=self.groupSearchBaseDn, + group_search_type=self.groupSearchType, + group_search_custom_filter=self.groupSearchCustomFilter, + search_bind_dn=self.searchBindDN, + search_bind_password=self.searchBindPassword, + user_search_base_dn=self.userSearchBaseDN, + user_search_filter=self.userSearchFilter, + user_dntemplate=self.userDNTemplate) + except solidfire.common.ApiServerError as error: + self.module.fail_json(msg='Error enabling LDAP: %s' % (to_native(error)), + exception=traceback.format_exc()) + + def check_config(self, ldap_config): + """ + Check to see if the ldap config has been modified. 
+ :param ldap_config: The LDAP configuration + :return: False if the config is the same as the playbook, True if it is not + """ + if self.authType != ldap_config.ldap_configuration.auth_type: + return True + if self.serverURIs != ldap_config.ldap_configuration.server_uris: + return True + if self.groupSearchBaseDn != ldap_config.ldap_configuration.group_search_base_dn: + return True + if self.groupSearchType != ldap_config.ldap_configuration.group_search_type: + return True + if self.groupSearchCustomFilter != ldap_config.ldap_configuration.group_search_custom_filter: + return True + if self.searchBindDN != ldap_config.ldap_configuration.search_bind_dn: + return True + if self.searchBindPassword != ldap_config.ldap_configuration.search_bind_password: + return True + if self.userSearchBaseDN != ldap_config.ldap_configuration.user_search_base_dn: + return True + if self.userSearchFilter != ldap_config.ldap_configuration.user_search_filter: + return True + if self.userDNTemplate != ldap_config.ldap_configuration.user_dntemplate: + return True + return False + + def apply(self): + changed = False + ldap_config = self.get_ldap_configuration() + if self.state == 'absent': + if ldap_config and ldap_config.ldap_configuration.enabled: + changed = True + if self.state == 'present' and self.check_config(ldap_config): + changed = True + if changed: + if self.module.check_mode: + pass + else: + if self.state == 'present': + self.enable_ldap() + elif self.state == 'absent': + self.sfe.disable_ldap_authentication() + + self.module.exit_json(changed=changed) + + +def main(): + v = NetappElementLdap() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py new file mode 100644 index 000000000..a9151a620 --- /dev/null +++ 
b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py
@@ -0,0 +1,423 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)

'''
Element Software Node Network Interfaces - Bond 1G and 10G configuration
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}


DOCUMENTATION = '''

module: na_elementsw_network_interfaces

short_description: NetApp Element Software Configure Node Network Interfaces
extends_documentation_fragment:
    - netapp.elementsw.netapp.solidfire
version_added: 2.7.0
author: NetApp Ansible Team (@carchi8py)
description:
    - Configure Element SW Node Network Interfaces for Bond 1G and 10G IP addresses.
    - This module does not create interfaces, it expects the interfaces to already exist and can only modify them.
    - This module cannot set or modify the method (Loopback, manual, dhcp, static).
    - This module is not idempotent and does not support check_mode.

options:
    method:
        description:
        - deprecated, this option would trigger an 'update failed' error
        type: str

    ip_address_1g:
        description:
        - deprecated, use bond_1g option.
        type: str

    ip_address_10g:
        description:
        - deprecated, use bond_10g option.
        type: str

    subnet_1g:
        description:
        - deprecated, use bond_1g option.
        type: str

    subnet_10g:
        description:
        - deprecated, use bond_10g option.
        type: str

    gateway_address_1g:
        description:
        - deprecated, use bond_1g option.
        type: str

    gateway_address_10g:
        description:
        - deprecated, use bond_10g option.
        type: str

    mtu_1g:
        description:
        - deprecated, use bond_1g option.
        type: str

    mtu_10g:
        description:
        - deprecated, use bond_10g option.
+ type: str + + dns_nameservers: + description: + - deprecated, use bond_1g and bond_10g options. + type: list + elements: str + + dns_search_domains: + description: + - deprecated, use bond_1g and bond_10g options. + type: list + elements: str + + bond_mode_1g: + description: + - deprecated, use bond_1g option. + type: str + + bond_mode_10g: + description: + - deprecated, use bond_10g option. + type: str + + lacp_1g: + description: + - deprecated, use bond_1g option. + type: str + + lacp_10g: + description: + - deprecated, use bond_10g option. + type: str + + virtual_network_tag: + description: + - deprecated, use bond_1g and bond_10g options. + type: str + + bond_1g: + description: + - settings for the Bond1G interface. + type: dict + suboptions: + address: + description: + - IP address for the interface. + type: str + netmask: + description: + - subnet mask for the interface. + type: str + gateway: + description: + - IP router network address to send packets out of the local network. + type: str + mtu: + description: + - The largest packet size (in bytes) that the interface can transmit.. + - Must be greater than or equal to 1500 bytes. + type: str + dns_nameservers: + description: + - List of addresses for domain name servers. + type: list + elements: str + dns_search: + description: + - List of DNS search domains. + type: list + elements: str + bond_mode: + description: + - Bonding mode. + choices: ['ActivePassive', 'ALB', 'LACP'] + type: str + bond_lacp_rate: + description: + - Link Aggregation Control Protocol - useful only if LACP is selected as the Bond Mode. + - Slow - Packets are transmitted at 30 second intervals. + - Fast - Packets are transmitted in 1 second intervals. + choices: ['Fast', 'Slow'] + type: str + virtual_network_tag: + description: + - The virtual network identifier of the interface (VLAN tag). + type: str + + bond_10g: + description: + - settings for the Bond10G interface. 
+ type: dict + suboptions: + address: + description: + - IP address for the interface. + type: str + netmask: + description: + - subnet mask for the interface. + type: str + gateway: + description: + - IP router network address to send packets out of the local network. + type: str + mtu: + description: + - The largest packet size (in bytes) that the interface can transmit.. + - Must be greater than or equal to 1500 bytes. + type: str + dns_nameservers: + description: + - List of addresses for domain name servers. + type: list + elements: str + dns_search: + description: + - List of DNS search domains. + type: list + elements: str + bond_mode: + description: + - Bonding mode. + choices: ['ActivePassive', 'ALB', 'LACP'] + type: str + bond_lacp_rate: + description: + - Link Aggregation Control Protocol - useful only if LACP is selected as the Bond Mode. + - Slow - Packets are transmitted at 30 second intervals. + - Fast - Packets are transmitted in 1 second intervals. + choices: ['Fast', 'Slow'] + type: str + virtual_network_tag: + description: + - The virtual network identifier of the interface (VLAN tag). 
+ type: str + +''' + +EXAMPLES = """ + + - name: Set Node network interfaces configuration for Bond 1G and 10G properties + tags: + - elementsw_network_interfaces + na_elementsw_network_interfaces: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + bond_1g: + address: 10.253.168.131 + netmask: 255.255.248.0 + gateway: 10.253.168.1 + mtu: '1500' + bond_mode: ActivePassive + dns_nameservers: dns1,dns2 + dns_search: domain1,domain2 + bond_10g: + address: 10.253.1.202 + netmask: 255.255.255.192 + gateway: 10.253.1.193 + mtu: '9000' + bond_mode: LACP + bond_lacp_rate: Fast + virtual_network_tag: vnet_tag +""" + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + +try: + from solidfire.models import Network, NetworkConfig + from solidfire.common import ApiConnectionError as sf_ApiConnectionError, ApiServerError as sf_ApiServerError + HAS_SF_SDK = True +except ImportError: + HAS_SF_SDK = False + + +class ElementSWNetworkInterfaces(object): + """ + Element Software Network Interfaces - Bond 1G and 10G Network configuration + """ + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + method=dict(required=False, type='str'), + ip_address_1g=dict(required=False, type='str'), + ip_address_10g=dict(required=False, type='str'), + subnet_1g=dict(required=False, type='str'), + subnet_10g=dict(required=False, type='str'), + gateway_address_1g=dict(required=False, type='str'), + gateway_address_10g=dict(required=False, type='str'), + mtu_1g=dict(required=False, type='str'), + mtu_10g=dict(required=False, type='str'), + dns_nameservers=dict(required=False, 
type='list', elements='str'), + dns_search_domains=dict(required=False, type='list', elements='str'), + bond_mode_1g=dict(required=False, type='str'), + bond_mode_10g=dict(required=False, type='str'), + lacp_1g=dict(required=False, type='str'), + lacp_10g=dict(required=False, type='str'), + virtual_network_tag=dict(required=False, type='str'), + bond_1g=dict(required=False, type='dict', options=dict( + address=dict(required=False, type='str'), + netmask=dict(required=False, type='str'), + gateway=dict(required=False, type='str'), + mtu=dict(required=False, type='str'), + dns_nameservers=dict(required=False, type='list', elements='str'), + dns_search=dict(required=False, type='list', elements='str'), + bond_mode=dict(required=False, type='str', choices=['ActivePassive', 'ALB', 'LACP']), + bond_lacp_rate=dict(required=False, type='str', choices=['Fast', 'Slow']), + virtual_network_tag=dict(required=False, type='str'), + )), + bond_10g=dict(required=False, type='dict', options=dict( + address=dict(required=False, type='str'), + netmask=dict(required=False, type='str'), + gateway=dict(required=False, type='str'), + mtu=dict(required=False, type='str'), + dns_nameservers=dict(required=False, type='list', elements='str'), + dns_search=dict(required=False, type='list', elements='str'), + bond_mode=dict(required=False, type='str', choices=['ActivePassive', 'ALB', 'LACP']), + bond_lacp_rate=dict(required=False, type='str', choices=['Fast', 'Slow']), + virtual_network_tag=dict(required=False, type='str'), + )), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=False + ) + + input_params = self.module.params + self.fail_when_deprecated_options_are_set(input_params) + + self.bond1g = input_params['bond_1g'] + self.bond10g = input_params['bond_10g'] + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + # increase time out, as it may take 30 seconds when making a change + self.sfe = 
netapp_utils.create_sf_connection(module=self.module, port=442, timeout=90) + + def fail_when_deprecated_options_are_set(self, input_params): + ''' report an error and exit if any deprecated options is set ''' + + dparms_1g = [x for x in ('ip_address_1g', 'subnet_1g', 'gateway_address_1g', 'mtu_1g', 'bond_mode_1g', 'lacp_1g') + if input_params[x] is not None] + dparms_10g = [x for x in ('ip_address_10g', 'subnet_10g', 'gateway_address_10g', 'mtu_10g', 'bond_mode_10g', 'lacp_10g') + if input_params[x] is not None] + dparms_common = [x for x in ('dns_nameservers', 'dns_search_domains', 'virtual_network_tag') + if input_params[x] is not None] + + error_msg = '' + if dparms_1g and dparms_10g: + error_msg = 'Please use the new bond_1g and bond_10g options to configure the bond interfaces.' + elif dparms_1g: + error_msg = 'Please use the new bond_1g option to configure the bond 1G interface.' + elif dparms_10g: + error_msg = 'Please use the new bond_10g option to configure the bond 10G interface.' + elif dparms_common: + error_msg = 'Please use the new bond_1g or bond_10g options to configure the bond interfaces.' + if input_params['method']: + error_msg = 'This module cannot set or change "method". 
' + error_msg + dparms_common.append('method') + if error_msg: + error_msg += ' The following parameters are deprecated and cannot be used: ' + dparms = dparms_1g + dparms.extend(dparms_10g) + dparms.extend(dparms_common) + error_msg += ', '.join(dparms) + self.module.fail_json(msg=error_msg) + + def set_network_config(self, network_object): + """ + set network configuration + """ + try: + self.sfe.set_network_config(network=network_object) + except (sf_ApiConnectionError, sf_ApiServerError) as exception_object: + self.module.fail_json(msg='Error setting network config for node %s' % (to_native(exception_object)), + exception=traceback.format_exc()) + + def set_network_config_object(self, network_params): + ''' set SolidFire network config object ''' + network_config = dict() + if network_params is not None: + for key in network_params: + if network_params[key] is not None: + network_config[key] = network_params[key] + if network_config: + return NetworkConfig(**network_config) + return None + + def set_network_object(self): + """ + Set Element SW Network object + :description: set Network object + + :return: Network object + :rtype: object(Network object) + """ + bond_1g_network = self.set_network_config_object(self.bond1g) + bond_10g_network = self.set_network_config_object(self.bond10g) + network_object = None + if bond_1g_network is not None or bond_10g_network is not None: + network_object = Network(bond1_g=bond_1g_network, + bond10_g=bond_10g_network) + return network_object + + def apply(self): + """ + Check connection and initialize node with cluster ownership + """ + changed = False + result_message = None + network_object = self.set_network_object() + if network_object is not None: + if not self.module.check_mode: + self.set_network_config(network_object) + changed = True + else: + result_message = "Skipping changes, No change requested" + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + """ + Main function + """ + 
elementsw_network_interfaces = ElementSWNetworkInterfaces() + elementsw_network_interfaces.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py new file mode 100644 index 000000000..d1412f2d4 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py @@ -0,0 +1,357 @@ +#!/usr/bin/python +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +Element Software Node Operation +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_node + +short_description: NetApp Element Software Node Operation +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Add, remove cluster node on Element Software Cluster. + - Set cluster name on node. + - When using the preset_only option, hostname/username/password are required but not used. + +options: + state: + description: + - Element Software Storage Node operation state. + - present - To add pending node to participate in cluster data storage. + - absent - To remove node from active cluster. A node cannot be removed if active drives are present. + choices: ['present', 'absent'] + default: 'present' + type: str + + node_ids: + description: + - List of IDs or Names or IP Addresses of nodes to add or remove. + - If cluster_name is set, node MIPs are required. + type: list + elements: str + required: true + aliases: ['node_id'] + + cluster_name: + description: + - If set, the current node configuration is updated with this name before adding the node to the cluster. 
+ - This requires the node_ids to be specified as MIPs (Management IP Adresses) + type: str + version_added: 20.9.0 + + preset_only: + description: + - If true and state is 'present', set the cluster name for each node in node_ids, but do not add the nodes. + - They can be added using na_elementsw_cluster for initial cluster creation. + - If false, proceed with addition/removal. + type: bool + default: false + version_added: 20.9.0 +''' + +EXAMPLES = """ + - name: Add node from pending to active cluster + tags: + - elementsw_add_node + na_elementsw_node: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + node_id: sf4805-meg-03 + + - name: Remove active node from cluster + tags: + - elementsw_remove_node + na_elementsw_node: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + node_id: 13 + + - name: Add node from pending to active cluster using node IP + tags: + - elementsw_add_node_ip + na_elementsw_node: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + node_id: 10.109.48.65 + cluster_name: sfcluster01 + + - name: Only set cluster name + tags: + - elementsw_add_node_ip + na_elementsw_node: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + node_ids: 10.109.48.65,10.109.48.66 + cluster_name: sfcluster01 + preset_only: true +""" + + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class ElementSWNode(object): + """ + Element SW 
Storage Node operations + """ + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + node_ids=dict(required=True, type='list', elements='str', aliases=['node_id']), + cluster_name=dict(required=False, type='str'), + preset_only=dict(required=False, type='bool', default=False), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + input_params = self.module.params + + self.state = input_params['state'] + self.node_ids = input_params['node_ids'] + self.cluster_name = input_params['cluster_name'] + self.preset_only = input_params['preset_only'] + + if HAS_SF_SDK is False: + self.module.fail_json( + msg="Unable to import the SolidFire Python SDK") + elif not self.preset_only: + # Cluster connection is only needed for add/delete operations + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + def check_node_has_active_drives(self, node_id=None): + """ + Check if node has active drives attached to cluster + :description: Validate if node have active drives in cluster + + :return: True or False + :rtype: bool + """ + if node_id is not None: + cluster_drives = self.sfe.list_drives() + for drive in cluster_drives.drives: + if drive.node_id == node_id and drive.status == "active": + return True + return False + + @staticmethod + def extract_node_info(node_list): + summary = list() + for node in node_list: + node_dict = dict() + for key, value in vars(node).items(): + if key in ['assigned_node_id', 'cip', 'mip', 'name', 'node_id', 'pending_node_id', 'sip']: + node_dict[key] = value + summary.append(node_dict) + return summary + + def get_node_list(self): + """ + Get Node List + :description: Find and retrieve node_ids from the active cluster + + :return: None + :rtype: None + """ + action_nodes_list = list() + if len(self.node_ids) > 0: + 
unprocessed_node_list = list(self.node_ids) + list_nodes = [] + try: + all_nodes = self.sfe.list_all_nodes() + except netapp_utils.solidfire.common.ApiServerError as exception_object: + self.module.fail_json(msg='Error getting list of nodes from cluster: %s' % to_native(exception_object), + exception=traceback.format_exc()) + + # For add operation lookup for nodes list with status pendingNodes list + # else nodes will have to be traverse through active cluster + if self.state == "present": + list_nodes = all_nodes.pending_nodes + else: + list_nodes = all_nodes.nodes + + for current_node in list_nodes: + if self.state == "absent" and \ + (str(current_node.node_id) in self.node_ids or current_node.name in self.node_ids or current_node.mip in self.node_ids): + if self.check_node_has_active_drives(current_node.node_id): + self.module.fail_json(msg='Error deleting node %s: node has active drives' % current_node.name) + else: + action_nodes_list.append(current_node.node_id) + if self.state == "present" and \ + (str(current_node.pending_node_id) in self.node_ids or current_node.name in self.node_ids or current_node.mip in self.node_ids): + action_nodes_list.append(current_node.pending_node_id) + + # report an error if state == present and node is unknown + if self.state == "present": + for current_node in all_nodes.nodes: + if str(current_node.node_id) in unprocessed_node_list: + unprocessed_node_list.remove(str(current_node.node_id)) + elif current_node.name in unprocessed_node_list: + unprocessed_node_list.remove(current_node.name) + elif current_node.mip in unprocessed_node_list: + unprocessed_node_list.remove(current_node.mip) + for current_node in all_nodes.pending_nodes: + if str(current_node.pending_node_id) in unprocessed_node_list: + unprocessed_node_list.remove(str(current_node.pending_node_id)) + elif current_node.name in unprocessed_node_list: + unprocessed_node_list.remove(current_node.name) + elif current_node.mip in unprocessed_node_list: + 
unprocessed_node_list.remove(current_node.mip) + if len(unprocessed_node_list) > 0: + summary = dict( + nodes=self.extract_node_info(all_nodes.nodes), + pending_nodes=self.extract_node_info(all_nodes.pending_nodes), + pending_active_nodes=self.extract_node_info(all_nodes.pending_active_nodes) + ) + self.module.fail_json(msg='Error adding nodes %s: nodes not in pending or active lists: %s' % + (to_native(unprocessed_node_list), repr(summary))) + return action_nodes_list + + def add_node(self, nodes_list=None): + """ + Add Node that are on PendingNodes list available on Cluster + """ + try: + self.sfe.add_nodes(nodes_list, auto_install=True) + except Exception as exception_object: + self.module.fail_json(msg='Error adding nodes %s to cluster: %s' % (nodes_list, to_native(exception_object)), + exception=traceback.format_exc()) + + def remove_node(self, nodes_list=None): + """ + Remove active node from Cluster + """ + try: + self.sfe.remove_nodes(nodes_list) + except Exception as exception_object: + self.module.fail_json(msg='Error removing nodes %s from cluster %s' % (nodes_list, to_native(exception_object)), + exception=traceback.format_exc()) + + def set_cluster_name(self, node): + ''' set up cluster name for the node using its MIP ''' + cluster = dict(cluster=self.cluster_name) + port = 442 + try: + node_cx = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, hostname=node, port=port) + except netapp_utils.solidfire.common.ApiConnectionError as exc: + if str(exc) == "Bad Credentials": + msg = 'Most likely the node %s is already in a cluster.' % node + msg += ' Make sure to use valid node credentials for username and password.' 
+ msg += ' Node reported: %s' % repr(exc) + else: + msg = 'Failed to create connection: %s' % repr(exc) + self.module.fail_json(msg=msg) + except Exception as exc: + self.module.fail_json(msg='Failed to connect to %s:%d - %s' % (node, port, to_native(exc)), + exception=traceback.format_exc()) + + try: + cluster_config = node_cx.get_cluster_config() + except netapp_utils.solidfire.common.ApiServerError as exc: + self.module.fail_json(msg='Error getting cluster config: %s' % to_native(exc), + exception=traceback.format_exc()) + + if cluster_config.cluster.cluster == self.cluster_name: + return False + if cluster_config.cluster.state == 'Active': + self.module.fail_json(msg="Error updating cluster name for node %s, already in 'Active' state" + % node, cluster_config=repr(cluster_config)) + if self.module.check_mode: + return True + + try: + node_cx.set_cluster_config(cluster) + except netapp_utils.solidfire.common.ApiServerError as exc: + self.module.fail_json(msg='Error updating cluster name: %s' % to_native(exc), + cluster_config=repr(cluster_config), + exception=traceback.format_exc()) + return True + + def apply(self): + """ + Check, process and initiate Cluster Node operation + """ + changed = False + updated_nodes = list() + result_message = '' + if self.state == "present" and self.cluster_name is not None: + for node in self.node_ids: + if self.set_cluster_name(node): + changed = True + updated_nodes.append(node) + if not self.preset_only: + # let's see if there is anything to add or remove + action_nodes_list = self.get_node_list() + action = None + if self.state == "present" and len(action_nodes_list) > 0: + changed = True + action = 'added' + if not self.module.check_mode: + self.add_node(action_nodes_list) + elif self.state == "absent" and len(action_nodes_list) > 0: + changed = True + action = 'removed' + if not self.module.check_mode: + self.remove_node(action_nodes_list) + if action: + result_message = 'List of %s nodes: %s - requested: %s' % (action, 
to_native(action_nodes_list), to_native(self.node_ids)) + if updated_nodes: + result_message += '\n' if result_message else '' + result_message += 'List of updated nodes with %s: %s' % (self.cluster_name, updated_nodes) + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + """ + Main function + """ + + na_elementsw_node = ElementSWNode() + na_elementsw_node.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py new file mode 100644 index 000000000..9d9e16994 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py @@ -0,0 +1,270 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Element Software QOS Policy +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_qos_policy + +short_description: NetApp Element Software create/modify/rename/delete QOS Policy +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 20.9.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, modify, rename, or delete QOS policy on Element Software Cluster. + +options: + + state: + description: + - Whether the specified QOS policy should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + name: + description: + - Name or id for the QOS policy. + required: true + type: str + + from_name: + description: + - Name or id for the QOS policy to be renamed. + type: str + + qos: + description: + - The quality of service (QQOS) for the policy. 
+ - Required for create + - Supported keys are minIOPS, maxIOPS, burstIOPS + type: dict + suboptions: + minIOPS: + description: The minimum number of IOPS guaranteed for the volume. + type: int + version_added: 21.3.0 + maxIOPS: + description: The maximum number of IOPS allowed for the volume. + type: int + version_added: 21.3.0 + burstIOPS: + description: The maximum number of IOPS allowed over a short period of time for the volume. + type: int + version_added: 21.3.0 + debug: + description: report additional information when set to true. + type: bool + default: false + version_added: 21.3.0 +''' + +EXAMPLES = """ + - name: Add QOS Policy + na_elementsw_qos_policy: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + name: gold + qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000} + + - name: Modify QOS Policy + na_elementsw_qos_policy: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + name: gold + qos: {minIOPS: 100, maxIOPS: 5000, burstIOPS: 20000} + + - name: Rename QOS Policy + na_elementsw_qos_policy: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + from_name: gold + name: silver + + - name: Remove QOS Policy + na_elementsw_qos_policy: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + name: silver +""" + + +RETURN = """ +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module 
import NetAppModule + + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + import solidfire.common +except ImportError: + HAS_SF_SDK = False + + +class ElementSWQosPolicy(object): + """ + Element Software QOS Policy + """ + + def __init__(self): + + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + qos=dict(required=False, type='dict', options=dict( + minIOPS=dict(type='int'), + maxIOPS=dict(type='int'), + burstIOPS=dict(type='int'), + )), + debug=dict(required=False, type='bool', default=False) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # Set up state variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.qos_policy_id = None + self.debug = dict() + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + self.elementsw_helper = NaElementSWModule(self.sfe) + + # add telemetry attributes + self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_qos_policy') + + def get_qos_policy(self, name): + """ + Get QOS Policy + """ + policy, error = self.elementsw_helper.get_qos_policy(name) + if error is not None: + self.module.fail_json(msg=error, exception=traceback.format_exc()) + self.debug['current_policy'] = policy + return policy + + def create_qos_policy(self, name, qos): + """ + Create the QOS Policy + """ + try: + self.sfe.create_qos_policy(name=name, qos=qos) + except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc: + self.module.fail_json(msg="Error creating qos policy: %s: %s" % + (name, to_native(exc)), 
exception=traceback.format_exc()) + + def update_qos_policy(self, qos_policy_id, modify, name=None): + """ + Update the QOS Policy if the policy already exists + """ + options = dict( + qos_policy_id=qos_policy_id + ) + if name is not None: + options['name'] = name + if 'qos' in modify: + options['qos'] = modify['qos'] + + try: + self.sfe.modify_qos_policy(**options) + except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc: + self.module.fail_json(msg="Error updating qos policy: %s: %s" % + (self.parameters['from_name'] if name is not None else self.parameters['name'], to_native(exc)), + exception=traceback.format_exc()) + + def delete_qos_policy(self, qos_policy_id): + """ + Delete the QOS Policy + """ + try: + self.sfe.delete_qos_policy(qos_policy_id=qos_policy_id) + except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc: + self.module.fail_json(msg="Error deleting qos policy: %s: %s" % + (self.parameters['name'], to_native(exc)), exception=traceback.format_exc()) + + def apply(self): + """ + Process the create/delete/rename/modify actions for qos policy on the Element Software Cluster + """ + modify = dict() + current = self.get_qos_policy(self.parameters['name']) + qos_policy_id = None if current is None else current['qos_policy_id'] + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name') is not None: + from_qos_policy = self.get_qos_policy(self.parameters['from_name']) + if from_qos_policy is None: + self.module.fail_json(msg="Error renaming qos policy, no existing policy with name/id: %s" % self.parameters['from_name']) + cd_action = 'rename' + qos_policy_id = from_qos_policy['qos_policy_id'] + self.na_helper.changed = True + modify = self.na_helper.get_modified_attributes(from_qos_policy, self.parameters) + if cd_action == 'create' and 'qos' not in 
self.parameters: + self.module.fail_json(msg="Error creating qos policy: %s, 'qos:' option is required" % self.parameters['name']) + self.debug['modify'] = modify + + if not self.module.check_mode: + if cd_action == 'create': + self.create_qos_policy(self.parameters['name'], self.parameters['qos']) + elif cd_action == 'delete': + self.delete_qos_policy(qos_policy_id) + elif cd_action == 'rename': + self.update_qos_policy(qos_policy_id, modify, name=self.parameters['name']) + elif modify: + self.update_qos_policy(qos_policy_id, modify) + + results = dict(changed=self.na_helper.changed) + if self.parameters['debug']: + results['debug'] = self.debug + self.module.exit_json(**results) + + +def main(): + """ + Main function + """ + na_elementsw_qos_policy = ElementSWQosPolicy() + na_elementsw_qos_policy.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py new file mode 100644 index 000000000..23144e42e --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py @@ -0,0 +1,369 @@ +#!/usr/bin/python +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +Element OS Software Snapshot Manager +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_snapshot + +short_description: NetApp Element Software Manage Snapshots +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create, Modify or Delete Snapshot on Element OS Cluster. + +options: + name: + description: + - Name of new snapshot create. 
+ - If unspecified, date and time when the snapshot was taken is used. + type: str + + state: + description: + - Whether the specified snapshot should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + src_volume_id: + description: + - ID or Name of active volume. + required: true + type: str + + account_id: + description: + - Account ID or Name of Parent/Source Volume. + required: true + type: str + + retention: + description: + - Retention period for the snapshot. + - Format is 'HH:mm:ss'. + type: str + + src_snapshot_id: + description: + - ID or Name of an existing snapshot. + - Required when C(state=present), to modify snapshot properties. + - Required when C(state=present), to create snapshot from another snapshot in the volume. + - Required when C(state=absent), to delete snapshot. + type: str + + enable_remote_replication: + description: + - Flag, whether to replicate the snapshot created to a remote replication cluster. + - To enable specify 'true' value. + type: bool + + snap_mirror_label: + description: + - Label used by SnapMirror software to specify snapshot retention policy on SnapMirror endpoint. + type: str + + expiration_time: + description: + - The date and time (format ISO 8601 date string) at which this snapshot will expire. 
+ type: str +''' + +EXAMPLES = """ + - name: Create snapshot + tags: + - elementsw_create_snapshot + na_elementsw_snapshot: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + src_volume_id: 118 + account_id: sagarsh + name: newsnapshot-1 + + - name: Modify Snapshot + tags: + - elementsw_modify_snapshot + na_elementsw_snapshot: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + src_volume_id: sagarshansivolume + src_snapshot_id: test1 + account_id: sagarsh + expiration_time: '2018-06-16T12:24:56Z' + enable_remote_replication: false + + - name: Delete Snapshot + tags: + - elementsw_delete_snapshot + na_elementsw_snapshot: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + src_snapshot_id: deltest1 + account_id: sagarsh + src_volume_id: sagarshansivolume +""" + + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule + + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class ElementOSSnapshot(object): + """ + Element OS Snapshot Manager + """ + + def __init__(self): + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + account_id=dict(required=True, type='str'), + name=dict(required=False, type='str'), + src_volume_id=dict(required=True, type='str'), + retention=dict(required=False, type='str'), + src_snapshot_id=dict(required=False, 
type='str'), + enable_remote_replication=dict(required=False, type='bool'), + expiration_time=dict(required=False, type='str'), + snap_mirror_label=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + input_params = self.module.params + + self.state = input_params['state'] + self.name = input_params['name'] + self.account_id = input_params['account_id'] + self.src_volume_id = input_params['src_volume_id'] + self.src_snapshot_id = input_params['src_snapshot_id'] + self.retention = input_params['retention'] + self.properties_provided = False + + self.expiration_time = input_params['expiration_time'] + if input_params['expiration_time'] is not None: + self.properties_provided = True + + self.enable_remote_replication = input_params['enable_remote_replication'] + if input_params['enable_remote_replication'] is not None: + self.properties_provided = True + + self.snap_mirror_label = input_params['snap_mirror_label'] + if input_params['snap_mirror_label'] is not None: + self.properties_provided = True + + if self.state == 'absent' and self.src_snapshot_id is None: + self.module.fail_json( + msg="Please provide required parameter : snapshot_id") + + if HAS_SF_SDK is False: + self.module.fail_json( + msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + self.elementsw_helper = NaElementSWModule(self.sfe) + + # add telemetry attributes + self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_snapshot') + + def get_account_id(self): + """ + Return account id if found + """ + try: + # Update and return self.account_id + self.account_id = self.elementsw_helper.account_exists(self.account_id) + return self.account_id + except Exception as err: + self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err)) + + def get_src_volume_id(self): + 
""" + Return volume id if found + """ + src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id) + if src_vol_id is not None: + # Update and return self.volume_id + self.src_volume_id = src_vol_id + # Return src_volume_id + return self.src_volume_id + return None + + def get_snapshot(self, name=None): + """ + Return snapshot details if found + """ + src_snapshot = None + if name is not None: + src_snapshot = self.elementsw_helper.get_snapshot(name, self.src_volume_id) + elif self.src_snapshot_id is not None: + src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id) + if src_snapshot is not None: + # Update self.src_snapshot_id + self.src_snapshot_id = src_snapshot.snapshot_id + # Return src_snapshot + return src_snapshot + + def create_snapshot(self): + """ + Create Snapshot + """ + try: + self.sfe.create_snapshot(volume_id=self.src_volume_id, + snapshot_id=self.src_snapshot_id, + name=self.name, + enable_remote_replication=self.enable_remote_replication, + retention=self.retention, + snap_mirror_label=self.snap_mirror_label, + attributes=self.attributes) + except Exception as exception_object: + self.module.fail_json( + msg='Error creating snapshot %s' % ( + to_native(exception_object)), + exception=traceback.format_exc()) + + def modify_snapshot(self): + """ + Modify Snapshot Properties + """ + try: + self.sfe.modify_snapshot(snapshot_id=self.src_snapshot_id, + expiration_time=self.expiration_time, + enable_remote_replication=self.enable_remote_replication, + snap_mirror_label=self.snap_mirror_label) + except Exception as exception_object: + self.module.fail_json( + msg='Error modify snapshot %s' % ( + to_native(exception_object)), + exception=traceback.format_exc()) + + def delete_snapshot(self): + """ + Delete Snapshot + """ + try: + self.sfe.delete_snapshot(snapshot_id=self.src_snapshot_id) + except Exception as exception_object: + self.module.fail_json( + msg='Error delete snapshot %s' % ( + 
to_native(exception_object)), + exception=traceback.format_exc()) + + def apply(self): + """ + Check, process and initiate snapshot operation + """ + changed = False + result_message = None + self.get_account_id() + + # Dont proceed if source volume is not found + if self.get_src_volume_id() is None: + self.module.fail_json(msg="Volume id not found %s" % self.src_volume_id) + + # Get snapshot details using source volume + snapshot_detail = self.get_snapshot() + + if snapshot_detail: + if self.properties_provided: + if self.expiration_time != snapshot_detail.expiration_time: + changed = True + else: # To preserve value in case parameter expiration_time is not defined/provided. + self.expiration_time = snapshot_detail.expiration_time + + if self.enable_remote_replication != snapshot_detail.enable_remote_replication: + changed = True + else: # To preserve value in case parameter enable_remote_Replication is not defined/provided. + self.enable_remote_replication = snapshot_detail.enable_remote_replication + + if self.snap_mirror_label != snapshot_detail.snap_mirror_label: + changed = True + else: # To preserve value in case parameter snap_mirror_label is not defined/provided. 
+ self.snap_mirror_label = snapshot_detail.snap_mirror_label + + if self.account_id is None or self.src_volume_id is None or self.module.check_mode: + changed = False + result_message = "Check mode, skipping changes" + elif self.state == 'absent' and snapshot_detail is not None: + self.delete_snapshot() + changed = True + elif self.state == 'present' and snapshot_detail is not None: + if changed: + self.modify_snapshot() # Modify Snapshot properties + elif not self.properties_provided: + if self.name is not None: + snapshot = self.get_snapshot(self.name) + # If snapshot with name already exists return without performing any action + if snapshot is None: + self.create_snapshot() # Create Snapshot using parent src_snapshot_id + changed = True + else: + self.create_snapshot() + changed = True + elif self.state == 'present': + if self.name is not None: + snapshot = self.get_snapshot(self.name) + # If snapshot with name already exists return without performing any action + if snapshot is None: + self.create_snapshot() # Create Snapshot using parent src_snapshot_id + changed = True + else: + self.create_snapshot() + changed = True + else: + changed = False + result_message = "No changes requested, skipping changes" + + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + """ + Main function + """ + + na_elementsw_snapshot = ElementOSSnapshot() + na_elementsw_snapshot.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py new file mode 100644 index 000000000..1e9d8e59a --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py @@ -0,0 +1,203 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Element Software Snapshot 
Restore +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_snapshot_restore + +short_description: NetApp Element Software Restore Snapshot +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Element OS Cluster restore snapshot to volume. + +options: + + src_volume_id: + description: + - ID or Name of source active volume. + required: true + type: str + + src_snapshot_id: + description: + - ID or Name of an existing snapshot. + required: true + type: str + + dest_volume_name: + description: + - New Name of destination for restoring the snapshot + required: true + type: str + + account_id: + description: + - Account ID or Name of Parent/Source Volume. + required: true + type: str +''' + +EXAMPLES = """ + - name: Restore snapshot to volume + tags: + - elementsw_create_snapshot_restore + na_elementsw_snapshot_restore: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + account_id: ansible-1 + src_snapshot_id: snapshot_20171021 + src_volume_id: volume-playarea + dest_volume_name: dest-volume-area + +""" + + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class ElementOSSnapshotRestore(object): + """ + Element OS Restore from snapshot + """ + + def __init__(self): + self.argument_spec = 
netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + account_id=dict(required=True, type='str'), + src_volume_id=dict(required=True, type='str'), + dest_volume_name=dict(required=True, type='str'), + src_snapshot_id=dict(required=True, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + input_params = self.module.params + + self.account_id = input_params['account_id'] + self.src_volume_id = input_params['src_volume_id'] + self.dest_volume_name = input_params['dest_volume_name'] + self.src_snapshot_id = input_params['src_snapshot_id'] + + if HAS_SF_SDK is False: + self.module.fail_json( + msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + self.elementsw_helper = NaElementSWModule(self.sfe) + + # add telemetry attributes + self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_snapshot_restore') + + def get_account_id(self): + """ + Get account id if found + """ + try: + # Update and return self.account_id + self.account_id = self.elementsw_helper.account_exists(self.account_id) + return self.account_id + except Exception as err: + self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err)) + + def get_snapshot_id(self): + """ + Return snapshot details if found + """ + src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id) + # Update and return self.src_snapshot_id + if src_snapshot: + self.src_snapshot_id = src_snapshot.snapshot_id + # Return self.src_snapshot_id + return self.src_snapshot_id + return None + + def restore_snapshot(self): + """ + Restore Snapshot to Volume + """ + try: + self.sfe.clone_volume(volume_id=self.src_volume_id, + name=self.dest_volume_name, + snapshot_id=self.src_snapshot_id, + attributes=self.attributes) + except Exception as exception_object: + 
self.module.fail_json( + msg='Error restore snapshot %s' % (to_native(exception_object)), + exception=traceback.format_exc()) + + def apply(self): + """ + Check, process and initiate restore snapshot to volume operation + """ + changed = False + result_message = None + self.get_account_id() + src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id) + + if src_vol_id is not None: + # Update self.src_volume_id + self.src_volume_id = src_vol_id + if self.get_snapshot_id() is not None: + # Addressing idempotency by comparing volume does not exist with same volume name + if self.elementsw_helper.volume_exists(self.dest_volume_name, self.account_id) is None: + self.restore_snapshot() + changed = True + else: + result_message = "No changes requested, Skipping changes" + else: + self.module.fail_json(msg="Snapshot id not found %s" % self.src_snapshot_id) + else: + self.module.fail_json(msg="Volume id not found %s" % self.src_volume_id) + + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + """ + Main function + """ + na_elementsw_snapshot_restore = ElementOSSnapshotRestore() + na_elementsw_snapshot_restore.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py new file mode 100644 index 000000000..2ace1bd4b --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py @@ -0,0 +1,586 @@ +#!/usr/bin/python +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Element SW Software Snapshot Schedule""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + 
+module: na_elementsw_snapshot_schedule + +short_description: NetApp Element Software Snapshot Schedules +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, destroy, or update snapshot schedules on ElementSW + +options: + + state: + description: + - Whether the specified schedule should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + paused: + description: + - Pause / Resume a schedule. + type: bool + + recurring: + description: + - Should the schedule recur? + type: bool + + schedule_type: + description: + - Schedule type for creating schedule. + choices: ['DaysOfWeekFrequency','DaysOfMonthFrequency','TimeIntervalFrequency'] + type: str + + time_interval_days: + description: Time interval in days. + type: int + + time_interval_hours: + description: Time interval in hours. + type: int + + time_interval_minutes: + description: Time interval in minutes. + type: int + + days_of_week_weekdays: + description: List of days of the week (Sunday to Saturday) + type: list + elements: str + + days_of_week_hours: + description: Time specified in hours + type: int + + days_of_week_minutes: + description: Time specified in minutes. + type: int + + days_of_month_monthdays: + description: List of days of the month (1-31) + type: list + elements: int + + days_of_month_hours: + description: Time specified in hours + type: int + + days_of_month_minutes: + description: Time specified in minutes. + type: int + + name: + description: + - Name for the snapshot schedule. + - It accepts either schedule_id or schedule_name + - if name is digit, it will consider as schedule_id + - If name is string, it will consider as schedule_name + required: true + type: str + + snapshot_name: + description: + - Name for the created snapshots. + type: str + + volumes: + description: + - Volume IDs that you want to set the snapshot schedule for. 
+ - It accepts both volume_name and volume_id + type: list + elements: str + + account_id: + description: + - Account ID for the owner of this volume. + - It accepts either account_name or account_id + - if account_id is digit, it will consider as account_id + - If account_id is string, it will consider as account_name + type: str + + retention: + description: + - Retention period for the snapshot. + - Format is 'HH:mm:ss'. + type: str + + starting_date: + description: + - Starting date for the schedule. + - Required when C(state=present). + - "Format: C(2016-12-01T00:00:00Z)" + type: str +''' + +EXAMPLES = """ + - name: Create Snapshot schedule + na_elementsw_snapshot_schedule: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + name: Schedule_A + schedule_type: TimeIntervalFrequency + time_interval_days: 1 + starting_date: '2016-12-01T00:00:00Z' + retention: '24:00:00' + volumes: + - 7 + - test + account_id: 1 + + - name: Update Snapshot schedule + na_elementsw_snapshot_schedule: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + name: Schedule_A + schedule_type: TimeIntervalFrequency + time_interval_days: 1 + starting_date: '2016-12-01T00:00:00Z' + retention: '24:00:00' + volumes: + - 8 + - test1 + account_id: 1 + + - name: Delete Snapshot schedule + na_elementsw_snapshot_schedule: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + name: 6 +""" + +RETURN = """ + +schedule_id: + description: Schedule ID of the newly created schedule + returned: success + type: str +""" +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from 
ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + from solidfire.custom.models import DaysOfWeekFrequency, Weekday, DaysOfMonthFrequency + from solidfire.common import ApiConnectionError, ApiServerError + from solidfire.custom.models import TimeIntervalFrequency + from solidfire.models import Schedule, ScheduleInfo +except ImportError: + HAS_SF_SDK = False + +try: + # Hack to see if we we have the 1.7 version of the SDK, or later + from solidfire.common.model import VER3 + HAS_SF_SDK_1_7 = True + del VER3 +except ImportError: + HAS_SF_SDK_1_7 = False + + +class ElementSWSnapShotSchedule(object): + """ + Contains methods to parse arguments, + derive details of ElementSW objects + and send requests to ElementSW via + the ElementSW SDK + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check paramenters and ensure SDK is installed + """ + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + schedule_type=dict(required=False, choices=['DaysOfWeekFrequency', 'DaysOfMonthFrequency', 'TimeIntervalFrequency']), + + time_interval_days=dict(required=False, type='int'), + time_interval_hours=dict(required=False, type='int'), + time_interval_minutes=dict(required=False, type='int'), + + days_of_week_weekdays=dict(required=False, type='list', elements='str'), + days_of_week_hours=dict(required=False, type='int'), + days_of_week_minutes=dict(required=False, type='int'), + + days_of_month_monthdays=dict(required=False, type='list', elements='int'), + days_of_month_hours=dict(required=False, type='int'), + days_of_month_minutes=dict(required=False, type='int'), + + paused=dict(required=False, type='bool'), + recurring=dict(required=False, type='bool'), + + 
starting_date=dict(required=False, type='str'), + + snapshot_name=dict(required=False, type='str'), + volumes=dict(required=False, type='list', elements='str'), + account_id=dict(required=False, type='str'), + retention=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['account_id', 'volumes', 'schedule_type']), + ('schedule_type', 'DaysOfMonthFrequency', ['days_of_month_monthdays']), + ('schedule_type', 'DaysOfWeekFrequency', ['days_of_week_weekdays']) + + ], + supports_check_mode=True + ) + + param = self.module.params + + # set up state variables + self.state = param['state'] + self.name = param['name'] + self.schedule_type = param['schedule_type'] + self.days_of_week_weekdays = param['days_of_week_weekdays'] + self.days_of_week_hours = param['days_of_week_hours'] + self.days_of_week_minutes = param['days_of_week_minutes'] + self.days_of_month_monthdays = param['days_of_month_monthdays'] + self.days_of_month_hours = param['days_of_month_hours'] + self.days_of_month_minutes = param['days_of_month_minutes'] + self.time_interval_days = param['time_interval_days'] + self.time_interval_hours = param['time_interval_hours'] + self.time_interval_minutes = param['time_interval_minutes'] + self.paused = param['paused'] + self.recurring = param['recurring'] + if self.schedule_type == 'DaysOfWeekFrequency': + # Create self.weekday list if self.schedule_type is days_of_week + if self.days_of_week_weekdays is not None: + # Create self.weekday list if self.schedule_type is days_of_week + self.weekdays = [] + for day in self.days_of_week_weekdays: + if str(day).isdigit(): + # If id specified, return appropriate day + self.weekdays.append(Weekday.from_id(int(day))) + else: + # If name specified, return appropriate day + self.weekdays.append(Weekday.from_name(day.capitalize())) + + if self.state == 'present' and self.schedule_type is None: + # Mandate schedule_type for create 
operation + self.module.fail_json( + msg="Please provide required parameter: schedule_type") + + # Mandate schedule name for delete operation + if self.state == 'absent' and self.name is None: + self.module.fail_json( + msg="Please provide required parameter: name") + + self.starting_date = param['starting_date'] + self.snapshot_name = param['snapshot_name'] + self.volumes = param['volumes'] + self.account_id = param['account_id'] + self.retention = param['retention'] + self.create_schedule_result = None + + if HAS_SF_SDK is False: + # Create ElementSW connection + self.module.fail_json(msg="Unable to import the ElementSW Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + self.elementsw_helper = NaElementSWModule(self.sfe) + + def get_schedule(self): + # Checking whether schedule id is exist or not + # Return schedule details if found, None otherwise + # If exist set variable self.name + try: + schedule_list = self.sfe.list_schedules() + except ApiServerError: + return None + + for schedule in schedule_list.schedules: + if schedule.to_be_deleted: + # skip this schedule if it is being deleted, it can as well not exist + continue + if str(schedule.schedule_id) == self.name: + self.name = schedule.name + return schedule + elif schedule.name == self.name: + return schedule + return None + + def get_account_id(self): + # Validate account id + # Return account_id if found, None otherwise + try: + account_id = self.elementsw_helper.account_exists(self.account_id) + return account_id + except ApiServerError: + return None + + def get_volume_id(self): + # Validate volume_ids + # Return volume ids if found, fail if not found + volume_ids = [] + for volume in self.volumes: + volume_id = self.elementsw_helper.volume_exists(volume.strip(), self.account_id) + if volume_id: + volume_ids.append(volume_id) + else: + self.module.fail_json(msg='Specified volume %s does not exist' % volume) + return volume_ids + + def get_frequency(self): + # 
Configuring frequency depends on self.schedule_type + frequency = None + if self.schedule_type is not None and self.schedule_type == 'DaysOfWeekFrequency': + if self.weekdays is not None: + params = dict(weekdays=self.weekdays) + if self.days_of_week_hours is not None: + params['hours'] = self.days_of_week_hours + if self.days_of_week_minutes is not None: + params['minutes'] = self.days_of_week_minutes + frequency = DaysOfWeekFrequency(**params) + elif self.schedule_type is not None and self.schedule_type == 'DaysOfMonthFrequency': + if self.days_of_month_monthdays is not None: + params = dict(monthdays=self.days_of_month_monthdays) + if self.days_of_month_hours is not None: + params['hours'] = self.days_of_month_hours + if self.days_of_month_minutes is not None: + params['minutes'] = self.days_of_month_minutes + frequency = DaysOfMonthFrequency(**params) + elif self.schedule_type is not None and self.schedule_type == 'TimeIntervalFrequency': + params = dict() + if self.time_interval_days is not None: + params['days'] = self.time_interval_days + if self.time_interval_hours is not None: + params['hours'] = self.time_interval_hours + if self.time_interval_minutes is not None: + params['minutes'] = self.time_interval_minutes + if not params or sum(params.values()) == 0: + self.module.fail_json(msg='Specify at least one non zero value with TimeIntervalFrequency.') + frequency = TimeIntervalFrequency(**params) + return frequency + + def is_same_schedule_type(self, schedule_detail): + # To check schedule type is same or not + if str(schedule_detail.frequency).split('(', maxsplit=1)[0] == self.schedule_type: + return True + else: + return False + + def create_schedule(self): + # Create schedule + try: + frequency = self.get_frequency() + if frequency is None: + self.module.fail_json(msg='Failed to create schedule frequency object - type %s parameters' % self.schedule_type) + + # Create schedule + name = self.name + schedule_info = ScheduleInfo( + volume_ids=self.volumes, 
+ snapshot_name=self.snapshot_name, + retention=self.retention + ) + if HAS_SF_SDK_1_7: + sched = Schedule(frequency, name, schedule_info) + else: + sched = Schedule(schedule_info, name, frequency) + sched.paused = self.paused + sched.recurring = self.recurring + sched.starting_date = self.starting_date + + self.create_schedule_result = self.sfe.create_schedule(sched) + + except (ApiServerError, ApiConnectionError) as exc: + self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(exc)), + exception=traceback.format_exc()) + + def delete_schedule(self, schedule_id): + # delete schedule + try: + get_schedule_result = self.sfe.get_schedule(schedule_id=schedule_id) + sched = get_schedule_result.schedule + sched.to_be_deleted = True + self.sfe.modify_schedule(schedule=sched) + + except (ApiServerError, ApiConnectionError) as exc: + self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(exc)), + exception=traceback.format_exc()) + + def update_schedule(self, schedule_id): + # Update schedule + try: + get_schedule_result = self.sfe.get_schedule(schedule_id=schedule_id) + sched = get_schedule_result.schedule + # Update schedule properties + sched.frequency = self.get_frequency() + if sched.frequency is None: + self.module.fail_json(msg='Failed to create schedule frequency object - type %s parameters' % self.schedule_type) + + if self.volumes is not None and len(self.volumes) > 0: + sched.schedule_info.volume_ids = self.volumes + if self.retention is not None: + sched.schedule_info.retention = self.retention + if self.snapshot_name is not None: + sched.schedule_info.snapshot_name = self.snapshot_name + if self.paused is not None: + sched.paused = self.paused + if self.recurring is not None: + sched.recurring = self.recurring + if self.starting_date is not None: + sched.starting_date = self.starting_date + + # Make API call + self.sfe.modify_schedule(schedule=sched) + + except (ApiServerError, ApiConnectionError) as exc: 
+ self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(exc)), + exception=traceback.format_exc()) + + def apply(self): + # Perform pre-checks, call functions and exit + + changed = False + update_schedule = False + + if self.account_id is not None: + self.account_id = self.get_account_id() + + if self.state == 'present' and self.volumes is not None: + if self.account_id: + self.volumes = self.get_volume_id() + else: + self.module.fail_json(msg='Specified account id does not exist') + + # Getting the schedule details + schedule_detail = self.get_schedule() + + if schedule_detail is None and self.state == 'present': + if len(self.volumes) > 0: + changed = True + else: + self.module.fail_json(msg='Specified volumes not on cluster') + elif schedule_detail is not None: + # Getting the schedule id + if self.state == 'absent': + changed = True + else: + # Check if we need to update the snapshot schedule + if self.retention is not None and schedule_detail.schedule_info.retention != self.retention: + update_schedule = True + changed = True + elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name: + update_schedule = True + changed = True + elif self.paused is not None and schedule_detail.paused != self.paused: + update_schedule = True + changed = True + elif self.recurring is not None and schedule_detail.recurring != self.recurring: + update_schedule = True + changed = True + elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date: + update_schedule = True + changed = True + elif self.volumes is not None and len(self.volumes) > 0: + for volume_id in schedule_detail.schedule_info.volume_ids: + if volume_id not in self.volumes: + update_schedule = True + changed = True + + temp_frequency = self.get_frequency() + if temp_frequency is not None: + # Checking schedule_type changes + if self.is_same_schedule_type(schedule_detail): + # If same schedule type + if 
self.schedule_type == "TimeIntervalFrequency": + # Check if there is any change in schedule.frequency, If schedule_type is time_interval + if schedule_detail.frequency.days != temp_frequency.days or \ + schedule_detail.frequency.hours != temp_frequency.hours or \ + schedule_detail.frequency.minutes != temp_frequency.minutes: + update_schedule = True + changed = True + elif self.schedule_type == "DaysOfMonthFrequency": + # Check if there is any change in schedule.frequency, If schedule_type is days_of_month + if len(schedule_detail.frequency.monthdays) != len(temp_frequency.monthdays) or \ + schedule_detail.frequency.hours != temp_frequency.hours or \ + schedule_detail.frequency.minutes != temp_frequency.minutes: + update_schedule = True + changed = True + elif len(schedule_detail.frequency.monthdays) == len(temp_frequency.monthdays): + actual_frequency_monthday = schedule_detail.frequency.monthdays + temp_frequency_monthday = temp_frequency.monthdays + for monthday in actual_frequency_monthday: + if monthday not in temp_frequency_monthday: + update_schedule = True + changed = True + elif self.schedule_type == "DaysOfWeekFrequency": + # Check if there is any change in schedule.frequency, If schedule_type is days_of_week + if len(schedule_detail.frequency.weekdays) != len(temp_frequency.weekdays) or \ + schedule_detail.frequency.hours != temp_frequency.hours or \ + schedule_detail.frequency.minutes != temp_frequency.minutes: + update_schedule = True + changed = True + elif len(schedule_detail.frequency.weekdays) == len(temp_frequency.weekdays): + actual_frequency_weekdays = schedule_detail.frequency.weekdays + temp_frequency_weekdays = temp_frequency.weekdays + if len([actual_weekday for actual_weekday, temp_weekday in + zip(actual_frequency_weekdays, temp_frequency_weekdays) if actual_weekday != temp_weekday]) != 0: + update_schedule = True + changed = True + else: + update_schedule = True + changed = True + else: + self.module.fail_json(msg='Failed to create 
schedule frequency object - type %s parameters' % self.schedule_type) + + result_message = " " + if changed: + if self.module.check_mode: + # Skip changes + result_message = "Check mode, skipping changes" + else: + if self.state == 'present': + if update_schedule: + self.update_schedule(schedule_detail.schedule_id) + result_message = "Snapshot Schedule modified" + else: + self.create_schedule() + result_message = "Snapshot Schedule created" + elif self.state == 'absent': + self.delete_schedule(schedule_detail.schedule_id) + result_message = "Snapshot Schedule deleted" + + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + sss = ElementSWSnapShotSchedule() + sss.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py new file mode 100644 index 000000000..299338ad5 --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py @@ -0,0 +1,274 @@ +#!/usr/bin/python +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_vlan + +short_description: NetApp Element Software Manage VLAN +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete, modify VLAN + +options: + + state: + description: + - Whether the specified vlan should exist or not. 
+ choices: ['present', 'absent'] + default: present + type: str + + vlan_tag: + description: + - Virtual Network Tag + required: true + type: str + + name: + description: + - User defined name for the new VLAN + - Name of the vlan is unique + - Required for create + type: str + + svip: + description: + - Storage virtual IP which is unique + - Required for create + type: str + + address_blocks: + description: + - List of address blocks for the VLAN + - Each address block contains the starting IP address and size for the block + - Required for create + type: list + elements: dict + + netmask: + description: + - Netmask for the VLAN + - Required for create + type: str + + gateway: + description: + - Gateway for the VLAN + type: str + + namespace: + description: + - Enable or disable namespaces + type: bool + + attributes: + description: + - Dictionary of attributes with name and value for each attribute + type: dict + +''' + +EXAMPLES = """ +- name: Create vlan + na_elementsw_vlan: + state: present + name: test + vlan_tag: 1 + svip: "{{ ip address }}" + netmask: "{{ netmask }}" + address_blocks: + - start: "{{ starting ip_address }}" + size: 5 + - start: "{{ starting ip_address }}" + size: 5 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Delete Lun + na_elementsw_vlan: + state: absent + vlan_tag: 1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + import 
solidfire.common +except ImportError: + HAS_SF_SDK = False + + +class ElementSWVlan(object): + """ class to handle VLAN operations """ + + def __init__(self): + """ + Setup Ansible parameters and ElementSW connection + """ + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], + default='present'), + name=dict(required=False, type='str'), + vlan_tag=dict(required=True, type='str'), + svip=dict(required=False, type='str'), + netmask=dict(required=False, type='str'), + gateway=dict(required=False, type='str'), + namespace=dict(required=False, type='bool'), + attributes=dict(required=False, type='dict'), + address_blocks=dict(required=False, type='list', elements='dict') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.elem = netapp_utils.create_sf_connection(module=self.module) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.elementsw_helper = NaElementSWModule(self.elem) + + # add telemetry attributes + if self.parameters.get('attributes') is not None: + self.parameters['attributes'].update(self.elementsw_helper.set_element_attributes(source='na_elementsw_vlan')) + else: + self.parameters['attributes'] = self.elementsw_helper.set_element_attributes(source='na_elementsw_vlan') + + def validate_keys(self): + """ + Validate if all required keys are present before creating + """ + required_keys = ['address_blocks', 'svip', 'netmask', 'name'] + if all(item in self.parameters.keys() for item in required_keys) is False: + self.module.fail_json(msg="One or more required fields %s for creating VLAN is missing" + % required_keys) + addr_blk_fields = ['start', 'size'] + for address in self.parameters['address_blocks']: + if 'start' not in 
address or 'size' not in address: + self.module.fail_json(msg="One or more required fields %s for address blocks is missing" + % addr_blk_fields) + + def create_network(self): + """ + Add VLAN + """ + try: + self.validate_keys() + create_params = self.parameters.copy() + for key in ['username', 'hostname', 'password', 'state', 'vlan_tag']: + del create_params[key] + self.elem.add_virtual_network(virtual_network_tag=self.parameters['vlan_tag'], **create_params) + except solidfire.common.ApiServerError as err: + self.module.fail_json(msg="Error creating VLAN %s" + % self.parameters['vlan_tag'], + exception=to_native(err)) + + def delete_network(self): + """ + Remove VLAN + """ + try: + self.elem.remove_virtual_network(virtual_network_tag=self.parameters['vlan_tag']) + except solidfire.common.ApiServerError as err: + self.module.fail_json(msg="Error deleting VLAN %s" + % self.parameters['vlan_tag'], + exception=to_native(err)) + + def modify_network(self, modify): + """ + Modify the VLAN + """ + try: + self.elem.modify_virtual_network(virtual_network_tag=self.parameters['vlan_tag'], **modify) + except solidfire.common.ApiServerError as err: + self.module.fail_json(msg="Error modifying VLAN %s" + % self.parameters['vlan_tag'], + exception=to_native(err)) + + def get_network_details(self): + """ + Check existing VLANs + :return: vlan details if found, None otherwise + :type: dict + """ + vlans = self.elem.list_virtual_networks(virtual_network_tag=self.parameters['vlan_tag']) + vlan_details = dict() + for vlan in vlans.virtual_networks: + if vlan is not None: + vlan_details['name'] = vlan.name + vlan_details['address_blocks'] = list() + for address in vlan.address_blocks: + vlan_details['address_blocks'].append({ + 'start': address.start, + 'size': address.size + }) + vlan_details['svip'] = vlan.svip + vlan_details['gateway'] = vlan.gateway + vlan_details['netmask'] = vlan.netmask + vlan_details['namespace'] = vlan.namespace + vlan_details['attributes'] = vlan.attributes 
+ return vlan_details + return None + + def apply(self): + """ + Call create / delete / modify vlan methods + """ + network = self.get_network_details() + # calling helper to determine action + cd_action = self.na_helper.get_cd_action(network, self.parameters) + modify = self.na_helper.get_modified_attributes(network, self.parameters) + if not self.module.check_mode: + if cd_action == "create": + self.create_network() + elif cd_action == "delete": + self.delete_network() + elif modify: + if 'attributes' in modify: + # new attributes will replace existing ones + modify['attributes'] = self.parameters['attributes'] + self.modify_network(modify) + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + """ Apply vlan actions """ + network_obj = ElementSWVlan() + network_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py new file mode 100644 index 000000000..3fcaf00ce --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py @@ -0,0 +1,413 @@ +#!/usr/bin/python + +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Element OS Software Volume Manager""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_volume + +short_description: NetApp Element Software Manage Volumes +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, destroy, or update volumes on ElementSW + +options: + + state: + description: + - Whether the specified volume should exist or not. 
+ choices: ['present', 'absent'] + default: present + type: str + + name: + description: + - The name of the volume to manage. + - It accepts volume_name or volume_id + required: true + type: str + + account_id: + description: + - Account ID for the owner of this volume. + - It accepts Account_id or Account_name + required: true + type: str + + enable512e: + description: + - Required when C(state=present) + - Should the volume provide 512-byte sector emulation? + type: bool + aliases: + - enable512emulation + + qos: + description: Initial quality of service settings for this volume. Configure as dict in playbooks. + type: dict + + qos_policy_name: + description: + - Quality of service policy for this volume. + - It can be a name or an id. + - Mutually exclusive with C(qos) option. + type: str + + attributes: + description: A YAML dictionary of attributes that you would like to apply on this volume. + type: dict + + size: + description: + - The size of the volume in (size_unit). + - Required when C(state = present). + type: int + + size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: 'gb' + type: str + + access: + description: + - Access allowed for the volume. + - readOnly Only read operations are allowed. + - readWrite Reads and writes are allowed. + - locked No reads or writes are allowed. + - replicationTarget Identify a volume as the target volume for a paired set of volumes. + - If the volume is not paired, the access status is locked. + - If unspecified, the access settings of the clone will be the same as the source. 
+ choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget'] + type: str +''' + +EXAMPLES = """ + - name: Create Volume + na_elementsw_volume: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + name: AnsibleVol + qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000} + account_id: 3 + enable512e: False + size: 1 + size_unit: gb + + - name: Update Volume + na_elementsw_volume: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: present + name: AnsibleVol + account_id: 3 + access: readWrite + + - name: Delete Volume + na_elementsw_volume: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + state: absent + name: AnsibleVol + account_id: 2 +""" + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + import solidfire.common +except ImportError: + HAS_SF_SDK = False + + +class ElementSWVolume(object): + """ + Contains methods to parse arguments, + derive details of ElementSW objects + and send requests to ElementOS via + the ElementSW SDK + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check paramenters and ensure SDK is installed + """ + self._size_unit_map = netapp_utils.SF_BYTE_MAP + + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + 
account_id=dict(required=True), + enable512e=dict(required=False, type='bool', aliases=['enable512emulation']), + qos=dict(required=False, type='dict', default=None), + qos_policy_name=dict(required=False, type='str', default=None), + attributes=dict(required=False, type='dict', default=None), + size=dict(type='int'), + size_unit=dict(default='gb', + choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', + 'pb', 'eb', 'zb', 'yb'], type='str'), + + access=dict(required=False, type='str', default=None, + choices=['readOnly', 'readWrite', 'locked', 'replicationTarget']), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['size', 'enable512e']) + ], + mutually_exclusive=[ + ('qos', 'qos_policy_name'), + ], + supports_check_mode=True + ) + + param = self.module.params + + # set up state variables + self.state = param['state'] + self.name = param['name'] + self.account_id = param['account_id'] + self.enable512e = param['enable512e'] + self.qos = param['qos'] + self.qos_policy_name = param['qos_policy_name'] + self.attributes = param['attributes'] + self.access = param['access'] + self.size_unit = param['size_unit'] + if param['size'] is not None: + self.size = param['size'] * self._size_unit_map[self.size_unit] + else: + self.size = None + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the ElementSW Python SDK") + else: + try: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + except solidfire.common.ApiServerError: + self.module.fail_json(msg="Unable to create the connection") + + self.elementsw_helper = NaElementSWModule(self.sfe) + + # add telemetry attributes + if self.attributes is not None: + self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_volume')) + else: + self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_volume') + + def get_account_id(self): + """ + Return account id if found + """ + try: + 
# Update and return self.account_id + self.account_id = self.elementsw_helper.account_exists(self.account_id) + except Exception as err: + self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err)) + return self.account_id + + def get_qos_policy(self, name): + """ + Get QOS Policy + """ + policy, error = self.elementsw_helper.get_qos_policy(name) + if error is not None: + self.module.fail_json(msg=error) + return policy + + def get_volume(self): + """ + Return volume details if found + """ + # Get volume details + volume_id = self.elementsw_helper.volume_exists(self.name, self.account_id) + + if volume_id is not None: + # Return volume_details + volume_details = self.elementsw_helper.get_volume(volume_id) + if volume_details is not None: + return volume_details + return None + + def create_volume(self, qos_policy_id): + """ + Create Volume + :return: True if created, False if fails + """ + options = dict( + name=self.name, + account_id=self.account_id, + total_size=self.size, + enable512e=self.enable512e, + attributes=self.attributes + ) + if qos_policy_id is not None: + options['qos_policy_id'] = qos_policy_id + if self.qos is not None: + options['qos'] = self.qos + try: + self.sfe.create_volume(**options) + except Exception as err: + self.module.fail_json(msg="Error provisioning volume: %s of size: %s" % (self.name, self.size), + exception=to_native(err)) + + def delete_volume(self, volume_id): + """ + Delete and purge the volume using volume id + :return: Success : True , Failed : False + """ + try: + self.sfe.delete_volume(volume_id=volume_id) + self.sfe.purge_deleted_volume(volume_id=volume_id) + # Delete method will delete and also purge the volume instead of moving the volume state to inactive. 
+ + except Exception as err: + # Throwing the exact error message instead of generic error message + self.module.fail_json(msg='Error deleting volume: %s, %s' % (str(volume_id), to_native(err)), + exception=to_native(err)) + + def update_volume(self, volume_id, qos_policy_id): + """ + Update the volume with the specified param + :return: Success : True, Failed : False + """ + options = dict( + attributes=self.attributes + ) + if self.access is not None: + options['access'] = self.access + if self.account_id is not None: + options['account_id'] = self.account_id + if self.qos is not None: + options['qos'] = self.qos + if qos_policy_id is not None: + options['qos_policy_id'] = qos_policy_id + if self.size is not None: + options['total_size'] = self.size + try: + self.sfe.modify_volume(volume_id, **options) + except Exception as err: + # Throwing the exact error message instead of generic error message + self.module.fail_json(msg='Error updating volume: %s, %s' % (str(volume_id), to_native(err)), + exception=to_native(err)) + + def apply(self): + # Perform pre-checks, call functions and exit + changed = False + qos_policy_id = None + action = None + + self.get_account_id() + volume_detail = self.get_volume() + + if self.state == 'present' and self.qos_policy_name is not None: + policy = self.get_qos_policy(self.qos_policy_name) + if policy is None: + error = 'Cannot find qos policy with name/id: %s' % self.qos_policy_name + self.module.fail_json(msg=error) + qos_policy_id = policy['qos_policy_id'] + + if volume_detail: + volume_id = volume_detail.volume_id + if self.state == 'absent': + action = 'delete' + + elif self.state == 'present': + # Checking all the params for update operation + if self.access is not None and volume_detail.access != self.access: + action = 'update' + + if self.account_id is not None and volume_detail.account_id != self.account_id: + action = 'update' + + if qos_policy_id is not None and volume_detail.qos_policy_id != qos_policy_id: + # 
volume_detail.qos_policy_id may be None if no policy is associated with the volume + action = 'update' + + if self.qos is not None and volume_detail.qos_policy_id is not None: + # remove qos_policy + action = 'update' + + if self.qos is not None: + # Actual volume_detail.qos has ['burst_iops', 'burst_time', 'curve', 'max_iops', 'min_iops'] keys. + # As only minOPS, maxOPS, burstOPS is important to consider, checking only these values. + volume_qos = vars(volume_detail.qos) + if volume_qos['min_iops'] != self.qos['minIOPS'] or volume_qos['max_iops'] != self.qos['maxIOPS'] \ + or volume_qos['burst_iops'] != self.qos['burstIOPS']: + action = 'update' + + if self.size is not None and volume_detail.total_size is not None and volume_detail.total_size != self.size: + size_difference = abs(float(volume_detail.total_size - self.size)) + # Change size only if difference is bigger than 0.001 + if size_difference / self.size > 0.001: + action = 'update' + + if self.attributes is not None and volume_detail.attributes != self.attributes: + action = 'update' + + elif self.state == 'present': + action = 'create' + + result_message = "" + + if action is not None: + changed = True + if self.module.check_mode: + result_message = "Check mode, skipping changes" + else: + if action == 'create': + self.create_volume(qos_policy_id) + result_message = "Volume created" + elif action == 'update': + self.update_volume(volume_id, qos_policy_id) + result_message = "Volume updated" + elif action == 'delete': + self.delete_volume(volume_id) + result_message = "Volume deleted" + + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + # Create object and call apply + na_elementsw_volume = ElementSWVolume() + na_elementsw_volume.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py new file mode 100644 
index 000000000..186ca85bc --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py @@ -0,0 +1,276 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Element Software volume clone""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_elementsw_volume_clone + +short_description: NetApp Element Software Create Volume Clone +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create volume clones on Element OS + +options: + + name: + description: + - The name of the clone. + required: true + type: str + + src_volume_id: + description: + - The id of the src volume to clone. id may be a numeric identifier or a volume name. + required: true + type: str + + src_snapshot_id: + description: + - The id of the snapshot to clone. id may be a numeric identifier or a snapshot name. + type: str + + account_id: + description: + - Account ID for the owner of this cloned volume. id may be a numeric identifier or an account name. + required: true + type: str + + attributes: + description: A YAML dictionary of attributes that you would like to apply on this cloned volume. + type: dict + + size: + description: + - The size of the cloned volume in (size_unit). + type: int + + size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: 'gb' + type: str + + access: + choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget'] + description: + - Access allowed for the volume. + - If unspecified, the access settings of the clone will be the same as the source. 
+ - readOnly - Only read operations are allowed. + - readWrite - Reads and writes are allowed. + - locked - No reads or writes are allowed. + - replicationTarget - Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked. + type: str + +''' + +EXAMPLES = """ + - name: Clone Volume + na_elementsw_volume_clone: + hostname: "{{ elementsw_hostname }}" + username: "{{ elementsw_username }}" + password: "{{ elementsw_password }}" + name: CloneAnsibleVol + src_volume_id: 123 + src_snapshot_id: 41 + account_id: 3 + size: 1 + size_unit: gb + access: readWrite + attributes: {"virtual_network_id": 12345} + +""" + +RETURN = """ + +msg: + description: Success message + returned: success + type: str + +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() + + +class ElementOSVolumeClone(object): + """ + Contains methods to parse arguments, + derive details of Element Software objects + and send requests to Element OS via + the Solidfire SDK + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check paramenters and ensure SDK is installed + """ + self._size_unit_map = netapp_utils.SF_BYTE_MAP + + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=True), + src_volume_id=dict(required=True), + src_snapshot_id=dict(), + account_id=dict(required=True), + attributes=dict(type='dict', default=None), + size=dict(type='int'), + size_unit=dict(default='gb', + choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', + 'pb', 'eb', 'zb', 'yb'], type='str'), + access=dict(type='str', + default=None, choices=['readOnly', 'readWrite', + 'locked', 
'replicationTarget']), + + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + parameters = self.module.params + + # set up state variables + self.name = parameters['name'] + self.src_volume_id = parameters['src_volume_id'] + self.src_snapshot_id = parameters['src_snapshot_id'] + self.account_id = parameters['account_id'] + self.attributes = parameters['attributes'] + + self.size_unit = parameters['size_unit'] + if parameters['size'] is not None: + self.size = parameters['size'] * \ + self._size_unit_map[self.size_unit] + else: + self.size = None + self.access = parameters['access'] + + if HAS_SF_SDK is False: + self.module.fail_json( + msg="Unable to import the SolidFire Python SDK") + else: + self.sfe = netapp_utils.create_sf_connection(module=self.module) + + self.elementsw_helper = NaElementSWModule(self.sfe) + + # add telemetry attributes + if self.attributes is not None: + self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone')) + else: + self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone') + + def get_account_id(self): + """ + Return account id if found + """ + try: + # Update and return self.account_id + self.account_id = self.elementsw_helper.account_exists(self.account_id) + return self.account_id + except Exception as err: + self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err)) + + def get_snapshot_id(self): + """ + Return snapshot details if found + """ + src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id) + # Update and return self.src_snapshot_id + if src_snapshot is not None: + self.src_snapshot_id = src_snapshot.snapshot_id + # Return src_snapshot + return self.src_snapshot_id + return None + + def get_src_volume_id(self): + """ + Return volume id if found + """ + src_vol_id = 
self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id) + if src_vol_id is not None: + # Update and return self.volume_id + self.src_volume_id = src_vol_id + # Return src_volume_id + return self.src_volume_id + return None + + def clone_volume(self): + """Clone Volume from source""" + try: + self.sfe.clone_volume(volume_id=self.src_volume_id, + name=self.name, + new_account_id=self.account_id, + new_size=self.size, + access=self.access, + snapshot_id=self.src_snapshot_id, + attributes=self.attributes) + + except Exception as err: + self.module.fail_json(msg="Error creating clone %s of size %s" % (self.name, self.size), exception=to_native(err)) + + def apply(self): + """Perform pre-checks, call functions and exit""" + changed = False + result_message = "" + + if self.get_account_id() is None: + self.module.fail_json(msg="Account id not found: %s" % (self.account_id)) + + # there is only one state. other operations + # are part of the volume module + + # ensure that a volume with the clone name + # isn't already present + if self.elementsw_helper.volume_exists(self.name, self.account_id) is None: + # check for the source volume + if self.get_src_volume_id() is not None: + # check for a valid snapshot + if self.src_snapshot_id and not self.get_snapshot_id(): + self.module.fail_json(msg="Snapshot id not found: %s" % (self.src_snapshot_id)) + # change required + changed = True + else: + self.module.fail_json(msg="Volume id not found %s" % (self.src_volume_id)) + + if changed: + if self.module.check_mode: + result_message = "Check mode, skipping changes" + else: + self.clone_volume() + result_message = "Volume cloned" + + self.module.exit_json(changed=changed, msg=result_message) + + +def main(): + """Create object and call apply""" + volume_clone = ElementOSVolumeClone() + volume_clone.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py 
b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py new file mode 100644 index 000000000..0d5b38a0d --- /dev/null +++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py @@ -0,0 +1,293 @@ +#!/usr/bin/python +# (c) 2017, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' + +module: na_elementsw_volume_pair + +short_description: NetApp Element Software Volume Pair +extends_documentation_fragment: + - netapp.elementsw.netapp.solidfire +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete volume pair + +options: + + state: + description: + - Whether the specified volume pair should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + src_volume: + description: + - Source volume name or volume ID + required: true + type: str + + src_account: + description: + - Source account name or ID + required: true + type: str + + dest_volume: + description: + - Destination volume name or volume ID + required: true + type: str + + dest_account: + description: + - Destination account name or ID + required: true + type: str + + mode: + description: + - Mode to start the volume pairing + choices: ['async', 'sync', 'snapshotsonly'] + default: async + type: str + + dest_mvip: + description: + - Destination IP address of the paired cluster. + required: true + type: str + + dest_username: + description: + - Destination username for the paired cluster + - Optional if this is same as source cluster username. + type: str + + dest_password: + description: + - Destination password for the paired cluster + - Optional if this is same as source cluster password. 
+ type: str + +''' + +EXAMPLES = """ + - name: Create volume pair + na_elementsw_volume_pair: + hostname: "{{ src_cluster_hostname }}" + username: "{{ src_cluster_username }}" + password: "{{ src_cluster_password }}" + state: present + src_volume: test1 + src_account: test2 + dest_volume: test3 + dest_account: test4 + mode: sync + dest_mvip: "{{ dest_cluster_hostname }}" + + - name: Delete volume pair + na_elementsw_volume_pair: + hostname: "{{ src_cluster_hostname }}" + username: "{{ src_cluster_username }}" + password: "{{ src_cluster_password }}" + state: absent + src_volume: 3 + src_account: 1 + dest_volume: 2 + dest_account: 1 + dest_mvip: "{{ dest_cluster_hostname }}" + dest_username: "{{ dest_cluster_username }}" + dest_password: "{{ dest_cluster_password }}" + +""" + +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + import solidfire.common +except ImportError: + HAS_SF_SDK = False + + +class ElementSWVolumePair(object): + ''' class to handle volume pairing operations ''' + + def __init__(self): + """ + Setup Ansible parameters and SolidFire connection + """ + self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], + default='present'), + src_volume=dict(required=True, type='str'), + src_account=dict(required=True, type='str'), + dest_volume=dict(required=True, type='str'), + dest_account=dict(required=True, type='str'), + mode=dict(required=False, type='str', + choices=['async', 'sync', 'snapshotsonly'], + default='async'), + 
dest_mvip=dict(required=True, type='str'), + dest_username=dict(required=False, type='str'), + dest_password=dict(required=False, type='str', no_log=True) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + if HAS_SF_SDK is False: + self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + else: + self.elem = netapp_utils.create_sf_connection(module=self.module) + + self.elementsw_helper = NaElementSWModule(self.elem) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # get element_sw_connection for destination cluster + # overwrite existing source host, user and password with destination credentials + self.module.params['hostname'] = self.parameters['dest_mvip'] + # username and password is same as source, + # if dest_username and dest_password aren't specified + if self.parameters.get('dest_username'): + self.module.params['username'] = self.parameters['dest_username'] + if self.parameters.get('dest_password'): + self.module.params['password'] = self.parameters['dest_password'] + self.dest_elem = netapp_utils.create_sf_connection(module=self.module) + self.dest_elementsw_helper = NaElementSWModule(self.dest_elem) + + def check_if_already_paired(self, vol_id): + """ + Check for idempotency + A volume can have only one pair + Return paired-volume-id if volume is paired already + None if volume is not paired + """ + paired_volumes = self.elem.list_volumes(volume_ids=[vol_id], + is_paired=True) + for vol in paired_volumes.volumes: + for pair in vol.volume_pairs: + if pair is not None: + return pair.remote_volume_id + return None + + def pair_volumes(self): + """ + Start volume pairing on source, and complete on target volume + """ + try: + pair_key = self.elem.start_volume_pairing( + volume_id=self.parameters['src_vol_id'], + mode=self.parameters['mode']) + self.dest_elem.complete_volume_pairing( + 
volume_pairing_key=pair_key.volume_pairing_key, + volume_id=self.parameters['dest_vol_id']) + except solidfire.common.ApiServerError as err: + self.module.fail_json(msg="Error pairing volume id %s" + % (self.parameters['src_vol_id']), + exception=to_native(err)) + + def pairing_exists(self, src_id, dest_id): + src_paired = self.check_if_already_paired(self.parameters['src_vol_id']) + dest_paired = self.check_if_already_paired(self.parameters['dest_vol_id']) + if src_paired is not None or dest_paired is not None: + return True + return None + + def unpair_volumes(self): + """ + Delete volume pair + """ + try: + self.elem.remove_volume_pair(volume_id=self.parameters['src_vol_id']) + self.dest_elem.remove_volume_pair(volume_id=self.parameters['dest_vol_id']) + except solidfire.common.ApiServerError as err: + self.module.fail_json(msg="Error unpairing volume ids %s and %s" + % (self.parameters['src_vol_id'], + self.parameters['dest_vol_id']), + exception=to_native(err)) + + def get_account_id(self, account, type): + """ + Get source and destination account IDs + """ + try: + if type == 'src': + self.parameters['src_account_id'] = self.elementsw_helper.account_exists(account) + elif type == 'dest': + self.parameters['dest_account_id'] = self.dest_elementsw_helper.account_exists(account) + except solidfire.common.ApiServerError as err: + self.module.fail_json(msg="Error: either account %s or %s does not exist" + % (self.parameters['src_account'], + self.parameters['dest_account']), + exception=to_native(err)) + + def get_volume_id(self, volume, type): + """ + Get source and destination volume IDs + """ + if type == 'src': + self.parameters['src_vol_id'] = self.elementsw_helper.volume_exists(volume, self.parameters['src_account_id']) + if self.parameters['src_vol_id'] is None: + self.module.fail_json(msg="Error: source volume %s does not exist" + % (self.parameters['src_volume'])) + elif type == 'dest': + self.parameters['dest_vol_id'] = 
self.dest_elementsw_helper.volume_exists(volume, self.parameters['dest_account_id']) + if self.parameters['dest_vol_id'] is None: + self.module.fail_json(msg="Error: destination volume %s does not exist" + % (self.parameters['dest_volume'])) + + def get_ids(self): + """ + Get IDs for volumes and accounts + """ + self.get_account_id(self.parameters['src_account'], 'src') + self.get_account_id(self.parameters['dest_account'], 'dest') + self.get_volume_id(self.parameters['src_volume'], 'src') + self.get_volume_id(self.parameters['dest_volume'], 'dest') + + def apply(self): + """ + Call create / delete volume pair methods + """ + self.get_ids() + paired = self.pairing_exists(self.parameters['src_vol_id'], + self.parameters['dest_vol_id']) + # calling helper to determine action + cd_action = self.na_helper.get_cd_action(paired, self.parameters) + if cd_action == "create": + self.pair_volumes() + elif cd_action == "delete": + self.unpair_volumes() + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + """ Apply volume pair actions """ + vol_obj = ElementSWVolumePair() + vol_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/elementsw/requirements.txt b/ansible_collections/netapp/elementsw/requirements.txt new file mode 100644 index 000000000..2054956e3 --- /dev/null +++ b/ansible_collections/netapp/elementsw/requirements.txt @@ -0,0 +1 @@ +solidfire-sdk-python \ No newline at end of file diff --git a/ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py b/ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py b/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py new file mode 100644 index 000000000..f60ee6782 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py @@ -0,0 +1,33 @@ +# (c) 2014, Toshio Kuratomi +# +# 
This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' diff --git a/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py b/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py new file mode 100644 index 000000000..0972cd2e8 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py @@ -0,0 +1,122 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. It +# is the same as the python3 stdlib mock library + +try: + # Allow wildcard import because we really do want to import all of mock's + # symbols into this compat shim + # pylint: disable=wildcard-import,unused-wildcard-import + from unittest.mock import * +except ImportError: + # Python 2 + # pylint: disable=wildcard-import,unused-wildcard-import + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +# Prior to 3.4.4, mock_open cannot handle binary read_data +if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b'\n' if isinstance(read_data, bytes) else '\n' + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. + data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. 
If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read` methoddline`, and `readlines` of the + file handle to return. This is an empty string by default. + """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock diff --git a/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py b/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py new file mode 100644 index 000000000..73a20cf8c --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py @@ -0,0 +1,44 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, 
either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +import pytest + +# Allow wildcard import because we really do want to import all of +# unittests's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') + + class TestCase: + """ skip everything """ + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as unittest2 may not be available') +else: + from unittest import * diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py new file mode 100644 index 000000000..0bd1e2550 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py @@ -0,0 +1,175 @@ +''' unit test for Ansible module: na_elementsw_account.py ''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +import pytest + +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes 
import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils

if not netapp_utils.has_sf_sdk():
    pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')

from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_access_group \
    import ElementSWAccessGroup as my_module  # module under test


def set_module_args(args):
    """prepare arguments so that they will be picked up during module creation"""
    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(args)  # pylint: disable=protected-access


class AnsibleExitJson(Exception):
    """Exception class to be raised by module.exit_json and caught by the test case"""
    pass


class AnsibleFailJson(Exception):
    """Exception class to be raised by module.fail_json and caught by the test case"""
    pass


def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over exit_json; package return data into an exception"""
    if 'changed' not in kwargs:
        kwargs['changed'] = False
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over fail_json; package return data into an exception"""
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)


# error string raised by the mock to verify error propagation
ADD_ERROR = 'some_error_in_add_access_group'


class MockSFConnection(object):
    ''' mock connection to ElementSW host '''

    class Bunch(object):  # pylint: disable=too-few-public-methods
        ''' create object with arbitrary attributes '''
        def __init__(self, **kw):
            ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
            setattr(self, '__dict__', kw)

    def __init__(self, force_error=False, where=None):
        ''' save arguments '''
        self.force_error = force_error
        self.where = where

    def list_volume_access_groups(self, *args, **kwargs):  # pylint: disable=unused-argument
        ''' build access_group list: access_groups.name, access_groups.account_id '''
        # always returns an empty list, so the module sees "group absent"
        access_groups = list()
        access_group_list = self.Bunch(volume_access_groups=access_groups)
        return access_group_list

    def create_volume_access_group(self, *args, **kwargs):  # pylint: disable=unused-argument
        ''' We don't check the return code, but could force an exception '''
        if self.force_error and 'add' in self.where:
            # The module does not check for a specific exception :(
            raise OSError(ADD_ERROR)

    def get_account_by_name(self, *args, **kwargs):  # pylint: disable=unused-argument
        ''' returns account_id (None when forcing an account_id error) '''
        if self.force_error and 'account_id' in self.where:
            account_id = None
        else:
            account_id = 1
        print('account_id', account_id)
        account = self.Bunch(account_id=account_id)
        result = self.Bunch(account=account)
        return result


class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        # route module exit/fail through the exception helpers above
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)

    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            my_module()
        print('Info: %s' % exc.value.args[0]['msg'])

    @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
    def test_ensure_command_called(self, mock_create_sf_connection):
        ''' group is absent in the mock, so apply() must report a change '''
        set_module_args({
            'state': 'present',
            'name': 'element_groupname',
            'account_id': 'element_account_id',
            'hostname': 'hostname',
            'username': 'username',
            'password': 'password',
        })
        # my_obj.sfe will be assigned a MockSFConnection object:
        mock_create_sf_connection.return_value = MockSFConnection()
        my_obj = my_module()
        with pytest.raises(AnsibleExitJson) as exc:
            # It may not be a good idea to start with apply
            # More atomic methods can be easier to mock
            my_obj.apply()
        print(exc.value.args[0])
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
    def test_check_error_reporting_on_add_exception(self, mock_create_sf_connection):
        ''' the mocked create call raises; verify the error message '''
        set_module_args({
            'state': 'present',
            'name': 'element_groupname',
            'account_id': 'element_account_id',
            'hostname': 'hostname',
            'username': 'username',
            'password': 'password',
        })
        # my_obj.sfe will be assigned a MockSFConnection object:
        mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['add'])
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            # It may not be a good idea to start with apply
            # More atomic methods can be easier to mock
            # apply() drives the whole module; the create step raises here
            my_obj.apply()
        print(exc.value.args[0])
        message = 'Error creating volume access group element_groupname: %s' % ADD_ERROR
        assert exc.value.args[0]['msg'] == message

    @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
    def test_check_error_reporting_on_invalid_account_id(self, mock_create_sf_connection):
        ''' the mock returns no account id; verify the error message '''
        set_module_args({
            'state': 'present',
            'name': 'element_groupname',
            'account_id': 'element_account_id',
            'volumes': ['volume1'],
            'hostname': 'hostname',
            'username': 'username',
            'password': 'password',
        })
        # my_obj.sfe will be assigned a MockSFConnection object:
        mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['account_id'])
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            # It may not be a good idea to start with apply
            # More atomic methods can be easier to mock
            # apply() drives the whole module; account lookup fails here
            my_obj.apply()
        print(exc.value.args[0])
        message = 'Error: Specified account id "%s" does not exist.' % 'element_account_id'
        assert exc.value.args[0]['msg'] == message
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py
new file mode 100644
index 000000000..fb78ad78a
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py
@@ -0,0 +1,245 @@
''' unit test for Ansible module: na_elementsw_access_group_volumes.py '''

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json
import pytest

from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils

if not netapp_utils.has_sf_sdk():
    pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')

from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_access_group_volumes \
    import ElementSWAccessGroupVolumes as my_module  # module under test


def set_module_args(args):
    """prepare arguments so that they will be picked up during module creation"""
    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(args)  # pylint: disable=protected-access


class AnsibleExitJson(Exception):
    """Exception class to be raised by module.exit_json and caught by the test case"""
    pass


class AnsibleFailJson(Exception):
    """Exception class to be raised by module.fail_json and caught by the test case"""
    pass


def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over exit_json; package return data into an exception"""
    if 'changed' not in kwargs:
kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +MODIFY_ERROR = 'some_error_in_modify_access_group' + +VOLUME_ID = 777 + + +class MockSFConnection(object): + ''' mock connection to ElementSW host ''' + + class Bunch(object): # pylint: disable=too-few-public-methods + ''' create object with arbitrary attributes ''' + def __init__(self, **kw): + ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' + setattr(self, '__dict__', kw) + + def __init__(self, force_error=False, where=None, volume_id=None): + ''' save arguments ''' + self.force_error = force_error + self.where = where + self.volume_id = volume_id + + def list_volume_access_groups(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build access_group list: access_groups.name, access_groups.account_id ''' + group_name = 'element_groupname' + if self.volume_id is None: + volume_list = list() + else: + volume_list = [self.volume_id] + access_group = self.Bunch(name=group_name, volume_access_group_id=888, volumes=volume_list) + access_groups = [access_group] + access_group_list = self.Bunch(volume_access_groups=access_groups) + return access_group_list + + def list_volumes_for_account(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build volume list: volume.name, volume.id ''' + volume = self.Bunch(name='element_volumename', volume_id=VOLUME_ID, delete_time='') + volumes = [volume] + volume_list = self.Bunch(volumes=volumes) + return volume_list + + def modify_volume_access_group(self, *args, **kwargs): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'modify_exception' in self.where: + # The module does not check for a specific exception :( + raise 
OSError(MODIFY_ERROR) + + def get_account_by_name(self, *args, **kwargs): # pylint: disable=unused-argument + ''' returns account_id ''' + if self.force_error and 'get_account_id' in self.where: + account_id = None + else: + account_id = 1 + print('account_id', account_id) + account = self.Bunch(account_id=account_id) + result = self.Bunch(account=account) + return result + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + ARGS = { + 'state': 'present', + 'access_group': 'element_groupname', + 'volumes': 'element_volumename', + 'account_id': 'element_account_id', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + } + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_volume(self, mock_create_sf_connection): + ''' adding a volume ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_volume_idempotent(self, mock_create_sf_connection): + ''' adding a volume that is already in the access group ''' + args = dict(self.ARGS) + set_module_args(args) + # my_obj.sfe will be assigned a 
MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(volume_id=VOLUME_ID) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_remove_volume(self, mock_create_sf_connection): + ''' removing a volume that is in the access group ''' + args = dict(self.ARGS) + args['state'] = 'absent' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(volume_id=VOLUME_ID) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_remove_volume_idempotent(self, mock_create_sf_connection): + ''' removing a volume that is not in the access group ''' + args = dict(self.ARGS) + args['state'] = 'absent' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_check_error_reporting_on_modify_exception(self, mock_create_sf_connection): + ''' modify does not return anything but can raise an exception ''' + args = dict(self.ARGS) + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['modify_exception']) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + 
print(exc.value.args[0]) + message = 'Error updating volume access group element_groupname: %s' % MODIFY_ERROR + assert exc.value.args[0]['msg'] == message + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_check_error_reporting_on_invalid_volume_name(self, mock_create_sf_connection): + ''' report error if volume does not exist ''' + args = dict(self.ARGS) + args['volumes'] = ['volume1'] + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = 'Error: Specified volume %s does not exist' % 'volume1' + assert exc.value.args[0]['msg'] == message + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_check_error_reporting_on_invalid_account_group_name(self, mock_create_sf_connection): + ''' report error if access group does not exist ''' + args = dict(self.ARGS) + args['access_group'] = 'something_else' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = 'Error: Specified access group "%s" does not exist for account id: %s.' 
% ('something_else', 'element_account_id') + assert exc.value.args[0]['msg'] == message + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_check_error_reporting_on_invalid_account_id(self, mock_create_sf_connection): + ''' report error if account id is not found ''' + args = dict(self.ARGS) + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where='get_account_id') + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = 'Error: Specified account id "%s" does not exist.' % 'element_account_id' + assert exc.value.args[0]['msg'] == message diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py new file mode 100644 index 000000000..8075ba5c4 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py @@ -0,0 +1,137 @@ +''' unit test for Ansible module: na_elementsw_account.py ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest + +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_account \ + import ElementSWAccount as my_module # module under test + + +def set_module_args(args): + """prepare arguments 
ADD_ERROR = 'some_error_in_add_account'


class MockSFConnection(object):
    ''' fake ElementSW connection object used by the account unit tests '''

    class Bunch(object):  # pylint: disable=too-few-public-methods
        ''' generic container: every keyword argument becomes an attribute '''
        def __init__(self, **kw):
            self.__dict__.update(kw)

    def __init__(self, force_error=False, where=None):
        # remember how (and where) failures should be simulated
        self.force_error = force_error
        self.where = where

    def list_accounts(self, *args, **kwargs):  # pylint: disable=unused-argument
        ''' return an object with an empty accounts list '''
        return self.Bunch(accounts=[])

    def add_account(self, *args, **kwargs):  # pylint: disable=unused-argument
        ''' return nothing; optionally raise to simulate an SDK failure '''
        if self.force_error and 'add' in self.where:
            # The module does not check for a specific exception :(
            raise OSError(ADD_ERROR)
Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_command_called(self, mock_create_sf_connection): + ''' a more interesting test ''' + set_module_args({ + 'state': 'present', + 'element_username': 'element_username', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + }) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + # It may not be a good idea to start with apply + # More atomic methods can be easier to mock + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_check_error_reporting_on_add_exception(self, mock_create_sf_connection): + ''' a more interesting test ''' + set_module_args({ + 'state': 'present', + 'element_username': 'element_username', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + }) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['add']) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + # It may not be a good idea to start with apply + # More atomic methods can be easier to mock + # apply() is calling list_accounts() and add_account() + my_obj.apply() + 
print(exc.value.args[0]) + message = 'Error creating account element_username: %s' % ADD_ERROR + assert exc.value.args[0]['msg'] == message diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py new file mode 100644 index 000000000..6624f374d --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py @@ -0,0 +1,228 @@ +''' unit test for Ansible module: na_elementsw_cluster.py ''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import inspect +import json +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_cluster \ + import ElementSWCluster as my_module # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + 
NODE_ID1 = 777
NODE_ID2 = 888
NODE_ID3 = 999


class MockSFConnection(object):
    ''' fake ElementSW connection that records every SDK call it receives '''

    class Bunch(object):  # pylint: disable=too-few-public-methods
        ''' generic container: every keyword argument becomes an attribute '''
        def __init__(self, **kw):
            self.__dict__.update(kw)

        def __repr__(self):
            attrs = {name: repr(value) for name, value in vars(self).items()}
            return repr(attrs)

    def __init__(self, force_error=False, where=None, nodes=None):
        # error-injection settings and the fake ensemble membership
        self.force_error = force_error
        self.where = where
        self.nodes = nodes
        self._port = 442
        self.called = []

    def record(self, args, kwargs):
        ''' append the calling method's name so tests can assert which SDK entry points ran '''
        name = inspect.stack()[1][3]  # caller function name
        print('%s: , args: %s, kwargs: %s' % (name, args, kwargs))
        self.called.append(name)

    def create_cluster(self, *args, **kwargs):  # pylint: disable=unused-argument
        ''' no-op, only recorded '''
        self.record(repr(args), repr(kwargs))

    def send_request(self, *args, **kwargs):  # pylint: disable=unused-argument
        ''' no-op, only recorded '''
        self.record(repr(args), repr(kwargs))

    def get_config(self, *args, **kwargs):  # pylint: disable=unused-argument
        ''' return a fake cluster config; optionally raise to simulate a connection error '''
        self.record(repr(args), repr(kwargs))
        if self.force_error and self.where == 'get_config_exception':
            raise ConnectionError
        if self.nodes is None:
            ensemble = []
        else:
            ensemble = ['%d:%s' % (index, node) for index, node in enumerate(self.nodes)]
        cluster = self.Bunch(ensemble=ensemble, cluster='cl_name')
        return self.Bunch(config=self.Bunch(cluster=cluster))
+ 'management_virtual_ip': '10.10.10.10', + 'storage_virtual_ip': '10.10.10.11', + 'nodes': [NODE_ID1, NODE_ID2], + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + } + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_create(self, mock_create_sf_connection): + ''' create cluster basic ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where='get_config_exception') + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + msg = 'created' + assert msg in exc.value.args[0]['msg'] + assert 'create_cluster' in my_obj.sfe_node.called + assert 'send_request' not in my_obj.sfe_node.called + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_create_extra_parms(self, mock_create_sf_connection): + ''' force a direct call to send_request ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['order_number'] = '12345' + args['serial_number'] = '54321' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where='get_config_exception') + my_obj = my_module() + with 
pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + assert 'send_request' in my_obj.sfe_node.called + assert 'create_cluster' not in my_obj.sfe_node.called + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_create_idempotent(self, mock_create_sf_connection): + ''' cluster already exists with same nodes ''' + args = dict(self.ARGS) + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1, NODE_ID2]) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + assert 'send_request' not in my_obj.sfe_node.called + assert 'create_cluster' not in my_obj.sfe_node.called + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_create_idempotent_extra_nodes(self, mock_create_sf_connection): + ''' cluster already exists with more nodes ''' + args = dict(self.ARGS) + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1, NODE_ID2, NODE_ID3]) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + msg = 'Error: found existing cluster with more nodes in ensemble.' 
+ assert msg in exc.value.args[0]['msg'] + assert 'send_request' not in my_obj.sfe_node.called + assert 'create_cluster' not in my_obj.sfe_node.called + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_create_idempotent_extra_nodes_ok(self, mock_create_sf_connection): + ''' cluster already exists with more nodes but we're OK with a superset ''' + args = dict(self.ARGS) + args['fail_if_cluster_already_exists_with_larger_ensemble'] = False + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1, NODE_ID2, NODE_ID3]) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + msg = 'cluster already exists' + assert msg in exc.value.args[0]['msg'] + assert 'send_request' not in my_obj.sfe_node.called + assert 'create_cluster' not in my_obj.sfe_node.called + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_create_idempotent_missing_nodes(self, mock_create_sf_connection): + ''' cluster already exists with fewer nodes. 
+ Since not every node is lister in the ensemble, we can't tell if it's an error or not ''' + args = dict(self.ARGS) + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1]) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + msg = 'cluster already exists' + assert msg in exc.value.args[0]['msg'] + assert 'send_request' not in my_obj.sfe_node.called + assert 'create_cluster' not in my_obj.sfe_node.called diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py new file mode 100644 index 000000000..79f461ccc --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py @@ -0,0 +1,157 @@ +''' unit test for Ansible module: na_elementsw_cluster_config.py ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest + +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_cluster_config \ + import ElementSWClusterConfig as my_module # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + 
GET_ERROR = 'some_error_in_get_ntp_info'


class MockSFConnection(object):
    ''' fake ElementSW connection object used by the cluster-config unit tests '''

    class Bunch(object):  # pylint: disable=too-few-public-methods
        ''' generic container: every keyword argument becomes an attribute '''

        def __init__(self, **kw):
            self.__dict__.update(kw)

    def __init__(self, force_error=False, where=None):
        # remember error-injection settings for the test scenarios
        self.force_error = force_error
        self.where = where
pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_setup_ntp_info_called(self, mock_create_sf_connection): + ''' test if setup_ntp_info is called ''' + module_args = {} + module_args.update(self.set_default_args()) + ntp_dict = {'set_ntp_info': {'broadcastclient': None, + 'ntp_servers': ['1.1.1.1']}} + module_args.update(ntp_dict) + set_module_args(module_args) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_setup_ntp_info: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_set_encryption_at_rest_called(self, mock_create_sf_connection): + ''' test if set_encryption_at_rest is called ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args.update({'encryption_at_rest': 'present'}) + set_module_args(module_args) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_set_encryption_at_rest enable: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] + module_args.update({'encryption_at_rest': 'absent'}) + set_module_args(module_args) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_set_encryption_at_rest disable: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_enable_feature_called(self, mock_create_sf_connection): + ''' test if enable_feature for vvols is called ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args.update({'enable_virtual_volumes': True}) + set_module_args(module_args) + my_obj 
= my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_enable_feature: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_set_cluster_full_threshold_called(self, mock_create_sf_connection): + ''' test if set_cluster_full threshold is called ''' + module_args = {} + module_args.update(self.set_default_args()) + cluster_mod_dict = \ + {'modify_cluster_full_threshold': {'stage2_aware_threshold': 2, + 'stage3_block_threshold_percent': 2, + 'max_metadata_over_provision_factor': 2}} + module_args.update(cluster_mod_dict) + set_module_args(module_args) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_set_cluster_full_threshold: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py new file mode 100644 index 000000000..9236daa04 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py @@ -0,0 +1,176 @@ +''' unit test for Ansible module: na_elementsw_cluster_snmp.py ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest + +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from 
class AnsibleExitJson(Exception):
    """raised in place of module.exit_json so the test case can capture results"""
    pass


class AnsibleFailJson(Exception):
    """raised in place of module.fail_json so the test case can capture failures"""
    pass


def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """stand-in for exit_json; wraps the module return data in an exception"""
    # a module that never reports 'changed' is treated as unchanged
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """stand-in for fail_json; wraps the module return data in an exception"""
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
'username': 'admin', + 'password': 'netapp1!', + }) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_module_fail_when_required_args_missing(self, mock_create_sf_connection): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_enable_snmp_called(self, mock_create_sf_connection): + ''' test if enable_snmp is called ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args.update({'snmp_v3_enabled': True, + 'state': 'present'}) + module_args.update({'usm_users': {'access': 'rouser', + 'name': 'TestUser', + 'password': 'ChangeMe@123', + 'passphrase': 'ChangeMe@123', + 'secLevel': 'auth', }}) + + module_args.update({'networks': {'access': 'ro', + 'cidr': 24, + 'community': 'TestNetwork', + 'network': '192.168.0.1', }}) + set_module_args(module_args) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_if_enable_snmp_called: %s' % repr(exc.value)) + assert exc.value + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_configure_snmp_from_version_3_TO_version_2_called(self, mock_create_sf_connection): + ''' test if configure snmp from version_3 to version_2''' + module_args = {} + module_args.update(self.set_default_args()) + module_args.update({'snmp_v3_enabled': False, + 'state': 'present'}) + module_args.update({'usm_users': {'access': 'rouser', + 'name': 'TestUser', + 'password': 'ChangeMe@123', + 'passphrase': 'ChangeMe@123', + 'secLevel': 'auth', }}) + + module_args.update({'networks': {'access': 'ro', + 'cidr': 24, + 'community': 'TestNetwork', + 'network': '192.168.0.1', }}) + set_module_args(module_args) + my_obj = my_module() + with 
pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_ensure_configure_snmp_from_version_3_TO_version_2_called: %s' % repr(exc.value)) + assert exc.value + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_configure_snmp_from_version_2_TO_version_3_called(self, mock_create_sf_connection): + ''' test if configure snmp from version_2 to version_3''' + module_args = {} + module_args.update(self.set_default_args()) + module_args.update({'snmp_v3_enabled': True, + 'state': 'present'}) + module_args.update({'usm_users': {'access': 'rouser', + 'name': 'TestUser_sample', + 'password': 'ChangeMe@123', + 'passphrase': 'ChangeMe@123', + 'secLevel': 'auth', }}) + + module_args.update({'networks': {'access': 'ro', + 'cidr': 24, + 'community': 'TestNetwork', + 'network': '192.168.0.1', }}) + set_module_args(module_args) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_ensure_configure_snmp_from_version_2_TO_version_3_called: %s' % repr(exc.value)) + assert exc.value + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_disable_snmp_called(self, mock_create_sf_connection): + ''' test if disable_snmp is called ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args.update({'state': 'absent'}) + set_module_args(module_args) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_if_disable_snmp_called: %s' % repr(exc.value)) + assert exc.value diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py new file mode 100644 index 000000000..dc8fd5e23 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py @@ -0,0 
+1,344 @@ +''' unit tests for Ansible module: na_elementsw_info.py ''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import inspect +import json +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_info \ + import ElementSWInfo as my_module # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +NODE_ID1 = 777 +NODE_ID2 = 888 +NODE_ID3 = 999 + + +class MockSFConnection(object): + ''' mock connection to ElementSW host ''' + + class Bunch(object): # pylint: disable=too-few-public-methods + ''' create object with arbitrary attributes ''' + def 
__init__(self, **kw): + ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' + setattr(self, '__dict__', kw) + + def __repr__(self): + results = dict() + for key, value in vars(self).items(): + results[key] = repr(value) + return repr(results) + + def to_json(self): + return json.loads(json.dumps(self, default=lambda x: x.__dict__)) + + def __init__(self, force_error=False, where=None): + ''' save arguments ''' + self.force_error = force_error + self.where = where + self.nodes = [NODE_ID1, NODE_ID2, NODE_ID3] + self._port = 442 + self.called = list() + if force_error and where == 'cx': + raise netapp_utils.solidfire.common.ApiConnectionError('testme') + + def record(self, args, kwargs): + name = inspect.stack()[1][3] # caller function name + print('%s: , args: %s, kwargs: %s' % (name, args, kwargs)) + self.called.append(name) + + def list_accounts(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build account list: account.username, account.account_id ''' + self.record(repr(args), repr(kwargs)) + accounts = list() + accounts.append({'username': 'user1'}) + account_list = self.Bunch(accounts=accounts) + return account_list + + def list_all_nodes(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build all_node list: all_node.name, all_node.all_node_id ''' + self.record(repr(args), repr(kwargs)) + all_nodes = list() + all_nodes.append({'id': 123}) + all_node_list = self.Bunch(all_nodes=all_nodes) + return all_node_list + + def list_drives(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build drive list: drive.name, drive.drive_id ''' + self.record(repr(args), repr(kwargs)) + drives = list() + drives.append({'id': 123}) + drive_list = self.Bunch(drives=drives) + return drive_list + + def get_config(self, *args, **kwargs): # pylint: disable=unused-argument + self.record(repr(args), repr(kwargs)) + if self.force_error and self.where == 'get_config_exception': + raise ConnectionError + if self.nodes is 
not None: + nodes = ['%d:%s' % (i, node) for i, node in enumerate(self.nodes)] + else: + nodes = list() + cluster = self.Bunch(ensemble=nodes, cluster='cl_name') + config = self.Bunch(cluster=cluster) + return self.Bunch(config=config) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + ARGS = { + # 'state': 'present', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + } + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_all_default(self, mock_create_sf_connection): + ''' gather all by default ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + assert 'cluster_accounts' in exc.value.args[0]['info'] + assert 'node_config' in exc.value.args[0]['info'] + username = exc.value.args[0]['info']['cluster_accounts']['accounts'][0]['username'] + assert username == 'user1' + assert 'list_accounts' in my_obj.sfe_node.called + assert 'get_config' in my_obj.sfe_node.called + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_all_all(self, mock_create_sf_connection): + ''' gather all explictly ''' + args = dict(self.ARGS) # deep 
copy as other tests can modify args + args['gather_subsets'] = ['all'] + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + assert 'list_accounts' in my_obj.sfe_node.called + assert 'get_config' in my_obj.sfe_node.called + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_all_clusters(self, mock_create_sf_connection): + ''' gather all cluster scoped subsets ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['gather_subsets'] = ['all_clusters'] + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + assert 'cluster_accounts' in exc.value.args[0]['info'] + accounts = exc.value.args[0]['info']['cluster_accounts'] + print('accounts: >>%s<<' % accounts, type(accounts)) + print(my_obj.sfe_node.called) + assert 'list_accounts' in my_obj.sfe_node.called + assert 'get_config' not in my_obj.sfe_node.called + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_all_nodes(self, mock_create_sf_connection): + ''' gather all node scoped subsets ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['gather_subsets'] = ['all_nodes'] + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not 
exc.value.args[0]['changed'] + assert 'node_config' in exc.value.args[0]['info'] + config = exc.value.args[0]['info']['node_config'] + print('config: >>%s<<' % config, type(config)) + print(my_obj.sfe_node.called) + assert 'list_accounts' not in my_obj.sfe_node.called + assert 'get_config' in my_obj.sfe_node.called + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_all_nodes_not_alone(self, mock_create_sf_connection): + ''' gather all node scoped subsets but fail as another subset is present ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['gather_subsets'] = ['all_nodes', 'dummy'] + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + msg = 'no other subset is allowed' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_filter_success(self, mock_create_sf_connection): + ''' filter on key, value - succesful match ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['gather_subsets'] = ['cluster_accounts'] + args['filter'] = dict(username='user1') + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + username = exc.value.args[0]['info']['cluster_accounts']['accounts'][0]['username'] + assert username == 'user1' + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_filter_bad_key(self, mock_create_sf_connection): + ''' filter on key, value - key not found ''' + args = 
dict(self.ARGS) # deep copy as other tests can modify args + args['gather_subsets'] = ['cluster_accounts'] + args['filter'] = dict(bad_key='user1') + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + msg = 'Error: key bad_key not found in' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_filter_bad_key_ignored(self, mock_create_sf_connection): + ''' filter on key, value - key not found - ignore error ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['gather_subsets'] = ['cluster_accounts'] + args['filter'] = dict(bad_key='user1') + args['fail_on_key_not_found'] = False + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['info']['cluster_accounts']['accounts'] == list() + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_filter_record_not_found(self, mock_create_sf_connection): + ''' filter on key, value - no match ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['gather_subsets'] = ['cluster_accounts'] + args['filter'] = dict(bad_key='user1') + args['fail_on_key_not_found'] = False + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['info']['cluster_accounts']['accounts'] == 
list() + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_info_filter_record_not_found_error(self, mock_create_sf_connection): + ''' filter on key, value - no match - force error on empty ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['gather_subsets'] = ['cluster_accounts'] + args['filter'] = dict(username='user111') + args['fail_on_record_not_found'] = True + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + msg = 'Error: no match for' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_connection_error(self, mock_create_sf_connection): + ''' filter on key, value - no match - force error on empty ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + set_module_args(args) + # force a connection exception + mock_create_sf_connection.side_effect = netapp_utils.solidfire.common.ApiConnectionError('testme') + with pytest.raises(AnsibleFailJson) as exc: + my_module() + print(exc.value.args[0]) + msg = 'Failed to create connection for hostname:442' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_other_connection_error(self, mock_create_sf_connection): + ''' filter on key, value - no match - force error on empty ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + set_module_args(args) + # force a connection exception + mock_create_sf_connection.side_effect = KeyError('testme') + with pytest.raises(AnsibleFailJson) as exc: + my_module() + print(exc.value.args[0]) + msg = 'Failed to connect for hostname:442' + assert msg in 
exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py new file mode 100644 index 000000000..ee5ff85db --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py @@ -0,0 +1,201 @@ +''' unit test for Ansible module: na_elementsw_initiators.py ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest + +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_initiators \ + import ElementSWInitiators as my_module # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: 
disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockSFConnection(object): + ''' mock connection to ElementSW host ''' + + class Bunch(object): # pylint: disable=too-few-public-methods + ''' create object with arbitrary attributes ''' + def __init__(self, **kw): + ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' + setattr(self, '__dict__', kw) + + class Initiator(object): + def __init__(self, entries): + self.__dict__.update(entries) + + def list_initiators(self): + ''' build initiator Obj ''' + initiator = self.Bunch( + initiator_name="a", + initiator_id=13, + alias="a2", + # Note: 'config-mgmt' and 'event-source' are added for telemetry + attributes={'key': 'value', 'config-mgmt': 'ansible', 'event-source': 'na_elementsw_initiators'}, + volume_access_groups=[1] + ) + initiators = self.Bunch( + initiators=[initiator] + ) + return initiators + + def create_initiators(self, *args, **kwargs): # pylint: disable=unused-argument + ''' mock method ''' + pass + + def delete_initiators(self, *args, **kwargs): # pylint: disable=unused-argument + ''' mock method ''' + pass + + def modify_initiators(self, *args, **kwargs): # pylint: disable=unused-argument + ''' mock method ''' + pass + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args(self): + return dict({ + 'hostname': '10.253.168.129', + 'username': 'namburu', + 'password': 'SFlab1234', + }) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_module_fail_when_required_args_missing(self, mock_create_sf_connection): + ''' required arguments are 
reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_create_initiator(self, mock_create_sf_connection): + ''' test if create initiator is called ''' + module_args = {} + module_args.update(self.set_default_args()) + initiator_dict = { + "state": "present", + "initiators": [{ + "name": "newinitiator1", + "alias": "newinitiator1alias", + "attributes": {"key1": "value1"} + }] + } + module_args.update(initiator_dict) + set_module_args(module_args) + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_create_initiators: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_delete_initiator(self, mock_create_sf_connection): + ''' test if delete initiator is called ''' + module_args = {} + module_args.update(self.set_default_args()) + initiator_dict = { + "state": "absent", + "initiators": [{ + "name": "a" + }] + } + module_args.update(initiator_dict) + set_module_args(module_args) + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_delete_initiators: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_initiator(self, mock_create_sf_connection): + ''' test if modify initiator is called ''' + module_args = {} + module_args.update(self.set_default_args()) + initiator_dict = { + "state": "present", + "initiators": [{ + "name": "a", + "alias": "a3", + "attributes": {"key": "value"} + }] + } + 
module_args.update(initiator_dict) + set_module_args(module_args) + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_modify_initiators: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_initiator_idempotent(self, mock_create_sf_connection): + ''' test if modify initiator is called ''' + module_args = {} + module_args.update(self.set_default_args()) + initiator_dict = { + "state": "present", + "initiators": [{ + "name": "a", + "alias": "a2", + "attributes": {"key": "value"}, + "volume_access_group_id": 1 + }] + } + module_args.update(initiator_dict) + set_module_args(module_args) + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_modify_initiators: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py new file mode 100644 index 000000000..5364a4e76 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py @@ -0,0 +1,293 @@ +''' unit tests for Ansible module: na_elementsw_info.py ''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import inspect +import json +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +import 
ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_network_interfaces \ + import ElementSWNetworkInterfaces as my_module # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +NODE_ID1 = 777 +NODE_ID2 = 888 +NODE_ID3 = 999 + +MAPPING = dict( + bond_mode='bond-mode', + bond_lacp_rate='bond-lacp_rate', + dns_nameservers='dns-nameservers', + dns_search='dns-search', + virtual_network_tag='virtualNetworkTag', +) + + +def mapkey(key): + if key in MAPPING: + return MAPPING[key] + return key + + +class MockSFConnection(object): + ''' mock connection to ElementSW host ''' + + class Bunch(object): # pylint: disable=too-few-public-methods + ''' create object with arbitrary attributes ''' + def __init__(self, **kw): + ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' + setattr(self, '__dict__', kw) + + def __repr__(self): + results = 
dict() + for key, value in vars(self).items(): + results[key] = repr(value) + return repr(results) + + def to_json(self): + return json.loads(json.dumps(self, default=lambda x: x.__dict__)) + + def __init__(self, force_error=False, where=None): + ''' save arguments ''' + self.force_error = force_error + self.where = where + # self._port = 442 + self.called = list() + self.set_network_config_args = dict() + if force_error and where == 'cx': + raise netapp_utils.solidfire.common.ApiConnectionError('testme') + + def record(self, args, kwargs): # pylint: disable=unused-argument + name = inspect.stack()[1][3] # caller function name + # print('%s: , args: %s, kwargs: %s' % (name, args, kwargs)) + self.called.append(name) + + def set_network_config(self, *args, **kwargs): # pylint: disable=unused-argument + self.record(repr(args), repr(kwargs)) + print('network:', kwargs['network'].to_json()) + self.set_network_config_args = kwargs['network'].to_json() + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + DEPRECATED_ARGS = { + 'ip_address_1g': 'ip_address_1g', + 'subnet_1g': 'subnet_1g', + 'gateway_address_1g': 'gateway_address_1g', + 'mtu_1g': 'mtu_1g', # make sure the use a value != from default + 'bond_mode_1g': 'ALB', # make sure the use a value != from default + 'lacp_1g': 'Fast', # make sure the use a value != from default + 'ip_address_10g': 'ip_address_10g', + 'subnet_10g': 'subnet_10g', + 'gateway_address_10g': 'gateway_address_10g', + 'mtu_10g': 'mtu_10g', # make sure the use a value != from default + 'bond_mode_10g': 'LACP', # make sure the use a value != from default + 'lacp_10g': 'Fast', # make sure the use a value != from default + 'method': 'static', + 'dns_nameservers': 'dns_nameservers', + 'dns_search_domains': 'dns_search_domains', + 'virtual_network_tag': 'virtual_network_tag', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + } + + ARGS = { + 'bond_1g': { + 'address': '10.10.10.10', + 
'netmask': '255.255.255.0', + 'gateway': '10.10.10.1', + 'mtu': '1500', + 'bond_mode': 'ActivePassive', + 'dns_nameservers': ['dns_nameservers'], + 'dns_search': ['dns_search_domains'], + 'virtual_network_tag': 'virtual_network_tag', + }, + 'bond_10g': { + 'bond_mode': 'LACP', + 'bond_lacp_rate': 'Fast', + }, + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + } + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_deprecated_nothing(self): + ''' deprecated without 1g or 10g options ''' + args = dict(self.DEPRECATED_ARGS) # deep copy as other tests can modify args + for key in list(args): + if '1g' in key or '10g' in key: + del args[key] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'Please use the new bond_1g or bond_10g options to configure the bond interfaces.' + assert msg in exc.value.args[0]['msg'] + msg = 'This module cannot set or change "method"' + assert msg in exc.value.args[0]['msg'] + + def test_deprecated_all(self): + ''' deprecated with all options ''' + args = dict(self.DEPRECATED_ARGS) # deep copy as other tests can modify args + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'Please use the new bond_1g and bond_10g options to configure the bond interfaces.' 
+ assert msg in exc.value.args[0]['msg'] + msg = 'This module cannot set or change "method"' + assert msg in exc.value.args[0]['msg'] + + def test_deprecated_1g_only(self): + ''' deprecated with 1g options only ''' + args = dict(self.DEPRECATED_ARGS) # deep copy as other tests can modify args + for key in list(args): + if '10g' in key: + del args[key] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'Please use the new bond_1g option to configure the bond 1G interface.' + assert msg in exc.value.args[0]['msg'] + msg = 'This module cannot set or change "method"' + assert msg in exc.value.args[0]['msg'] + + def test_deprecated_10g_only(self): + ''' deprecated with 10g options only ''' + args = dict(self.DEPRECATED_ARGS) # deep copy as other tests can modify args + for key in list(args): + if '1g' in key: + del args[key] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'Please use the new bond_10g option to configure the bond 10G interface.' 
+ assert msg in exc.value.args[0]['msg'] + msg = 'This module cannot set or change "method"' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_nothing(self, mock_create_sf_connection): + ''' modify without 1g or 10g options ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + for key in list(args): + if '1g' in key or '10g' in key: + del args[key] + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + print('LN:', my_obj.module.params) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + assert len(my_obj.sfe.set_network_config_args) == 0 + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_all(self, mock_create_sf_connection): + ''' modify with all options ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + assert 'Bond1G' in my_obj.sfe.set_network_config_args + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_1g_only(self, mock_create_sf_connection): + ''' modify with 1g options only ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + for key in list(args): + if '10g' in key: + del args[key] + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with 
pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + assert 'Bond1G' in my_obj.sfe.set_network_config_args + assert 'Bond10G' not in my_obj.sfe.set_network_config_args + print(my_obj.sfe.set_network_config_args['Bond1G']) + for key in args['bond_1g']: + if key != 'bond_lacp_rate': + assert my_obj.sfe.set_network_config_args['Bond1G'][mapkey(key)] == args['bond_1g'][key] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_10g_only(self, mock_create_sf_connection): + ''' modify with 10g options only ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + for key in list(args): + if '1g' in key: + del args[key] + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + assert 'Bond1G' not in my_obj.sfe.set_network_config_args + assert 'Bond10G' in my_obj.sfe.set_network_config_args + assert my_obj.sfe.set_network_config_args['Bond10G']['bond-lacp_rate'] == args['bond_10g']['bond_lacp_rate'] + for key in args['bond_10g']: + assert my_obj.sfe.set_network_config_args['Bond10G'][mapkey(key)] == args['bond_10g'][key] diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py new file mode 100644 index 000000000..3e163d000 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py @@ -0,0 +1,324 @@ +''' unit test for Ansible module: na_elementsw_node.py ''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +import pytest + +from 
ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_node \ + import ElementSWNode as my_module # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +MODIFY_ERROR = 'some_error_in_modify_access_group' + +NODE_ID1 = 777 +NODE_ID2 = 888 +NODE_NAME1 = 'node_name1' +NODE_NAME2 = 'node_name2' + + +class MockSFConnection(object): + ''' mock connection to ElementSW host ''' + + class Bunch(object): # pylint: disable=too-few-public-methods + ''' create object with arbitrary attributes ''' + def __init__(self, **kw): + ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' + setattr(self, '__dict__', 
kw) + + def __init__(self, force_error=False, where=None, node_id=None, cluster_name='', node_state='Pending'): + ''' save arguments ''' + self.force_error = force_error + self.where = where + self.node_id = node_id + self.cluster_name = cluster_name + self.node_state = node_state + + def list_all_nodes(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build node lists: nodes, pending_nodes, active_pending_nodes ''' + nodes = list() + pending_nodes = list() + active_pending_nodes = list() + if self.node_id is None: + node_list = list() + else: + node_list = [self.node_id] + attrs1 = dict(mip='10.10.10.101', name=NODE_NAME1, node_id=NODE_ID1) + attrs2 = dict(mip='10.10.10.101', name=NODE_NAME2, node_id=NODE_ID2) + if self.where == 'pending': + attrs1['pending_node_id'] = NODE_ID1 + attrs2['pending_node_id'] = NODE_ID2 + node1 = self.Bunch(**attrs1) + node2 = self.Bunch(**attrs2) + if self.where == 'nodes': + nodes = [node1, node2] + elif self.where == 'pending': + pending_nodes = [node1, node2] + elif self.where == 'active_pending': + active_pending_nodes = [node1, node2] + node_list = self.Bunch(nodes=nodes, pending_nodes=pending_nodes, pending_active_nodes=active_pending_nodes) + return node_list + + def add_nodes(self, *args, **kwargs): # pylint: disable=unused-argument + print('adding_node: ', repr(args), repr(kwargs)) + + def remove_nodes(self, *args, **kwargs): # pylint: disable=unused-argument + print('removing_node: ', repr(args), repr(kwargs)) + + def get_cluster_config(self, *args, **kwargs): # pylint: disable=unused-argument + print('get_cluster_config: ', repr(args), repr(kwargs)) + cluster = self.Bunch(cluster=self.cluster_name, state=self.node_state) + return self.Bunch(cluster=cluster) + + def set_cluster_config(self, *args, **kwargs): # pylint: disable=unused-argument + print('set_cluster_config: ', repr(args), repr(kwargs)) + + def list_drives(self, *args, **kwargs): # pylint: disable=unused-argument + print('list_drives: ', 
repr(args), repr(kwargs)) + drive = self.Bunch(node_id=self.node_id, status="active") + return self.Bunch(drives=[drive]) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + ARGS = { + 'state': 'present', + 'node_ids': [NODE_ID1, NODE_ID2], + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + } + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_node_fail_not_pending(self, mock_create_sf_connection): + ''' adding a node - fails as these nodes are unknown ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + msg = 'nodes not in pending or active lists' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_node(self, mock_create_sf_connection): + ''' adding a node ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(where='pending') + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert 
exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_node_idempotent(self, mock_create_sf_connection): + ''' adding a node that is already in the cluster ''' + args = dict(self.ARGS) + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(where='nodes') + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_remove_node(self, mock_create_sf_connection): + ''' removing a node that is in the cluster ''' + args = dict(self.ARGS) + args['state'] = 'absent' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(where='nodes') + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_remove_node_idempotent(self, mock_create_sf_connection): + ''' removing a node that is not in the cluster ''' + args = dict(self.ARGS) + args['state'] = 'absent' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_remove_node_with_active_drive(self, mock_create_sf_connection): + ''' removing a node that is in the cluster but still associated with a drive ''' + args = 
dict(self.ARGS) + args['state'] = 'absent' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(node_id=NODE_ID1, where='nodes') + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + msg = 'Error deleting node %s: node has active drives' % NODE_NAME1 + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_set_cluster_name_only(self, mock_create_sf_connection): + ''' set cluster name without adding the node ''' + args = dict(self.ARGS) + args['preset_only'] = True + args['cluster_name'] = 'cluster_name' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + message = 'List of updated nodes with cluster_name:' + assert message in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_set_cluster_name_only_idempotent(self, mock_create_sf_connection): + ''' set cluster name without adding the node - name already set ''' + args = dict(self.ARGS) + args['preset_only'] = True + args['cluster_name'] = 'cluster_name' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(cluster_name=args['cluster_name']) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + message = '' + assert message == exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def 
test_set_cluster_name_and_add(self, mock_create_sf_connection): + ''' set cluster name and add the node ''' + args = dict(self.ARGS) + args['cluster_name'] = 'cluster_name' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(where='pending') + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + message = 'List of updated nodes with cluster_name:' + assert message in exc.value.args[0]['msg'] + message = 'List of added nodes: ' + assert message in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_set_cluster_name_and_add_idempotent(self, mock_create_sf_connection): + ''' set cluster name and add the node ''' + args = dict(self.ARGS) + args['cluster_name'] = 'cluster_name' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(where='nodes', cluster_name=args['cluster_name']) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + message = '' + assert message == exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_set_cluster_name_already_active_no_change(self, mock_create_sf_connection): + ''' set cluster name fails because node state is 'Active' ''' + args = dict(self.ARGS) + args['cluster_name'] = 'cluster_name' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(where='nodes', cluster_name=args['cluster_name'], node_state='Active') + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + 
print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + message = '' + assert message == exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_set_cluster_name_already_active_change_not_allowed(self, mock_create_sf_connection): + ''' set cluster name fails because node state is 'Active' ''' + args = dict(self.ARGS) + args['cluster_name'] = 'new_cluster_name' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(where='nodes', cluster_name='old_cluster_name', node_state='Active') + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = "Error updating cluster name for node %s, already in 'Active' state" % NODE_ID1 + assert message == exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py new file mode 100644 index 000000000..83ac3711a --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py @@ -0,0 +1,300 @@ +''' unit test for Ansible module: na_elementsw_qos_policy.py ''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from 
ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_qos_policy \ + import ElementSWQosPolicy as my_module # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +CREATE_ERROR = 'create', 'some_error_in_create_qos_policy' +MODIFY_ERROR = 'modify', 'some_error_in_modify_qos_policy' +DELETE_ERROR = 'delete', 'some_error_in_delete_qos_policy' + +POLICY_ID = 888 +POLICY_NAME = 'element_qos_policy_name' + + +class MockSFConnection(object): + ''' mock connection to ElementSW host ''' + + class Bunch(object): # pylint: disable=too-few-public-methods + ''' create object with arbitrary attributes ''' + def __init__(self, **kw): + ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' + setattr(self, '__dict__', kw) + + def __init__(self, force_error=False, where=None, qos_policy_name=None): + ''' save arguments ''' + self.force_error = force_error + self.where = where + self.policy_name = qos_policy_name + + def list_qos_policies(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build qos_policy list: qos_policy.name, 
qos_policy.account_id ''' + if self.policy_name: + qos_policy_name = self.policy_name + else: + qos_policy_name = POLICY_NAME + qos = self.Bunch(min_iops=1000, max_iops=20000, burst_iops=20000) + qos_policy = self.Bunch(name=qos_policy_name, qos_policy_id=POLICY_ID, qos=qos) + qos_policies = [qos_policy] + qos_policy_list = self.Bunch(qos_policies=qos_policies) + return qos_policy_list + + def create_qos_policy(self, *args, **kwargs): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'create_exception' in self.where: + raise netapp_utils.solidfire.common.ApiServerError(*CREATE_ERROR) + + def modify_qos_policy(self, *args, **kwargs): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'modify_exception' in self.where: + raise netapp_utils.solidfire.common.ApiServerError(*MODIFY_ERROR) + + def delete_qos_policy(self, *args, **kwargs): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'delete_exception' in self.where: + raise netapp_utils.solidfire.common.ApiServerError(*DELETE_ERROR) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + ARGS = { + 'state': 'present', + 'name': 'element_qos_policy_name', + 'qos': {'minIOPS': 1000, 'maxIOPS': 20000, 'burstIOPS': 20000}, + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + } + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + 
@patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_qos_policy(self, mock_create_sf_connection): + ''' adding a qos_policy ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['name'] += '_1' # new name to force a create + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_qos_policy_idempotent(self, mock_create_sf_connection): + ''' adding a qos_policy ''' + args = dict(self.ARGS) + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_delete_qos_policy(self, mock_create_sf_connection): + ''' removing a qos policy ''' + args = dict(self.ARGS) + args['state'] = 'absent' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_delete_qos_policy_idempotent(self, mock_create_sf_connection): + ''' removing a qos policy ''' + args = dict(self.ARGS) + args['state'] = 'absent' + args['name'] += '_1' # new name to force idempotency + 
set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_qos_policy(self, mock_create_sf_connection): + ''' modifying a qos policy ''' + args = dict(self.ARGS) + args['qos'] = {'minIOPS': 2000} + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_rename_qos_policy(self, mock_create_sf_connection): + ''' renaming a qos policy ''' + args = dict(self.ARGS) + args['from_name'] = args['name'] + args['name'] = 'a_new_name' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_rename_modify_qos_policy_idempotent(self, mock_create_sf_connection): + ''' renaming a qos policy ''' + args = dict(self.ARGS) + args['from_name'] = 'some_older_name' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not 
exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_create_qos_policy_exception(self, mock_create_sf_connection): + ''' creating a qos policy can raise an exception ''' + args = dict(self.ARGS) + args['name'] += '_1' # new name to force a create + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['create_exception']) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = 'Error creating qos policy: %s' % POLICY_NAME + assert exc.value.args[0]['msg'].startswith(message) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_qos_policy_exception(self, mock_create_sf_connection): + ''' modifying a qos policy can raise an exception ''' + args = dict(self.ARGS) + args['qos'] = {'minIOPS': 2000} + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['modify_exception']) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = 'Error updating qos policy: %s' % POLICY_NAME + assert exc.value.args[0]['msg'].startswith(message) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_delete_qos_policy_exception(self, mock_create_sf_connection): + ''' deleting a qos policy can raise an exception ''' + args = dict(self.ARGS) + args['state'] = 'absent' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['delete_exception']) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + 
my_obj.apply() + print(exc.value.args[0]) + message = 'Error deleting qos policy: %s' % POLICY_NAME + assert exc.value.args[0]['msg'].startswith(message) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_check_error_reporting_on_missing_qos_option(self, mock_create_sf_connection): + ''' report error if qos option is not given on create ''' + args = dict(self.ARGS) + args['name'] += '_1' # new name to force a create + args.pop('qos') + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = "Error creating qos policy: %s, 'qos:' option is required" % args['name'] + assert exc.value.args[0]['msg'] == message + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_check_error_reporting_on_missing_from_name_policy(self, mock_create_sf_connection): + ''' report error if qos policy to rename does not exist ''' + args = dict(self.ARGS) + args['name'] += '_1' # new name to force a create + args['from_name'] = 'something_not_likely_to_exist' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = "Error renaming qos policy, no existing policy with name/id: %s" % args['from_name'] + assert exc.value.args[0]['msg'] == message diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py new file mode 100644 index 000000000..7dc6e2d6b --- /dev/null +++ 
b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py @@ -0,0 +1,138 @@ +''' unit test for Ansible module: na_elementsw_account.py ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest + +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_account \ + import ElementSWAccount as my_module # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +ADD_ERROR = 'some_error_in_add_account' + + +class MockSFConnection(object): + ''' mock connection to ElementSW host ''' + + class Bunch(object): # 
pylint: disable=too-few-public-methods + ''' create object with arbitrary attributes ''' + def __init__(self, **kw): + ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' + setattr(self, '__dict__', kw) + + def __init__(self, force_error=False, where=None): + ''' save arguments ''' + self.force_error = force_error + self.where = where + +# TODO: replace list_accounts and add_account as needed + def list_accounts(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build account list: account.username, account.account_id ''' + accounts = list() + account_list = self.Bunch(accounts=accounts) + return account_list + + def add_account(self, *args, **kwargs): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'add' in self.where: + # The module does not check for a specific exception :( + raise OSError(ADD_ERROR) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_ensure_command_called(self, mock_create_sf_connection): + ''' a more interesting test ''' + set_module_args({ + 'state': 'present', + 'element_username': 'element_username', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + }) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with 
pytest.raises(AnsibleExitJson) as exc: + # It may not be a good idea to start with apply + # More atomic methods can be easier to mock + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_check_error_reporting_on_add_exception(self, mock_create_sf_connection): + ''' a more interesting test ''' + set_module_args({ + 'state': 'present', + 'element_username': 'element_username', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + }) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['add']) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + # It may not be a good idea to start with apply + # More atomic methods can be easier to mock + # apply() is calling list_accounts() and add_account() + my_obj.apply() + print(exc.value.args[0]) + message = 'Error creating account element_username: %s' % ADD_ERROR + assert exc.value.args[0]['msg'] == message diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py new file mode 100644 index 000000000..e2dc51f79 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py @@ -0,0 +1,343 @@ +''' unit test for Ansible module: na_elementsw_account.py ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest + +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch, Mock +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +import 
ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan \ + import ElementSWVlan as vlan # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +ADD_ERROR = 'some_error_in_add_account' + + +class MockSFConnection(object): + ''' mock connection to ElementSW host ''' + + class Bunch(object): # pylint: disable=too-few-public-methods + ''' create object with arbitrary attributes ''' + def __init__(self, **kw): + ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' + setattr(self, '__dict__', kw) + + class Vlan(object): + def __init__(self, entries): + self.__dict__.update(entries) + + def __init__(self, force_error=False, where=None): + ''' save arguments ''' + self.force_error = force_error + self.where = where + + def list_virtual_networks(self, virtual_network_tag=None): # pylint: disable=unused-argument + ''' list of 
vlans ''' + if virtual_network_tag == '1': + add1 = self.Bunch( + start='2.2.2.2', + size=4 + ) + add2 = self.Bunch( + start='3.3.3.3', + size=4 + ) + vlan = self.Bunch( + attributes={'key': 'value', 'config-mgmt': 'ansible', 'event-source': 'na_elementsw_vlan'}, + name="test", + address_blocks=[ + add1, + add2 + ], + svip='192.168.1.2', + gateway='0.0.0.0', + netmask='255.255.248.0', + namespace=False + ) + vlans = self.Bunch( + virtual_networks=[vlan] + ) + else: + vlans = self.Bunch( + virtual_networks=[] + ) + return vlans + + def add_virtual_network(self, virtual_network_tag=None, **create): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'add' in self.where: + # The module does not check for a specific exception :( + raise OSError(ADD_ERROR) + + def remove_virtual_network(self, virtual_network_tag=None): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'remove' in self.where: + # The module does not check for a specific exception :( + raise OSError(ADD_ERROR) + + def modify_virtual_network(self, virtual_network_tag=None, **modify): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'modify' in self.where: + # The module does not check for a specific exception :( + raise OSError(ADD_ERROR) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + vlan() + print('Info: %s' % exc.value.args[0]['msg']) + + def 
mock_args(self): + args = { + 'state': 'present', + 'name': 'test', + 'vlan_tag': 1, + 'address_blocks': [ + {'start': '192.168.1.2', 'size': 5} + ], + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'netmask': '255.255.248.0', + 'gateway': '0.0.0.0', + 'namespace': False, + 'svip': '192.168.1.2' + } + return dict(args) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module.NaElementSWModule.set_element_attributes') + def test_successful_create(self, mock_set_attributes, mock_create_sf_connection): + ''' successful create''' + mock_set_attributes.return_value = {'key': 'new_value'} + data = self.mock_args() + data['vlan_tag'] = '3' + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_successful_delete(self, mock_create_sf_connection): + ''' successful delete''' + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_successful_modify(self, mock_create_sf_connection): + ''' successful modify''' + data = self.mock_args() + data['svip'] = '3.4.5.6' + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with 
pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details') + def test_successful_modify_address_blocks_same_length(self, mock_get, mock_create_sf_connection): + ''' successful modify''' + mock_get.return_value = { + 'address_blocks': [ + {'start': '10.10.10.20', 'size': 5}, + {'start': '10.10.10.40', 'size': 5} + ] + } + data = self.mock_args() + data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5}, + {'start': '10.20.10.50', 'size': 5}] + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details') + def test_successful_modify_address_blocks_different_length_1(self, mock_get, mock_create_sf_connection): + ''' successful modify''' + mock_get.return_value = { + 'address_blocks': [ + {'start': '10.10.10.20', 'size': 5}, + {'start': '10.20.10.30', 'size': 5} + ] + } + data = self.mock_args() + data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5}, + {'start': '10.20.10.30', 'size': 5}, + {'start': '10.20.10.50', 'size': 5}] + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + 
@patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details') + def test_successful_modify_address_blocks_different_length_2(self, mock_get, mock_create_sf_connection): + ''' successful modify''' + mock_get.return_value = { + 'address_blocks': [ + {'start': '10.10.10.20', 'size': 5}, + {'start': '10.20.10.30', 'size': 5}, + {'start': '10.20.10.40', 'size': 5} + ] + } + data = self.mock_args() + data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5}, + {'start': '10.20.10.40', 'size': 5}, + {'start': '10.20.10.30', 'size': 5}] + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details') + def test_successful_modify_address_blocks_different_length_3(self, mock_get, mock_create_sf_connection): + ''' successful modify''' + mock_get.return_value = { + 'address_blocks': [ + {'start': '10.10.10.20', 'size': 5}, + {'start': '10.10.10.30', 'size': 5}, + {'start': '10.20.10.40', 'size': 5} + ] + } + data = self.mock_args() + data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5}, + {'start': '10.20.10.40', 'size': 5}, + {'start': '10.20.10.30', 'size': 5}] + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_helper_validate_keys(self, mock_create_sf_connection): + '''test 
validate_keys()''' + data = self.mock_args() + del data['svip'] + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.validate_keys() + msg = "One or more required fields ['address_blocks', 'svip', 'netmask', 'name'] for creating VLAN is missing" + assert exc.value.args[0]['msg'] == msg + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_successful_modify_idempotent(self, mock_create_sf_connection): + ''' successful modify''' + data = self.mock_args() + data['address_blocks'] = [{'start': '2.2.2.2', 'size': 4}, + {'start': '3.3.3.3', 'size': 4}] + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_successful_modify_attribute_value(self, mock_create_sf_connection): + ''' successful modify''' + data = self.mock_args() + data['address_blocks'] = [{'start': '2.2.2.2', 'size': 4}, + {'start': '3.3.3.3', 'size': 4}] + data['attributes'] = {'key': 'value2'} + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_successful_modify_attribute_key(self, mock_create_sf_connection): + ''' successful modify''' + data = self.mock_args() + data['address_blocks'] = [{'start': '2.2.2.2', 'size': 4}, + {'start': '3.3.3.3', 'size': 
4}] + data['attributes'] = {'key2': 'value2'} + set_module_args(data) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = vlan() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py new file mode 100644 index 000000000..926dda90b --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py @@ -0,0 +1,364 @@ +''' unit test for Ansible module: na_elementsw_volume.py ''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch +import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') + +from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_volume \ + import ElementSWVolume as my_module # module under test + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def exit_json(*args, **kwargs): # pylint: 
disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +CREATE_ERROR = 'create', 'some_error_in_create_volume' +MODIFY_ERROR = 'modify', 'some_error_in_modify_volume' +DELETE_ERROR = 'delete', 'some_error_in_delete_volume' + +POLICY_ID = 888 +POLICY_NAME = 'element_qos_policy_name' +VOLUME_ID = 777 +VOLUME_NAME = 'element_volume_name' + + +class MockSFConnection(object): + ''' mock connection to ElementSW host ''' + + class Bunch(object): # pylint: disable=too-few-public-methods + ''' create object with arbitrary attributes ''' + def __init__(self, **kw): + ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' + setattr(self, '__dict__', kw) + + def __init__(self, force_error=False, where=None, with_qos_policy_id=True): + ''' save arguments ''' + self.force_error = force_error + self.where = where + self.with_qos_policy_id = with_qos_policy_id + + def list_qos_policies(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build qos_policy list ''' + qos_policy_name = POLICY_NAME + qos = self.Bunch(min_iops=1000, max_iops=20000, burst_iops=20000) + qos_policy = self.Bunch(name=qos_policy_name, qos_policy_id=POLICY_ID, qos=qos) + qos_policy_1 = self.Bunch(name=qos_policy_name + '_1', qos_policy_id=POLICY_ID + 1, qos=qos) + qos_policies = [qos_policy, qos_policy_1] + qos_policy_list = self.Bunch(qos_policies=qos_policies) + return qos_policy_list + + def list_volumes_for_account(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build volume list: volume.name, volume.id ''' + volume = self.Bunch(name=VOLUME_NAME, volume_id=VOLUME_ID, delete_time='') + volumes = [volume] + 
volume_list = self.Bunch(volumes=volumes) + return volume_list + + def list_volumes(self, *args, **kwargs): # pylint: disable=unused-argument + ''' build volume details: volume.name, volume.id ''' + if self.with_qos_policy_id: + qos_policy_id = POLICY_ID + else: + qos_policy_id = None + qos = self.Bunch(min_iops=1000, max_iops=20000, burst_iops=20000) + volume = self.Bunch(name=VOLUME_NAME, volume_id=VOLUME_ID, delete_time='', access='rw', + account_id=1, qos=qos, qos_policy_id=qos_policy_id, total_size=1000000000, + attributes={'config-mgmt': 'ansible', 'event-source': 'na_elementsw_volume'} + ) + volumes = [volume] + volume_list = self.Bunch(volumes=volumes) + return volume_list + + def get_account_by_name(self, *args, **kwargs): # pylint: disable=unused-argument + ''' returns account_id ''' + if self.force_error and 'get_account_id' in self.where: + account_id = None + else: + account_id = 1 + account = self.Bunch(account_id=account_id) + result = self.Bunch(account=account) + return result + + def create_volume(self, *args, **kwargs): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'create_exception' in self.where: + raise netapp_utils.solidfire.common.ApiServerError(*CREATE_ERROR) + + def modify_volume(self, *args, **kwargs): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + print("modify: %s, %s " % (repr(args), repr(kwargs))) + if self.force_error and 'modify_exception' in self.where: + raise netapp_utils.solidfire.common.ApiServerError(*MODIFY_ERROR) + + def delete_volume(self, *args, **kwargs): # pylint: disable=unused-argument + ''' We don't check the return code, but could force an exception ''' + if self.force_error and 'delete_exception' in self.where: + raise netapp_utils.solidfire.common.ApiServerError(*DELETE_ERROR) + + def purge_deleted_volume(self, *args, **kwargs): # pylint: disable=unused-argument + ''' We 
don't check the return code, but could force an exception ''' + if self.force_error and 'delete_exception' in self.where: + raise netapp_utils.solidfire.common.ApiServerError(*DELETE_ERROR) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + ARGS = { + 'state': 'present', + 'name': VOLUME_NAME, + 'account_id': 'element_account_id', + 'qos': {'minIOPS': 1000, 'maxIOPS': 20000, 'burstIOPS': 20000}, + 'qos_policy_name': POLICY_NAME, + 'size': 1, + 'enable512e': True, + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + } + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_volume(self, mock_create_sf_connection): + ''' adding a volume ''' + args = dict(self.ARGS) # deep copy as other tests can modify args + args['name'] += '_1' # new name to force a create + args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_or_modify_volume_idempotent_qos_policy(self, mock_create_sf_connection): + ''' adding a volume ''' + args = dict(self.ARGS) + args.pop('qos') # parameters are mutually 
exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_add_or_modify_volume_idempotent_qos(self, mock_create_sf_connection): + ''' adding a volume ''' + args = dict(self.ARGS) + args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(with_qos_policy_id=False) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_delete_volume(self, mock_create_sf_connection): + ''' removing a volume ''' + args = dict(self.ARGS) + args['state'] = 'absent' + args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_delete_volume_idempotent(self, mock_create_sf_connection): + ''' removing a volume ''' + args = dict(self.ARGS) + args['state'] = 'absent' + args['name'] += '_1' # new name to force idempotency + args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # 
my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_volume_qos(self, mock_create_sf_connection): + ''' modifying a volume ''' + args = dict(self.ARGS) + args['qos'] = {'minIOPS': 2000} + args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(with_qos_policy_id=False) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_volume_qos_policy_to_qos(self, mock_create_sf_connection): + ''' modifying a volume ''' + args = dict(self.ARGS) + args['qos'] = {'minIOPS': 2000} + args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_volume_qos_policy(self, mock_create_sf_connection): + ''' modifying a volume ''' + args = dict(self.ARGS) + args['qos_policy_name'] += '_1' + args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + 
mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_volume_qos_to_qos_policy(self, mock_create_sf_connection): + ''' modifying a volume ''' + args = dict(self.ARGS) + args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(with_qos_policy_id=False) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_create_volume_exception(self, mock_create_sf_connection): + ''' creating a volume can raise an exception ''' + args = dict(self.ARGS) + args['name'] += '_1' # new name to force a create + args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['create_exception']) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = 'Error provisioning volume: %s' % args['name'] + assert exc.value.args[0]['msg'].startswith(message) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_modify_volume_exception(self, mock_create_sf_connection): + ''' modifying a volume can raise an exception ''' + args = dict(self.ARGS) + args['qos'] = {'minIOPS': 2000} + args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name + 
set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['modify_exception']) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = 'Error updating volume: %s' % VOLUME_ID + assert exc.value.args[0]['msg'].startswith(message) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_delete_volume_exception(self, mock_create_sf_connection): + ''' deleting a volume can raise an exception ''' + args = dict(self.ARGS) + args['state'] = 'absent' + args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['delete_exception']) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = 'Error deleting volume: %s' % VOLUME_ID + assert exc.value.args[0]['msg'].startswith(message) + + @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection') + def test_check_error_reporting_on_non_existent_qos_policy(self, mock_create_sf_connection): + ''' report error if qos option is not given on create ''' + args = dict(self.ARGS) + args['name'] += '_1' # new name to force a create + args.pop('qos') + args['qos_policy_name'] += '_2' + set_module_args(args) + # my_obj.sfe will be assigned a MockSFConnection object: + mock_create_sf_connection.return_value = MockSFConnection() + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print(exc.value.args[0]) + message = "Cannot find qos policy with name/id: %s" % args['qos_policy_name'] + assert exc.value.args[0]['msg'] == message diff --git 
a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py new file mode 100644 index 000000000..171a7bae5 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py @@ -0,0 +1,149 @@ +# Copyright (c) 2018 NetApp +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils netapp_module.py ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.netapp.elementsw.tests.unit.compat import unittest +from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule as na_helper + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def test_get_cd_action_create(self): + ''' validate cd_action for create ''' + current = None + desired = {'state': 'present'} + my_obj = na_helper() + result = my_obj.get_cd_action(current, desired) + assert result == 'create' + + def test_get_cd_action_delete(self): + ''' validate cd_action for delete ''' + current = {'state': 'absent'} + desired = {'state': 'absent'} + my_obj = na_helper() + result = my_obj.get_cd_action(current, desired) + assert result == 'delete' + + def test_get_cd_action(self): + ''' validate cd_action for returning None ''' + current = None + desired = {'state': 'absent'} + my_obj = na_helper() + result = my_obj.get_cd_action(current, desired) + assert result is None + + def test_get_modified_attributes_for_no_data(self): + ''' validate modified attributes when current is None ''' + current = None + desired = {'name': 'test'} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired) + assert result == {} + + def test_get_modified_attributes(self): + ''' validate modified attributes ''' + current = {'name': ['test', 
'abcd', 'xyz', 'pqr'], 'state': 'present'} + desired = {'name': ['abcd', 'abc', 'xyz', 'pqr'], 'state': 'absent'} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired) + assert result == desired + + def test_get_modified_attributes_for_intersecting_mixed_list(self): + ''' validate modified attributes for list diff ''' + current = {'name': [2, 'four', 'six', 8]} + desired = {'name': ['a', 8, 'ab', 'four', 'abcd']} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'name': ['a', 'ab', 'abcd']} + + def test_get_modified_attributes_for_intersecting_list(self): + ''' validate modified attributes for list diff ''' + current = {'name': ['two', 'four', 'six', 'eight']} + desired = {'name': ['a', 'six', 'ab', 'four', 'abc']} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'name': ['a', 'ab', 'abc']} + + def test_get_modified_attributes_for_nonintersecting_list(self): + ''' validate modified attributes for list diff ''' + current = {'name': ['two', 'four', 'six', 'eight']} + desired = {'name': ['a', 'ab', 'abd']} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'name': ['a', 'ab', 'abd']} + + def test_get_modified_attributes_for_list_of_dicts_no_data(self): + ''' validate modified attributes for list diff ''' + current = None + desired = {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {} + + def test_get_modified_attributes_for_intersecting_list_of_dicts(self): + ''' validate modified attributes for list diff ''' + current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]} + desired = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 
5}]} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]} + + def test_get_modified_attributes_for_nonintersecting_list_of_dicts(self): + ''' validate modified attributes for list diff ''' + current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]} + desired = {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]} + + def test_get_modified_attributes_for_list_diff(self): + ''' validate modified attributes for list diff ''' + current = {'name': ['test', 'abcd'], 'state': 'present'} + desired = {'name': ['abcd', 'abc'], 'state': 'present'} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired, True) + assert result == {'name': ['abc']} + + def test_get_modified_attributes_for_no_change(self): + ''' validate modified attributes for same data in current and desired ''' + current = {'name': 'test'} + desired = {'name': 'test'} + my_obj = na_helper() + result = my_obj.get_modified_attributes(current, desired) + assert result == {} + + def test_is_rename_action_for_empty_input(self): + ''' validate rename action for input None ''' + source = None + target = None + my_obj = na_helper() + result = my_obj.is_rename_action(source, target) + assert result == source + + def test_is_rename_action_for_no_source(self): + ''' validate rename action when source is None ''' + source = None + target = 'test2' + my_obj = na_helper() + result = my_obj.is_rename_action(source, target) + assert result is False + + def test_is_rename_action_for_no_target(self): + ''' validate rename action when 
target is None ''' + source = 'test2' + target = None + my_obj = na_helper() + result = my_obj.is_rename_action(source, target) + assert result is True + + def test_is_rename_action(self): + ''' validate rename action ''' + source = 'test' + target = 'test2' + my_obj = na_helper() + result = my_obj.is_rename_action(source, target) + assert result is False diff --git a/ansible_collections/netapp/elementsw/tests/unit/requirements.txt b/ansible_collections/netapp/elementsw/tests/unit/requirements.txt new file mode 100644 index 000000000..dde1958f1 --- /dev/null +++ b/ansible_collections/netapp/elementsw/tests/unit/requirements.txt @@ -0,0 +1 @@ +solidfire-sdk-python ; python_version >= '2.7' diff --git a/ansible_collections/netapp/ontap/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/netapp/ontap/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..e67eb0f94 --- /dev/null +++ b/ansible_collections/netapp/ontap/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,222 @@ +--- +name: 🐛 Bug report +description: Create a report to help us improve + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to report a bug in netapp.ontap!** + + + ⚠ + Verify first that your issue is not [already reported on + GitHub][issue search] and keep in mind that we may have to keep + the current behavior because [every change breaks someone's + workflow][XKCD 1172]. + We try to be mindful about this. + + Also test if the latest release and devel branch are affected too. + + + **Tip:** If you are seeking community support, please consider + [Join our Slack community][ML||IRC]. + + + + [ML||IRC]: + https://join.slack.com/t/netapppub/shared_invite/zt-njcjx2sh-1VR2mEDvPcJAmPutOnP~mg + + [issue search]: ../search?q=is%3Aissue&type=issues + + [XKCD 1172]: https://xkcd.com/1172/ + + +- type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. 
+ placeholder: >- + When I try to do X with netapp.ontap from the devel branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the rst file, module, plugin, task or + feature below, *use your best guess if unsure*. + + + **Tip:** Cannot find it in this repository? Please be advised that + the source for some parts of the documentation is hosted outside + of this repository. If the page you are reporting describes + modules/plugins/etc that are not officially supported by the + Ansible Core Engineering team, there is a good chance that it is + coming from one of the [Ansible Collections maintained by the + community][collections org]. If this is the case, please make sure + to file an issue under the appropriate project there instead. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` below, under + the prompt line. Please don't wrap it with triple backticks — your + whole input will be turned into a code snippet automatically. 
+ render: console + value: | + $ ansible --version + placeholder: | + $ ansible --version + ansible [core 2.11.0b4.post0] (detached HEAD ref: refs/) last updated 2021/04/02 00:33:35 (GMT +200) + config file = None + configured module search path = ['~/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible python module location = ~/src/github/ansible/ansible/lib/ansible + ansible collection location = ~/.ansible/collections:/usr/share/ansible/collections + executable location = bin/ansible + python version = 3.9.0 (default, Oct 26 2020, 13:08:59) [GCC 10.2.0] + jinja version = 2.11.3 + libyaml = True + validations: + required: true + +- type: textarea + attributes: + label: ONTAP Collection Version + description: >- + ONTAP Collection Version. Run `ansible-galaxy collection` and copy the entire output + render: console + value: | + $ ansible-galaxy collection list + validations: + required: true + +- type: textarea + attributes: + label: ONTAP Version + description: >- + ONTAP Version. Run `version` on the cluster command line and copy the entire output + render: console + value: | + sridharc-vsim34::> version + NetApp Release 9.7.0: Tue May 18 10:45:16 UTC 2021 + validations: + required: true + +- type: textarea + attributes: + label: Playbook + description: >- + The task from the playbook that is giving you the issue + render: console + validations: + required: true + +- type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: | + 1. Implement the following playbook: + + ```yaml + --- + # ping.yml + - hosts: all + gather_facts: false + tasks: + - ping: + ... + ``` + 2. 
Then run `ANSIBLE_DEBUG=1 ansible-playbook ping.yml -vvvvv` + 3. An error occurs. + validations: + required: true + +- type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y and was shocked + that it did not. + validations: + required: true + +- type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output and don't wrap it with tripple backticks — your + whole input will be turned into a code snippet automatically. + render: console + placeholder: >- + Certificate did not match expected hostname: files.pythonhosted.org. Certificate: {'notAfter': 'Apr 28 19:20:25 2021 GMT', 'subjectAltName': ((u'DNS', 'r.ssl.fastly.net'), (u'DNS', '*.catchpoint.com'), (u'DNS', '*.cnn.io'), (u'DNS', '*.dollarshaveclub.com'), (u'DNS', '*.eater.com'), (u'DNS', '*.fastly.picmonkey.com'), (u'DNS', '*.files.saymedia-content.com'), (u'DNS', '*.ft.com'), (u'DNS', '*.meetupstatic.com'), (u'DNS', '*.nfl.com'), (u'DNS', '*.pagar.me'), (u'DNS', '*.picmonkey.com'), (u'DNS', '*.realself.com'), (u'DNS', '*.sbnation.com'), (u'DNS', '*.shakr.com'), (u'DNS', '*.streamable.com'), (u'DNS', '*.surfly.com'), (u'DNS', '*.theverge.com'), (u'DNS', '*.thrillist.com'), (u'DNS', '*.vox-cdn.com'), (u'DNS', '*.vox.com'), (u'DNS', '*.voxmedia.com'), (u'DNS', 'eater.com'), (u'DNS', 'ft.com'), (u'DNS', 'i.gse.io'), (u'DNS', 'picmonkey.com'), (u'DNS', 'realself.com'), (u'DNS', 'static.wixstatic.com'), (u'DNS', 'streamable.com'), (u'DNS', 'surfly.com'), (u'DNS', 'theverge.com'), (u'DNS', 'vox-cdn.com'), (u'DNS', 'vox.com'), (u'DNS', 'www.joyent.com')), 'subject': ((('countryName', u'US'),), (('stateOrProvinceName', u'California'),), (('localityName', u'San Francisco'),), (('organizationName', u'Fastly, Inc'),), (('commonName', 
u'r.ssl.fastly.net'),))} + Exception: + Traceback (most recent call last): + File "/usr/local/lib/python2.6/dist-packages/pip/basecommand.py", line 215, in main + status = self.run(options, args) + File "/usr/local/lib/python2.6/dist-packages/pip/commands/install.py", line 335, in run + wb.build(autobuilding=True) + File "/usr/local/lib/python2.6/dist-packages/pip/wheel.py", line 749, in build + self.requirement_set.prepare_files(self.finder) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 380, in prepare_files + ignore_dependencies=self.ignore_dependencies)) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 620, in _prepare_file + session=self.session, hashes=hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 821, in unpack_url + hashes=hashes + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 659, in unpack_http_url + hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 853, in _download_http_url + stream=True, + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 521, in get + return self.request('GET', url, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 386, in request + return super(PipSession, self).request(method, url, *args, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 508, in request + resp = self.send(prep, **send_kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 618, in send + r = adapter.send(request, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/cachecontrol/adapter.py", line 47, in send + resp = super(CacheControlAdapter, self).send(request, **kw) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/adapters.py", line 506, in send + raise SSLError(e, request=request) + SSLError: HTTPSConnectionPool(host='files.pythonhosted.org', 
port=443): Max retries exceeded with url: /packages/ef/ab/aa12712415809bf698e719b307419f953e25344e8f42d557533d7a02b276/netapp_lib-2020.7.16-py2-none-any.whl (Caused by SSLError(CertificateError("hostname 'files.pythonhosted.org' doesn't match either of 'r.ssl.fastly.net', '*.catchpoint.com', '*.cnn.io', '*.dollarshaveclub.com', '*.eater.com', '*.fastly.picmonkey.com', '*.files.saymedia-content.com', '*.ft.com', '*.meetupstatic.com', '*.nfl.com', '*.pagar.me', '*.picmonkey.com', '*.realself.com', '*.sbnation.com', '*.shakr.com', '*.streamable.com', '*.surfly.com', '*.theverge.com', '*.thrillist.com', '*.vox-cdn.com', '*.vox.com', '*.voxmedia.com', 'eater.com', 'ft.com', 'i.gse.io', 'picmonkey.com', 'realself.com', 'static.wixstatic.com', 'streamable.com', 'surfly.com', 'theverge.com', 'vox-cdn.com', 'vox.com', 'www.joyent.com'",),)) + ERROR: Command "/usr/bin/python2.6 /root/ansible/test/lib/ansible_test/_data/quiet_pip.py install --disable-pip-version-check -r /root/ansible/test/lib/ansible_test/_data/requirements/units.txt -r tests/unit/requirements.txt -c /root/ansible/test/lib/ansible_test/_data/requirements/constraints.txt" returned exit status 2. + ERROR: Command "docker exec d47eb360db4ce779c1f690db964655b76e68895c4360ff252c46fe7fe6f5c75a /usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/root/ansible_collections/netapp/ontap LC_ALL=en_US.UTF-8 /usr/bin/python3.6 /root/ansible/bin/ansible-test units --metadata tests/output/.tmp/metadata-9i2qfrcl.json --truncate 200 --redact --color yes --requirements --python default --requirements-mode only" returned exit status 1. + validations: + required: true + + +- type: markdown + attributes: + value: > + *One last thing...* + + + Thank you for your collaboration! + + +... 
diff --git a/ansible_collections/netapp/ontap/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/netapp/ontap/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..9c96953e3 --- /dev/null +++ b/ansible_collections/netapp/ontap/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,100 @@ +--- +name: ✨ Feature request +description: Suggest an idea for this project + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to suggest a feature for netapp.ontap!** + + 💡 + Before you go ahead with your request, please first consider if it + would be useful for majority of the netapp.ontap users. As a + general rule of thumb, any feature that is only of interest to a + small sub group should be [implemented in a third-party Ansible + Collection][contribute to collections] or maybe even just your + project alone. Be mindful of the fact that the essential + netapp.ontap features have a broad impact. + + +
+ + ❗ Every change breaks someone's workflow. + + + + [![❗ Every change breaks someone's workflow. + ](https://imgs.xkcd.com/comics/workflow.png) + ](https://xkcd.com/1172/) +
+ + + ⚠ + Verify first that your idea is not [already requested on + GitHub][issue search]. + + Also test if the main branch does not already implement this. + + +- type: textarea + attributes: + label: Summary + description: > + Describe the new feature/improvement you would like briefly below. + + + What's the problem this feature will solve? + + What are you trying to do, that you are unable to achieve + with netapp.ontap as it currently stands? + + + * Provide examples of real-world use cases that this would enable + and how it solves the problem you described. + + * How do you solve this now? + + * Have you tried to work around the problem using other tools? + + * Could there be a different approach to solving this issue? + + placeholder: >- + I am trying to do X with netapp.ontap from the devel branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of netapp.ontap because of Z. + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: >- + I asked on https://stackoverflow.com/.... and the community + advised me to do X, Y and Z. + validations: + required: true + +... 
diff --git a/ansible_collections/netapp/ontap/.github/workflows/codeql-analysis.yml b/ansible_collections/netapp/ontap/.github/workflows/codeql-analysis.yml new file mode 100644 index 000000000..f9b5a90c9 --- /dev/null +++ b/ansible_collections/netapp/ontap/.github/workflows/codeql-analysis.yml @@ -0,0 +1,72 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ main ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ main ] + schedule: + - cron: '15 16 * * 0' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] + # Learn more: + # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + #queries: +security-and-quality + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. 
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/.github/workflows/coverage.yml b/ansible_collections/netapp/ontap/.github/workflows/coverage.yml new file mode 100644 index 000000000..6a79a156f --- /dev/null +++ b/ansible_collections/netapp/ontap/.github/workflows/coverage.yml @@ -0,0 +1,45 @@ +name: NetApp.ontap Ansible Coverage + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Coverage on ONTAP + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible stable-2.11 + run: pip install https://github.com/ansible/ansible/archive/stable-2.11.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/ontap/ + rsync -av . 
ansible_collections/netapp/ontap/ --exclude ansible_collections/netapp/ontap/ + + - name: Run Unit Tests + run: ansible-test units --coverage --color --docker --python 3.8 + working-directory: ansible_collections/netapp/ontap/ + + # ansible-test supports producing code coverage data + - name: Generate coverage report + run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ansible_collections/netapp/ontap/ + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + working-directory: ansible_collections/netapp/ontap/ + verbose: true \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/.github/workflows/main.yml b/ansible_collections/netapp/ontap/.github/workflows/main.yml new file mode 100644 index 000000000..94353d961 --- /dev/null +++ b/ansible_collections/netapp/ontap/.github/workflows/main.yml @@ -0,0 +1,51 @@ +name: NetApp.ontap Ansible CI + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Sanity (${{ matrix.ansible }}) on ONTAP + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + ansible: + - stable-2.9 + - stable-2.10 + - stable-2.11 + - stable-2.12 + - stable-2.13 + - stable-2.14 + - devel + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + # Ansible 2.14 requires 3.9 as a minimum + python-version: 3.9 + + - name: Install ansible (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/ontap/ + rsync -av . 
ansible_collections/netapp/ontap/ --exclude ansible_collections/netapp/ontap/ + + + - name: Run sanity tests ONTAP + run: ansible-test sanity --docker -v --color + working-directory: ansible_collections/netapp/ontap/ + + - name: Run Unit Tests + run: ansible-test units --docker -v --color + working-directory: ansible_collections/netapp/ontap/ diff --git a/ansible_collections/netapp/ontap/CHANGELOG.rst b/ansible_collections/netapp/ontap/CHANGELOG.rst new file mode 100644 index 000000000..0a5b2167a --- /dev/null +++ b/ansible_collections/netapp/ontap/CHANGELOG.rst @@ -0,0 +1,2048 @@ +===================================== +NetApp ONTAP Collection Release Notes +===================================== + +.. contents:: Topics + + +v22.7.0 +======= + +Minor Changes +------------- + +- na_ontap_name_mappings - added choices ``s3_win`` and ``s3_unix`` to ``direction``, requires ONTAP 9.12.1 or later. +- na_ontap_s3_buckets - new option ``nas_path`` added, requires ONTAP 9.12.1 or later. + +Bugfixes +-------- + +- na_ontap_login_messages - fix ``banner`` and ``motd_message`` not idempotent when trailing '\n' is present. +- na_ontap_login_messages - fix idempotent issue on ``show_cluster_motd`` option when try to set banner or motd_message for the first time in REST. + +New Modules +----------- + +- netapp.ontap.na_ontap_active_directory_domain_controllers - NetApp ONTAP configure active directory preferred domain controllers + +v22.6.0 +======= + +Minor Changes +------------- + +- na_ontap_aggregate - new REST only option ``tags`` added, requires ONTAP 9.13.1 or later version. +- na_ontap_broadcast_domain - skip checking modify when ``state`` is absent. +- na_ontap_export_policy - added ``name`` to modify in module output if export policy is renamed. +- na_ontap_qos_policy_group - new REST only option ``adaptive_qos_options.block_size`` added, requires ONTAP 9.10.1 or later version. +- na_ontap_qos_policy_group - skip checking modify when ``state`` is absent. 
+- na_ontap_s3_buckets - new option ``type`` added, requires ONTAP 9.12.1 or later. +- na_ontap_volume - new REST only option ``tags`` added, requires ONTAP 9.13.1 or later version. +- retry create or modify when getting temporarily locked from changes error in REST. + +Bugfixes +-------- + +- na_ontap_export_policy - fix cannot delete export policy if ``from_name`` option is set. +- na_ontap_file_security_permissions_acl - fix idempotent issue on ``propagation_mode`` option. +- na_ontap_qos_adaptive_policy_group - rename group when from_name is present and state is present. +- na_ontap_qos_policy_group - one occurrence of msg missing in call to fail_json. +- na_ontap_s3_groups - fix cannot modify ``policies`` if not configured in create. +- na_ontap_s3_groups - fix error when current s3 groups has no users configured. +- na_ontap_security_certificates - fix duplicate entry error when ``vserver`` option is set with admin vserver. +- na_ontap_snapmirror_policy - fix cannot disable ``is_network_compression_enabled`` in REST. +- na_ontap_svm - skip modify validation when trying to delete svm. + +New Modules +----------- + +- netapp.ontap.na_ontap_kerberos_interface - NetApp ONTAP module to modify kerberos interface. + +v22.5.0 +======= + +Minor Changes +------------- + +- na_ontap_cifs - new options ``browsable`` and ``show_previous_versions`` added in REST. +- na_ontap_cifs - removed default value for ``unix_symlink`` as its not supported with ZAPI. +- na_ontap_cifs - updated documentation and examples for REST. +- na_ontap_file_security_permissions - updated module examples. +- na_ontap_ipspace - improved module fail error message in REST. +- na_ontap_rest_info - improved documentation for ``parameters`` option. +- na_ontap_security_config - updated documentation for ``supported_cipher_suites``. +- na_ontap_user - option ``vserver`` is not required with REST, ignore this option to create cluster scoped user. 
+ +Bugfixes +-------- + +- na_ontap_cifs - throw error if set ``unix_symlink`` in ZAPI. +- na_ontap_cifs - throw error if used options that require recent ONTAP version. +- na_ontap_file_security_permissions - error if more than one desired ACLs has same user, access, access_control and apply_to. +- na_ontap_file_security_permissions - fix TypeError when current acls is None. +- na_ontap_file_security_permissions - fix idempotency issue on ``acls.propagation_mode`` option. +- na_ontap_ipspace - fix cannot delete ipspace if ``from_ipspace`` is present. +- na_ontap_iscsi_security - error module if use_rest never is set. +- na_ontap_iscsi_security - fix KeyError on ``outbound_username`` option. +- na_ontap_qtree - ignore job entry does not exist error when creating qtree with REST to bypass ONTAP issue with FSx. +- na_ontap_quotas - ignore job entry does not exist error when creating quota with REST to bypass ONTAP issue with FSx. +- na_ontap_security_config - fix error on specifying protocol version ``TLSv1.1`` when fips is enabled. +- na_ontap_snapmirror - Added option ``identity_preservation`` support from ONTAP 9.11.1 in REST. +- na_ontap_snapmirror - error if identity_preservation set in ZAPI. + +v22.4.1 +======= + +Bugfixes +-------- + +- na_ontap_snapmirror - fix invalid value error for return_timeout, modified the value to 120 seconds. + +v22.4.0 +======= + +Minor Changes +------------- + +- na_ontap_rest_cli - returns changed only for verbs POST, PATCH and DELETE. +- na_ontap_security_config - Added support for protocol version ``TLSV1.3``. +- na_ontap_security_config - Replaced private cli with REST API for GET and PATCH. +- na_ontap_security_config - new option ``supported_cipher_suites`` added in REST. +- na_ontap_snapmirror - new option ``identity_preservation`` added in REST. +- na_ontap_snapmirror - wait 600 seconds for snapmirror creation to complete in REST. +- na_ontap_user_role - ``command_directory_name`` requires 9.11.1 or later with REST. 
+- na_ontap_user_role - add support for rest-role ``privileges.access`` choices ``read_create``, ``read_modify`` and ``read_create_modify``, supported only with REST and requires ONTAP 9.11.1 or later versions. + +Bugfixes +-------- + +- na_ontap_interface - fix incorrect warning raised when try to rename interface. +- na_ontap_ldap_client - fix KeyError on ``name`` in ZAPI. +- na_ontap_ldap_client - fix duplicate entry error when used cluster vserver in REST. +- na_ontap_san_create - Role documentation correct to from nas to san +- na_ontap_user - fix KeyError vserver in ZAPI. +- na_ontap_user_role - report error when command/command directory path set in REST for ONTAP earlier versions. +- na_ontap_volume - fix error when try to unmount volume and modify snaplock attribute. +- na_ontap_volume - fix idempotent issue when try to offline and modify other volume options. +- na_ontap_vserver_audit - Added ``log_path`` option in modify. +- na_ontap_vserver_audit - fix invalid field value error of log retention count and duration. + +New Modules +----------- + +- netapp.ontap.na_ontap_ems_filter - NetApp ONTAP EMS Filter + +v22.3.0 +======= + +Minor Changes +------------- + +- na_ontap_aggregate - new option ``allow_flexgroups`` added. +- na_ontap_cifs - new options ``access_based_enumeration``, ``change_notify``, ``encryption``, ``home_directory``, ``oplocks``, ``show_snapshot``, ``allow_unencrypted_access``, ``namespace_caching`` and ``continuously_available`` added in REST. +- na_ontap_dns - ``skip_validation`` option requires 9.9.1 or later with REST and ignored for cluster DNS operations. +- na_ontap_dns - support cluster scope for modify and delete. +- na_ontap_interface - do not attempt to migrate FC interface if desired ``home_port``, ``home_node`` and ``current_port``, ``current_node`` are same. +- na_ontap_license - support for NLF v2 license files. +- na_ontap_nfs - new options ``root``, ``windows`` and ``security`` added in REST. 
+- na_ontap_user_role - ``command_directory_name`` is required if ``privileges`` not set in REST. +- na_ontap_user_role - ``path`` is required if ``privileges`` set in REST. +- na_ontap_volume_efficiency - REST support for ``policy`` requires 9.7 or later, ``path`` requires 9.9.1 or later and ``volume_efficiency`` and ``start_ve_scan_old_data`` requires 9.11.1 or later. +- na_ontap_volume_efficiency - ``schedule``, ``start_ve_scan_all``, ``start_ve_build_metadata``, ``start_ve_delete_checkpoint``, ``start_ve_queue_operation``, ``start_ve_qos_policy`` and ``stop_ve_all_operations`` options are not supported with REST. +- na_ontap_volume_efficiency - new option ``volume_name`` added. +- na_ontap_volume_efficiency - updated private cli with REST API. + +Bugfixes +-------- + +- na_ontap_aggregate - try to offline aggregate when disk add operation is in progress in ZAPI. +- na_ontap_interface - fix idempotency issue when ``home_port`` not set in creating FC interface. +- na_ontap_rest_info - fix field issue with private/cli and support/autosupport/check APIs. +- na_ontap_snapshot - fix cannot modify ``snapmirror_label``, ``expiry_time`` and ``comment`` if not configured in create. +- na_ontap_user_role - fix AttributeError 'NetAppOntapUserRole' object has no attribute 'name'. +- na_ontap_user_role - fix KeyError on ``vserver``, ``command_directory_name`` in ZAPI and ``path``, ``query`` in REST. +- na_ontap_user_role - fix duplicate entry error in ZAPI. +- na_ontap_user_role - fix entry does not exist error when trying to delete privilege in REST. +- na_ontap_volume_efficiency - fix idempotent issue when state is absent and efficiency options are set in ZAPI. + +New Modules +----------- + +- netapp.ontap.na_ontap_vserver_audit - NetApp Ontap - create, delete or modify vserver audit configuration. +- netapp.ontap.na_ontap_vserver_peer_permissions - NetApp Ontap - create, delete or modify vserver peer permission. 
+ +v22.2.0 +======= + +Minor Changes +------------- + +- na_ontap_active_directory - REST requires ONTAP 9.12.1 or later. +- na_ontap_active_directory - add ``fqdn`` as aliases for ``domain``. +- na_ontap_interface - new option ``fail_if_subnet_conflicts`` - requires REST and ONTAP 9.11.1 or later. +- na_ontap_interface - option ``subnet_name`` is now supported with REST with ONTAP 9.11.1 or later. +- na_ontap_iscsi - new option ``target_alias`` added in REST. +- na_ontap_snapmirror - support ``schedule`` with REST and ONTAP 9.11.1, add alias ``transfer_schedule``. +- na_ontap_snapmirror_policy - Added new choices sync and async for policy type in REST. +- na_ontap_snapmirror_policy - Added unsupported options in ZAPI. +- na_ontap_snapmirror_policy - add support for cluster scoped policy with REST. +- na_ontap_snapmirror_policy - new option ``copy_latest_source_snapshot``, ``create_snapshot_on_source`` and ``sync_type`` added in REST. +- na_ontap_snapmirror_policy - new option ``transfer_schedule`` for async policy types. +- na_ontap_snapmirror_policy - warn when replacing policy type ``async_mirror``, ``mirror_vault`` and ``vault`` with policy type ``async`` and ``strict_sync_mirror``, ``sync_mirror`` with ``sync`` in REST. +- na_ontap_svm - warn in case of mismatch in language option spelling. + +Bugfixes +-------- + +- na_ontap_quotas - fix duplicate entry error when trying to add quota rule in REST. +- na_ontap_quotas - fix entry does not exist error when trying to modify quota status in REST. +- na_ontap_security_ipsec_policy - fix KeyError on ``authentication_method``. +- na_ontap_security_ipsec_policy - fix cannot get current security IPsec policy with ipspace. +- na_ontap_security_key_manager - requires 9.7+ to work with REST. +- na_ontap_snapmirror_policy - deleting all retention rules would trigger an error when the existing policy requires at least one rule. +- na_ontap_snapmirror_policy - fix desired policy type not configured in cli with REST. 
+- na_ontap_snapmirror_policy - index error on rules with ONTAP 9.12.1 as not all fields are present. +- na_ontap_volume -- fixed bug preventing unmount and taking a volume off line at the same time + +New Modules +----------- + +- netapp.ontap.na_ontap_cifs_local_user - NetApp ONTAP local CIFS user. + +v22.1.0 +======= + +Minor Changes +------------- + +- na_ontap_aggregate - add ``name`` to modify in module output if aggregate is renamed. +- na_ontap_aggregate - add support for ``service_state`` option from ONTAP 9.11.1 or later in REST. +- na_ontap_aggregate - error if ``unmount_volumes`` set in REST, by default REST unmount volumes when trying to offline aggregate. +- na_ontap_aggregate - fix examples in documentation. +- na_ontap_cifs_local_group_member - Added REST API support to retrieve, add and remove CIFS group member. +- na_ontap_cifs_local_group_member - REST support is from ONTAP 9.10.1 or later. +- na_ontap_cifs_server - skip ``service_state`` option if not set in create. +- na_ontap_interface - error when try to migrate fc interface in REST. +- na_ontap_interface - new option ``probe_port`` for Azure load balancer. +- na_ontap_quotas - for qtree type, allow quota_target in path format /vol/vol_name/qtree_name in REST. +- na_ontap_snapmirror_policy - new option ``copy_all_source_snapshots`` added in REST. +- na_ontap_volume - report error if vserver does not exist or is not a data vserver on create. + +Bugfixes +-------- + +- na_ontap_active_directory - updated doc as only ZAPI is supported at present, force an error with use_rest always. +- na_ontap_aggregate - allow adding disks before trying to offline aggregate. +- na_ontap_aggregate - fix ``service_state`` option skipped if its set to offline in create. +- na_ontap_cg_snapshot - updated doc with deprecation warning as it is a ZAPI only module. +- na_ontap_cifs_server - fix ``service_state`` is stopped when trying to modify cifs server in REST. 
+- na_ontap_file_directory_policy - updated doc with deprecation warning as it is a ZAPI only module. +- na_ontap_file_security_permissions - updated notes to indicate ONTAP 9.9.1 or later is required. +- na_ontap_file_security_permissions_acl - updated notes to indicate ONTAP 9.9.1 or later is required. +- na_ontap_interface - fix cannot set ``location.node.name`` and ``location.home_node.name`` error when creating or modifying fc interface. +- na_ontap_interface - fix unexpected argument error with ``ipspace`` when trying to get fc interface. +- na_ontap_qtree - fix cannot get current qtree if enclosed in curly braces. +- na_ontap_quota_policy - updated doc with deprecation warning as it is a ZAPI only module. +- na_ontap_quotas - fix default tree quota rule gets modified when ``quota_target`` is set in REST. +- na_ontap_quotas - fix user/group quota rule without qtree gets modified when ``qtree`` is set. +- na_ontap_snapmirror_policy - fixed idempotency issue on ``identity_preservation`` option when using REST. +- na_ontap_svm_options - updated doc with deprecation warning as it is a ZAPI only module. + +New Modules +----------- + +- netapp.ontap.na_ontap_cifs_local_group - NetApp Ontap - create, delete or modify CIFS local group. +- netapp.ontap.na_ontap_security_ipsec_ca_certificate - NetApp ONTAP module to add or delete ipsec ca certificate. +- netapp.ontap.na_ontap_security_ipsec_config - NetApp ONTAP module to configure IPsec config. +- netapp.ontap.na_ontap_security_ipsec_policy - NetApp ONTAP module to create, modify or delete security IPsec policy. + +v22.0.1 +======= + +Minor Changes +------------- + +- na_ontap_interface - allow setting ``netmask`` with netmask length in ZAPI. + +Bugfixes +-------- + +- na_ontap_interface - fix ``netmask`` not idempotent in REST. 
+- na_ontap_mcc_mediator - Fix error that would prevent mediator deletion, + +v22.0.0 +======= + +Minor Changes +------------- + +- na_ontap_autosupport_invoke - warn when ``message`` alias is used as it will be removed - it conflicts with Ansible internal variable. +- na_ontap_debug - report python executable version and path. +- na_ontap_export_policy_rule - ``allow_device_creation`` and ``chown_mode`` is now supported in ZAPI. +- na_ontap_export_policy_rule - ``allow_suid``, ``allow_device_creation`` and ``chown_mode`` is now supported from ONTAP 9.9.1 or later in REST. +- na_ontap_ldap_client - new option ``skip_config_validation``. +- na_ontap_login_message - warn when ``message`` alias is used as it will be removed - it conflicts with Ansible internal variable. +- na_ontap_motd - warn when ``message`` alias is used as it will be removed - it conflicts with Ansible internal variable. +- na_ontap_net_routes - ``metric`` option is supported from ONTAP 9.11.0 or later in REST. +- na_ontap_nfs - warn when ``nfsv4.1`` alias is used as it will be removed - it does not match Ansible naming convention. +- na_ontap_rest_info - support added for protocols/active-directory. +- na_ontap_rest_info - support added for protocols/cifs/group-policies. +- na_ontap_rest_info - support added for protocols/nfs/connected-client-settings. +- na_ontap_rest_info - support added for security/aws-kms. +- na_ontap_service_policy - new options ``known_services`` and ``additional_services``. +- na_ontap_service_policy - update services for 9.11.1 - make it easier to add new services. +- na_ontap_snapmirror - ``schedule`` is handled through ``policy`` for REST. +- na_ontap_snapmirror_policy - ``name`` added as an alias for ``policy_name``. +- na_ontap_snapmirror_policy - improve error reporting and report errors in check_mode. +- na_ontap_snapmirror_policy - new option ``identity_preservation`` added. 
+- na_ontap_volume - ``wait_for_completion`` and ``check_interval`` is now supported for volume move and encryption in REST.
+- na_ontap_volume - new REST option ``analytics`` added.
+- na_ontap_volume - new option ``max_wait_time`` added.
+- tracing - allow to selectively trace headers and authentication.
+
+Bugfixes
+--------
+
+- iso8601 filters - fix documentation generation issue.
+- na_ontap_firmware_upgrade - when enabled, disruptive_update would always update even when update is not required.
+- na_ontap_info - Added vserver in key_fields of net_interface_info.
+- na_ontap_interface - fix error where an ``address`` with an IPV6 ip would try to modify each time playbook was run.
+- na_ontap_ldap_client - ``servers`` not accepted when using ZAPI and ``ldap_servers`` not handling a single server properly.
+- na_ontap_rest_info - fixed error where module would fail silently when using ``owning_resource`` and a non-existent vserver.
+- na_ontap_user_role - fixed Invalid JSON input. Expecting "privileges" to be an array.
+- na_ontap_volume - ``snapdir_access`` is not supported by REST and will currently inform you now if you try to use it with REST.
+- na_ontap_volume - fix KeyError on ``aggregate_name`` when trying to unencrypt volume in ZAPI.
+- na_ontap_volume - fix error when trying to move encrypted volume and ``encrypt`` is True in REST.
+- na_ontap_volume - fix error when trying to unencrypt volume in REST.
+- na_ontap_volume - when deleting a volume, don't report a warning when unmount is successful (error is None).
+- tracing - redact headers and authentication secrets by default.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_bgp_peer_group - NetApp ONTAP module to create, modify or delete bgp peer group.
+- netapp.ontap.na_ontap_file_security_permissions - NetApp ONTAP NTFS file security permissions
+- netapp.ontap.na_ontap_file_security_permissions_acl - NetApp ONTAP file security permissions ACL
+- netapp.ontap.na_ontap_local_hosts - NetApp ONTAP local hosts
+- netapp.ontap.na_ontap_name_mappings - NetApp ONTAP name mappings
+
+v21.24.1
+========
+
+Bugfixes
+--------
+
+- new meta/execution-environment.yml is failing ansible-builder sanitize step.
+
+v21.24.0
+========
+
+Minor Changes
+-------------
+
+- All REST GETs up to and including 9.11.1 that do not require a UUID/KEY to be passed in are now supported
+- na_ontap_cluster - ``timezone.name`` to modify cluster timezone. REST only.
+- na_ontap_ems_destination - improve error messages - augment UT coverage (thanks to bielawb).
+- na_ontap_interface - ``dns_domain_name`` is now supported from ONTAP 9.9 or later in REST.
+- na_ontap_interface - ``is_dns_update_enabled`` is now supported from ONTAP 9.9.1 or later in REST.
+- na_ontap_interface - attempt to set interface_type to ``ip`` when ``protocols`` is set to "none".
+- na_ontap_net_subnet - added REST support.
+- na_ontap_quotas - Added REST support.
+- na_ontap_rest_info - Allowed the support of multiple subsets and warn when using ``**`` in fields.
+- na_ontap_rest_info - added support for ``network/ip/subnets``.
+- na_ontap_rest_info - support added for cluster.
+- na_ontap_rest_info - support added for cluster/counter/tables.
+- na_ontap_rest_info - support added for cluster/licensing/capacity-pools.
+- na_ontap_rest_info - support added for cluster/licensing/license-managers.
+- na_ontap_rest_info - support added for cluster/metrocluster/svms.
+- na_ontap_rest_info - support added for cluster/sensors.
+- na_ontap_rest_info - support added for name-services/cache/group-membership/settings.
+- na_ontap_rest_info - support added for name-services/cache/host/settings.
+- na_ontap_rest_info - support added for name-services/cache/netgroup/settings.
+- na_ontap_rest_info - support added for name-services/cache/setting. +- na_ontap_rest_info - support added for name-services/cache/unix-group/settings. +- na_ontap_rest_info - support added for name-services/ldap-schemas. +- na_ontap_rest_info - support added for network/fc/fabrics. +- na_ontap_rest_info - support added for network/fc/interfaces. +- na_ontap_rest_info - support added for network/fc/interfaces. +- na_ontap_rest_info - support added for network/ip/subnets. +- na_ontap_rest_info - support added for protocols/cifs/connections. +- na_ontap_rest_info - support added for protocols/cifs/netbios. +- na_ontap_rest_info - support added for protocols/cifs/session/files. +- na_ontap_rest_info - support added for protocols/cifs/shadow-copies. +- na_ontap_rest_info - support added for protocols/cifs/shadowcopy-sets. +- na_ontap_rest_info - support added for protocols/nfs/connected-client-maps. +- na_ontap_rest_info - support added for security. +- na_ontap_rest_info - support added for security/multi-admin-verify. +- na_ontap_rest_info - support added for security/multi-admin-verify/approval-groups. +- na_ontap_rest_info - support added for security/multi-admin-verify/requests. +- na_ontap_rest_info - support added for security/multi-admin-verify/rules. +- na_ontap_rest_info - support added for storage/file/moves. +- na_ontap_rest_info - support added for storage/pools. +- na_ontap_restit - support multipart/form-data for read and write. +- na_ontap_security_ssh - Updates the SSH server configuration for the specified SVM - REST only. +- na_ontap_snmp_traphosts - Added ``host`` option in REST. +- na_ontap_svm - Added ``ndmp`` option to services in REST. +- na_ontap_vserver_create - ``firewall_policy`` is not set when ``service_policy`` is present, as ``service_policy`` is preferred. +- na_ontap_vserver_create - ``protocol`` is now optional. ``role`` is not set when protocol is absent. +- na_ontap_vserver_create - added ``interface_type``. 
Only a value of ``ip`` is currently supported. +- na_ontap_vserver_create - added support for vserver management interface when using REST. + +Bugfixes +-------- + +- na_ontap_cifs - fix KeyError on ``unix_symlink`` field when using REST. +- na_ontap_cifs_acl - use ``type`` when deleting unix-user or unix-group from ACL in ZAPI. +- na_ontap_command - do not run command in check_mode (thanks to darksoul42). +- na_ontap_ems_destination - fix idempotency issue when ``type`` value is rest_api. +- na_ontap_interface - improve error message when interface type is required with REST. +- na_ontap_qtree - fix KeyError on unix_permissions. +- na_ontap_rest_cli - do not run command in check_mode (thanks to darksoul42). +- na_ontap_s3_groups - if `policies` is None module should no longer fail +- na_ontap_user - fix idempotency issue with 9.11 because of new is_ldap_fastbind field. +- na_ontap_volume_efficiency - Missing fields in REST get should return None and not crash module. + +New Modules +----------- + +- netapp.ontap.na_ontap_security_ssh - NetApp ONTAP security ssh + +v21.23.0 +======== + +Minor Changes +------------- + +- all REST modules - new option ``force_ontap_version`` to bypass permission issues with custom vsadmin roles. +- na_ontap_cifs_local_user_set_password - Added REST support. +- na_ontap_cluster_ha - added REST support. +- na_ontap_export_policy_rule - ``rule_index`` is now optional for create and delete. +- na_ontap_export_policy_rule - new option ``force_delete_on_first_match`` to support duplicate entries on delete. +- na_ontap_interface - improved validations for unsupported options with FC interfaces. +- na_ontap_kerberos_realm - added REST support. +- na_ontap_kerberos_realm - change ``kdc_port`` option type to int. +- na_ontap_lun_copy - added REST support. +- na_ontap_lun_map_reporting_nodes - added REST support. +- na_ontap_ntp - for ONTAP version 9.6 or below fall back to ZAPI when ``use_rest`` is set to ``auto`` or fail when REST is desired. 
+- na_ontap_ntp_key - fail for ONTAP version 9.6 or below when ``use_rest`` is set to ``auto`` or when REST is desired. +- na_ontap_rest_info - new option ``ignore_api_errors`` to report error in subset rather than breaking execution. +- na_ontap_rest_info - support added for protocols/vscan/on-access-policies. +- na_ontap_rest_info - support added for protocols/vscan/on-demand-policies. +- na_ontap_rest_info - support added for protocols/vscan/scanner-pools. +- na_ontap_security_key_manager - added REST support. +- na_ontap_security_key_manager - new REST option ``onboard`` for onboard key manager. +- na_ontap_security_key_manager - new REST options ``external`` and ``vserver`` for external key manager. +- na_ontap_ucadapter - added REST support. +- na_ontap_user_role -- added REST support. +- na_ontap_volume - attempt to delete volume even when unmounting or offlining failed. + +Bugfixes +-------- + +- na_ontap_cifs_acl - use ``type`` if present when fetching existing ACL with ZAPI. +- na_ontap_cifs_local_user_set_password - when using ZAPI, do not require cluster admin privileges. +- na_ontap_cluster_config Role - incorrect license was shown - updated to GNU General Public License v3.0 +- na_ontap_flexcache - properly use ``origin_cluster`` in GET but not in POST when using REST. +- na_ontap_kerberos_realm - fix cannot modify ``comment`` option in ZAPI. +- na_ontap_lun_copy - fix key error on ``source_vserver`` option. +- na_ontap_ntp - fixed typeError on ``key_id`` field with ZAPI. +- na_ontap_s3_buckets - fix TypeError if ``conditions`` not present in policy statements. +- na_ontap_s3_buckets - fix options that cannot be modified if not set in creating s3 buckets. +- na_ontap_s3_buckets - updated correct choices in options ``audit_event_selector.access`` and ``audit_event_selector.permission``. 
+ +New Modules +----------- + +- netapp.ontap.na_ontap_ems_destination - NetApp ONTAP configuration for EMS event destination + +v21.22.0 +======== + +Minor Changes +------------- + +- all modules - do not fail on ZAPI EMS log when vserver does not exist. +- na_ontap_job_schedule - new option ``cluster`` added. +- na_ontap_ldap - fall back to ZAPI when ``use_rest`` is set to ``auto`` or fail when REST is desired. +- na_ontap_ldap_client - Added REST support. +- na_ontap_ldap_client - Added ``ldaps_enabled`` option in ZAPI. +- na_ontap_license - return list of updated package names. +- na_ontap_name_service_switch - added REST support. +- na_ontap_nvme_subsystem - report subsystem as absent if vserver cannot be found when attempting a delete. +- na_ontap_rest_info -- Will now include a message in return output about ``gather_subset`` not supported by your version of ONTAP. +- na_ontap_rest_info -- Will now warn you if a ``gather_subset`` is not supported by your version of ONTAP. +- na_ontap_security_key_manager - indicate that ``node`` is not used and is deprecated. +- na_ontap_software_update - deleting a software package is now supported with ZAPI and REST. +- na_ontap_svm - added vserver as a convenient alias for name when using module_defaults. +- na_ontap_wait_for_condition - added REST support. +- na_ontap_wait_for_condition - added ``snapmirror_relationship`` to wait on ``state`` or ``transfer_state`` (REST only). + +Bugfixes +-------- + +- na_ontap_cluster_peer - report an error if there is an attempt to use the already peered clusters. +- na_ontap_interface - fix error deleting fc interface if it is enabled in REST. +- na_ontap_license - fix intermittent KeyError when adding licenses with REST. +- na_ontap_lun - Added ``lun_modify`` after ``app_modify`` to fix idempotency issue. +- na_ontap_name_service_switch - fix AttributeError 'NoneType' object has no attribute 'get_children' if ``sources`` is '-' in current. 
+- na_ontap_name_service_switch - fix idempotency issue on ``sources`` option.
+- na_ontap_security_key_manager - fix KeyError on ``node``.
+- na_ontap_service_processor_network - allow manually configuring network if all of ``ip_address``, ``netmask``, ``gateway_ip_address`` set and ``dhcp`` not present in REST.
+- na_ontap_service_processor_network - fail module when trying to disable ``dhcp`` and not setting one of ``ip_address``, ``netmask``, ``gateway_ip_address`` different than current.
+- na_ontap_service_processor_network - fix ``wait_for_completion`` ignored when trying to enable service processor network interface in ZAPI.
+- na_ontap_service_processor_network - fix idempotency issue on ``dhcp`` option in ZAPI.
+- na_ontap_service_processor_network - fix setting ``dhcp`` v4 takes more than ``wait_for_completion`` retries.
+- na_ontap_software_update - improve error handling if image file is already present.
+- na_ontap_software_update - improve error handling when node is rebooting with REST.
+- na_ontap_software_update - when using REST with ONTAP 9.9 or later, timeout value is properly set.
+- na_ontap_user - enforce that all methods are under a single application.
+- na_ontap_user - is_locked was not properly read with ZAPI, making the module not idempotent.
+
+v21.21.0
+========
+
+Minor Changes
+-------------
+
+- na_ontap_cluster_config role - support ``broadcast_domain`` and ``service_policy`` with REST.
+- na_ontap_info - add computed serial_hex and naa_id for lun_info.
+- na_ontap_info - add quota-policy-info.
+- na_ontap_interface - support ``broadcast_domain`` with REST.
+- na_ontap_login_messages - support cluster scope when using REST.
+- na_ontap_lun - support ``qos_adaptive_policy_group`` with REST.
+- na_ontap_motd - deprecated in favor of ``na_ontap_login_messages``. Fail when use_rest is set to ``always`` as REST is not supported.
+- na_ontap_ntp - new option ``key_id`` added.
+- na_ontap_qtree - Added ``unix_user`` and ``unix_group`` options in REST.
+- na_ontap_rest_info - add computed serial_hex and naa_id for storage/luns when serial_number is present.
+- na_ontap_s3_users - ``secret_key`` and ``access_token`` are now returned when creating a user.
+- na_ontap_service_processor_network - Added REST support.
+- na_ontap_snapmirror - improve error messages to be more specific and consistent.
+- na_ontap_snapmirror - new option ``validate_source_path`` to disable this validation.
+- na_ontap_snapmirror - validate source endpoint for ZAPI and REST, accounting for vserver local name.
+- na_ontap_snapmirror - wait for the relationship to come back to idle after a resync.
+- na_ontap_unix_group - added REST support.
+- na_ontap_unix_user - Added REST support.
+- na_ontap_unix_user - Added new option ``primary_gid`` aliased to ``group_id``.
+- na_ontap_user - accept ``service_processor`` as an alias for ``service-processor`` with ZAPI, to be consistent with REST.
+- na_ontap_volume - now defaults to REST with ``use_rest`` set to ``auto``, like every other module. ZAPI can be forced with ``use_rest`` set to ``never``.
+- na_ontap_vserver_create role - support ``broadcast_domain``, ``ipspace``, and ``service_policy`` with REST.
+
+Bugfixes
+--------
+
+- na_ontap_interface - FC interfaces - home_node should not be sent as location.home_node.
+- na_ontap_interface - FC interfaces - home_port is not supported for ONTAP 9.7 or earlier.
+- na_ontap_interface - FC interfaces - scope is not supported.
+- na_ontap_interface - FC interfaces - service_policy is not supported.
+- na_ontap_interface - enforce requirement for address/netmask for interfaces other than FC.
+- na_ontap_interface - fix idempotency issue for cluster scoped interfaces when using REST.
+- na_ontap_interface - fix potential node and uuid issues with LIF migration.
+- na_ontap_interface - ignore 'none' when using REST rather than reporting unexpected protocol.
+- na_ontap_lun - catch ZAPI error on get LUN. +- na_ontap_lun - ignore resize error if no change was required. +- na_ontap_lun - report error if flexvol_name is missing when using ZAPI. +- na_ontap_net_subnet - fixed ``ipspace`` option ignored in getting net subnet. +- na_ontap_qtree - fix idempotency issue on ``unix_permissions`` option. +- na_ontap_s3_buckets - Module will not fail on create if no ``policy`` is given. +- na_ontap_s3_buckets - Module will set ``enabled`` during create. +- na_ontap_s3_buckets - Module work currently when ``sid`` is a number. +- na_ontap_snapmirror - fix potential issue when destination is using REST but source is using ZAPI. +- na_ontap_snapmirror - relax check for source when using REST. +- na_ontap_svm - KeyError on CIFS when using REST with ONTAP 9.8 or lower. +- na_ontap_volume - ``volume_security_style`` was not modified if other security options were present with ZAPI. +- na_ontap_volume - fix idempotency issue on ``unix_permissions`` option. +- na_ontap_vserver_create role - add rule index as it is now required. + +Known Issues +------------ + +- na_ontap_snapshot - added documentation to use UTC format for ``expiry_time``. + +New Modules +----------- + +- netapp.ontap.na_ontap_ntp_key - NetApp ONTAP NTP key +- netapp.ontap.na_ontap_s3_groups - NetApp ONTAP S3 groups +- netapp.ontap.na_ontap_s3_policies - NetApp ONTAP S3 Policies + +v21.20.0 +======== + +Minor Changes +------------- + +- na_ontap_aggregate - updated ``disk_types`` in documentation. +- na_ontap_cifs_server - Added ``security`` options in REST. +- na_ontap_export_policy_rule - Add ``from_rule_index`` for both REST and ZAPI. Change ``rule_index`` to required. +- na_ontap_nvme_namespace - Added REST support. +- na_ontap_nvme_subsystem - Added REST support. +- na_ontap_portset - Added REST support. +- na_ontap_snapmirror - new option ``peer_options`` to define source connection parameters. 
+- na_ontap_snapmirror - new option ``transferring_time_out`` to define how long to wait for transfer to complete on create or initialize. +- na_ontap_snapmirror - rewrite update for REST using POST to initiate transfer. +- na_ontap_snapmirror - when deleting, attempt to delete even when the relationship cannot be broken. +- na_ontap_software_update - added REST support. +- na_ontap_svm - Added documentation for ``allowed_protocol``, ndmp is default in REST. +- na_ontap_user - add support for SAML authentication_method. +- na_ontap_vscan_on_access_policy - Added REST support. +- na_ontap_vscan_on_access_policy - new REST options ``scan_readonly_volumes`` and ``only_execute_access`` added. +- na_ontap_vscan_on_demand_task - Added REST support. +- na_ontap_vserver_cifs_security - Added ``use_ldaps_for_ad_ldap`` and ``use_start_tls_for_ad_ldap`` as mutually exclusive in ZAPI. +- na_ontap_vserver_cifs_security - Added option ``encryption_required_for_dc_connections`` and ``use_ldaps_for_ad_ldap`` in ZAPI. +- na_ontap_vserver_cifs_security - fall back to ZAPI when ``use_rest`` is set to ``auto`` or fail when REST is desired. + +Bugfixes +-------- + +- na_ontap_autosupport - TypeError on ``ondemand_enabled`` field with ONTAP 9.11. +- na_ontap_autosupport - TypeError on ``support`` field with ONTAP 9.11. +- na_ontap_autosupport - fix idempotency issue on ``state`` field with ONTAP 9.11. +- na_ontap_cluster_config - fix the role to be able to create intercluster LIFs with REST (ipspace is required). +- na_ontap_interface - ignore ``vserver`` when using REST if role is one of 'cluster', 'node-mgmt', 'intercluster', 'cluster-mgmt'. +- na_ontap_net_subnet - delete fails if ipspace is different than Default. +- na_ontap_nvme - fixed ``status_admin`` option is ignored if set to False when creating nvme service in REST. +- na_ontap_nvme - fixed invalid boolean value error for ``status_admin`` when creating nvme service in ZAPI. 
+- na_ontap_portset - fixed error when trying to remove partial ports from portset if igroups are bound to it. +- na_ontap_portset - fixed idempotency issue when ``ports`` has identical values. +- na_ontap_quotas - fix another quota operation is currently in progress issue. +- na_ontap_quotas - fix idempotency issue on ``threshold`` option. +- na_ontap_service_policy - fixed error in modify by changing resulting json of an existing record in REST. +- na_ontap_snapmirror - fix error in snapmirror restore by changing option ``clean_up_failure`` as optional when using ZAPI. +- na_ontap_snapmirror - fix issues where there was no wait on quiesce before aborting. +- na_ontap_snapmirror - fix issues where there was no wait on the relationship to end transferring. +- na_ontap_snapmirror - support for SSL certificate authentication for both sides when using ONTAP. +- na_ontap_snapmirror - when using REST with a policy, fix AttributeError - 'str' object has no attribute 'get'. +- na_ontap_snapmirror - when using ZAPI, wait for the relationship to be quiesced before breaking. +- na_ontap_software_update - now reports changed=False when the package is already present. +- na_ontap_user - fix idempotency issue with SSH with second_authentication_method. +- na_ontap_vscan_on_access_policy - fixed options ``filters``, ``file_ext_to_exclude`` and ``paths_to_exclude`` cannot be reset to empty values in ZAPI. +- na_ontap_zapit - fix failure in precluster mode. + +New Modules +----------- + +- netapp.ontap.na_ontap_s3_services - NetApp ONTAP S3 services +- netapp.ontap.na_ontap_s3_users - NetApp ONTAP S3 users + +v21.19.1 +======== + +Bugfixes +-------- + +- na_ontap_cluster_config - fix the role to be able to create intercluster LIFs with REST (ipspace is required). +- na_ontap_interface - ignore ``vserver`` when using REST if role is one of 'cluster', 'node-mgmt', 'intercluster', 'cluster-mgmt'. 
+- na_ontap_nvme - fixed ``status_admin`` option is ignored if set to False when creating nvme service in REST. +- na_ontap_nvme - fixed invalid boolean value error for ``status_admin`` when creating nvme service in ZAPI. +- na_ontap_service_policy - fixed error in modify by changing resulting json of an existing record in REST. +- na_ontap_snapmirror - when using REST with a policy, fix AttributeError - 'str' object has no attribute 'get'. +- na_ontap_snapmirror - when using ZAPI, wait for the relationship to be quiesced before breaking. + +v21.19.0 +======== + +Minor Changes +------------- + +- na_ontap_cifs - Added ``unix_symlink`` option in REST. +- na_ontap_cifs_server - Added ``force`` option for create, delete and rename cifs server when using REST. +- na_ontap_cifs_server - Added ``from_name`` option to rename cifs server when using REST. +- na_ontap_igroup_initiator - Added REST support. +- na_ontap_interface - use REST when ``use_rest`` is set to ``auto``. +- na_ontap_iscsi - Added REST support. +- na_ontap_nvme - Added REST support. +- na_ontap_qos_adaptive_policy_group - warn about deprecation, fall back to ZAPI or fail when REST is desired. +- na_ontap_qos_policy_group - Added REST only supported option ``adaptive_qos_options`` for configuring adaptive policy. +- na_ontap_qos_policy_group - Added REST only supported option ``fixed_qos_options`` for configuring max/min throughput policy. +- na_ontap_qos_policy_group - Added REST support. +- na_ontap_quotas - support TB as a unit, update doc with size format description. +- na_ontap_rest_info - new option ``owning_resource`` for REST info that requires an owning resource. 
For instance volume for a snapshot +- na_ontap_rest_info - support added for protocols/nfs/export-policies/rules (Requires owning_resource to be set) +- na_ontap_rest_info - support added for storage/volumes/snapshots (Requires owning_resource to be set) +- na_ontap_rest_info REST API's with hyphens in the name will now be converted to underscores when ``use_python_keys`` is set to ``True`` so that YAML parsing works correctly. +- na_ontap_rest_info support added for application/consistency-groups +- na_ontap_rest_info support added for cluster/fireware/history +- na_ontap_rest_info support added for cluster/mediators +- na_ontap_rest_info support added for cluster/metrocluster/dr-groups +- na_ontap_rest_info support added for cluster/metrocluster/interconnects +- na_ontap_rest_info support added for cluster/metrocluster/operations +- na_ontap_rest_info support added for cluster/ntp/keys +- na_ontap_rest_info support added for cluster/web +- na_ontap_rest_info support added for name-services/local-hosts +- na_ontap_rest_info support added for name-services/unix-groups +- na_ontap_rest_info support added for name-services/unix-users +- na_ontap_rest_info support added for network/ethernet/switch/ports +- na_ontap_rest_info support added for network/fc/ports +- na_ontap_rest_info support added for network/http-proxy +- na_ontap_rest_info support added for network/ip/bgp/peer-groups +- na_ontap_rest_info support added for protocols/audit +- na_ontap_rest_info support added for protocols/cifs/domains +- na_ontap_rest_info support added for protocols/cifs/local-groups +- na_ontap_rest_info support added for protocols/cifs/local-users +- na_ontap_rest_info support added for protocols/cifs/sessions +- na_ontap_rest_info support added for protocols/cifs/unix-symlink-mapping +- na_ontap_rest_info support added for protocols/cifs/users-and-groups/privilege +- na_ontap_rest_info support added for protocols/file-access-tracing/events +- na_ontap_rest_info support added for 
protocols/file-access-tracing/filters +- na_ontap_rest_info support added for protocols/fpolicy +- na_ontap_rest_info support added for protocols/locks +- na_ontap_rest_info support added for protocols/ndmp +- na_ontap_rest_info support added for protocols/ndmp/nodes +- na_ontap_rest_info support added for protocols/ndmp/sessions +- na_ontap_rest_info support added for protocols/ndmp/svms +- na_ontap_rest_info support added for protocols/nfs/connected-clients +- na_ontap_rest_info support added for protocols/nfs/kerberos/interfaces +- na_ontap_rest_info support added for protocols/nvme/subsystem-controllers +- na_ontap_rest_info support added for protocols/nvme/subsystem-maps +- na_ontap_rest_info support added for protocols/s3/buckets +- na_ontap_rest_info support added for protocols/s3/services +- na_ontap_rest_info support added for protocols/san/iscsi/sessions +- na_ontap_rest_info support added for protocols/san/portsets +- na_ontap_rest_info support added for protocols/san/vvol-bindings +- na_ontap_rest_info support added for security/anti-ransomware/suspects +- na_ontap_rest_info support added for security/audit +- na_ontap_rest_info support added for security/audit/messages +- na_ontap_rest_info support added for security/authentication/cluster/ad-proxy +- na_ontap_rest_info support added for security/authentication/cluster/ldap +- na_ontap_rest_info support added for security/authentication/cluster/nis +- na_ontap_rest_info support added for security/authentication/cluster/saml-sp +- na_ontap_rest_info support added for security/authentication/publickeys +- na_ontap_rest_info support added for security/azure-key-vaults +- na_ontap_rest_info support added for security/certificates +- na_ontap_rest_info support added for security/gcp-kms +- na_ontap_rest_info support added for security/ipsec +- na_ontap_rest_info support added for security/ipsec/ca-certificates +- na_ontap_rest_info support added for security/ipsec/policies +- na_ontap_rest_info support 
added for security/ipsec/security-associations +- na_ontap_rest_info support added for security/key-manager-configs +- na_ontap_rest_info support added for security/key-managers +- na_ontap_rest_info support added for security/key-stores +- na_ontap_rest_info support added for security/login/messages +- na_ontap_rest_info support added for security/ssh +- na_ontap_rest_info support added for security/ssh/svms +- na_ontap_rest_info support added for storage/cluster +- na_ontap_rest_info support added for storage/file/clone/split-loads +- na_ontap_rest_info support added for storage/file/clone/split-status +- na_ontap_rest_info support added for storage/file/clone/tokens +- na_ontap_rest_info support added for storage/monitored-files +- na_ontap_rest_info support added for storage/qos/workloads +- na_ontap_rest_info support added for storage/snaplock/audit-logs +- na_ontap_rest_info support added for storage/snaplock/compliance-clocks +- na_ontap_rest_info support added for storage/snaplock/event-retention/operations +- na_ontap_rest_info support added for storage/snaplock/event-retention/policies +- na_ontap_rest_info support added for storage/snaplock/file-fingerprints +- na_ontap_rest_info support added for storage/snaplock/litigations +- na_ontap_rest_info support added for storage/switches +- na_ontap_rest_info support added for storage/tape-devices +- na_ontap_rest_info support added for support/auto-update +- na_ontap_rest_info support added for support/auto-update/configurations +- na_ontap_rest_info support added for support/auto-update/updates +- na_ontap_rest_info support added for support/configuration-backup +- na_ontap_rest_info support added for support/configuration-backup/backups +- na_ontap_rest_info support added for support/coredump/coredumps +- na_ontap_rest_info support added for support/ems/messages +- na_ontap_rest_info support added for support/snmp +- na_ontap_rest_info support added for support/snmp/users +- na_ontap_rest_info support added 
for svm/migrations +- na_ontap_volume_autosize - improve error reporting. + +Bugfixes +-------- + +- na_ontap_cifs - fixed `symlink_properties` option silently ignored for cifs share creation when using REST. +- na_ontap_cifs - fixed error in modifying comment if it is not set while creating CIFS share in REST. +- na_ontap_command - fix typo in example. +- na_ontap_interface - rename fails with 'inconsistency in rename action' for cluster interface with REST. +- na_ontap_login_messages - fix typo in examples for username. +- na_ontap_nfs - fix TypeError on NoneType as ``tcp_max_xfer_size`` is not supported in earlier ONTAP versions. +- na_ontap_nfs - fix ``Extra input`` error with ZAPI for ``is-nfsv4-enabled``. +- na_ontap_quotas - fix idempotency issue on ``disk_limit`` and ``soft_disk_limit``. +- na_ontap_service_policy - fix examples in documentation. +- na_ontap_volume - QOS policy was not set when using NAS application. +- na_ontap_volume - correctly warn when attempting to modify NAS application. +- na_ontap_volume - do not set encrypt on modify, as it is already handled with specialized ZAPI calls. +- na_ontap_volume - use ``time_out`` value when creating/modifying/deleting volumes with REST rathar than hardcoded value. + +New Modules +----------- + +- netapp.ontap.na_ontap_s3_buckets - NetApp ONTAP S3 Buckets + +v21.18.1 +======== + +Bugfixes +-------- + +- na_ontap_iscsi - fixed error starting iscsi service on vserver where Service, adapter, or operation already started. +- na_ontap_lun - Fixed KeyError on options ``force_resize``, ``force_remove`` and ``force_remove_fenced`` in Zapi. +- na_ontap_lun - Fixed ``force_remove`` option silently ignored in REST. +- na_ontap_snapshot_policy - Do not validate parameter when state is ``absent`` and fix KeyError on ``comment``. + +v21.18.0 +======== + +Minor Changes +------------- + +- na_ontap_cluster_config role - use na_ontap_login_messages as na_ontap_motd is deprecated. 
+- na_ontap_debug - report ansible version and ONTAP collection version. +- na_ontap_efficiency_policy - Added REST support. +- na_ontap_export_policy_rule - new option ``ntfs_unix_security`` for NTFS export UNIX security options added. +- na_ontap_lun - Added REST support. +- na_ontap_snapmirror -- Added more descriptive error messages for REST +- na_ontap_snapshot_policy - Added REST support to the na_ontap_snapshot_policy module. +- na_ontap_svm - add support for web services (ssl modify) - REST only with 9.8 or later. +- na_ontap_volume - add support for SnapLock - only for REST. +- na_ontap_volume - allow to modify volume after rename. +- na_ontap_volume - new option ``max_files`` to increase the inode count value. +- na_ontap_vserver_create role - support max_volumes option. + +Bugfixes +-------- + +- Fixed ONTAP minor version ignored in checking minimum ONTAP version. +- na_ontap_aggregate - Fixed error in delete aggregate if the ``disk_count`` is less than current disk count. +- na_ontap_autosupport - Fixed `partner_address` not working in REST. +- na_ontap_command - document that a READONLY user is not supported, even for show commands. +- na_ontap_disk_options - ONTAP 9.10.1 returns on/off rather than True/False. +- na_ontap_info - Fixes issue with na_ontap_info failing in 9.1 because of ``job-schedule-cluster``. +- na_ontap_iscsi - Fixed issue with ``start_state`` always being set to stopped when creating an ISCSI. +- na_ontap_lun_map - TypeError - '>' not supported between instances of 'int' and 'str '. +- na_ontap_qtree - Fixed issue with ``oplocks`` not being changed during a modify in Zapi. +- na_ontap_qtree - Fixed issue with ``oplocks`` not warning user about not being supported in REST +- na_ontap_snapmirror - Added use_rest condition for the REST support to work when use_rest `always`. +- na_ontap_snapshot - add error message if volume is not found with REST. +- na_ontap_snapshot - fix key error on volume when using REST. 
+- na_ontap_svm - fixed KeyError issue on protocols when vserver is stopped. +- na_ontap_volume - do not attempt to mount volume if current state is offline. +- na_ontap_volume - fix idempotency issue with compression settings when using REST. +- na_ontap_vserver_peer - Added cluster peer accept code in REST. +- na_ontap_vserver_peer - Fixed AttributeError if ``dest_hostname`` or ``peer_options`` not present. +- na_ontap_vserver_peer - Fixed ``local_name_for_peer`` and ``local_name_for_source`` options silently ignored in REST. +- na_ontap_vserver_peer - Get peer cluster name if remote peer exists else use local cluster name. +- na_ontap_vserver_peer - ignore job entry doesn't exist error with REST to bypass ONTAP issue with FSx. +- na_ontap_vserver_peer - report error if SVM peer does not see a peering relationship after create. + +v21.17.2 +======== + +Bugfixes +-------- + +- na_ontap_lun_map - Fixed bug when deleting lun map using REST. +- na_ontap_rest_info - Fixed an issue with adding field to specific info that didn't have a direct REST equivalent. + +v21.17.1 +======== + +Bugfixes +-------- + +- na_ontap_lun_map - fixed bugs resulting in REST support to not work. + +v21.17.0 +======== + +Minor Changes +------------- + +- all modules that only support ZAPI - warn when ``use_rest`` with a value of ``always`` is ignored. +- na_ontap_cifs_acl - Added REST support to the cifs share access control module. +- na_ontap_cifs_acl - new option ``type`` for user-group-type. +- na_ontap_cifs_share - Added REST support to the cifs share module. +- na_ontap_cluster_peer - Added REST support to the cluster_peer module. +- na_ontap_lun_map - Added REST support. +- na_ontap_nfs - Added Rest Support +- na_ontap_volume_clone - Added REST support. + +Bugfixes +-------- + +- na_ontap_aggregate - Fixed UUID issue when attempting to attach object store as part of creating the aggregate with REST. 
+- na_ontap_cifs_server - error out if ZAPI only options ``force`` or ``workgroup`` are used with REST. +- na_ontap_cluster_peer - Fixed KeyError if both ``source_intercluster_lifs`` and ``dest_intercluster_lifs`` not present in cluster create. +- na_ontap_rest_info - Fixed example with wrong indentation for ``use_python_keys``. + +v21.16.0 +======== + +Minor Changes +------------- + +- na_ontap_aggregate - Added REST support. +- na_ontap_aggregate - Added ``disk_class`` option for REST and ZAPI. +- na_ontap_aggregate - Extended accepted ``disk_type`` values for ZAPI. +- na_ontap_cifs_server - Added REST support to the cifs server module. +- na_ontap_ports - Added REST support to the ports module. +- na_ontap_snapmirror - Added REST support to the na_ontap_snapmirror module +- na_ontap_volume - ``logical_space_enforcement`` to specify whether to perform logical space accounting on the volume. +- na_ontap_volume - ``logical_space_reporting`` to specify whether to report space logically on the volume. +- na_ontap_volume - ``tiering_minimum_cooling_days`` to specify how many days must pass before inactive data in a volume using the Auto or Snapshot-Only policy is considered cold and eligible for tiering. +- na_ontap_volume_clone - Added REST support. + +Bugfixes +-------- + +- four modules (mediator, metrocluster, security_certificates, wwpn_alias) would report a None error when REST is not available. +- module_utils - fixed KeyError on Allow when using OPTIONS method and the API failed. +- na_ontap_active_directory - Fixed idempotency and traceback issues. +- na_ontap_aggregate - Fixed KeyError on unmount_volumes when offlining a volume if option is not set. +- na_ontap_aggregate - Report an error when attempting to change snaplock_type. +- na_ontap_igroup - ``force_remove_initiator`` option was ignored when removing initiators from existing igroup. +- na_ontap_info - Add active_directory_account_info. 
+- na_ontap_security_certificates - ``intermediate_certificates`` option was ignored. +- na_ontap_user - Fixed TypeError 'tuple' object does not support item assignment. +- na_ontap_user - Fixed issue when attempting to change password for absent user when set_password is set. +- na_ontap_user - Fixed lock state is not set if password is not changed. +- na_ontap_volume - Fixed error when creating a flexGroup when ``aggregate_name`` and ``aggr_list_multiplier`` are not set in rest. +- na_ontap_volume - Fixed error with unmounting junction_path in rest. +- na_ontap_volume - report error when attempting to change the nas_application tiering control from disallowed to required, or reciprocally. + +v21.15.1 +======== + +Bugfixes +-------- + +- na_ontap_export_policy_rule - Fixed bug that prevented ZAPI and REST calls from working correctly + +v21.15.0 +======== + +Minor Changes +------------- + +- na_ontap_broadcast_domain - Added REST support to the broadcast domain module. +- na_ontap_broadcast_domain - new REST only option ``from_ipspace`` added. +- na_ontap_broadcast_domain_ports - warn about deprecation, fall back to ZAPI or fail when REST is desired. +- na_ontap_export_policy_rule -- Added Rest support for Export Policy Rules +- na_ontap_firmware_upgrade - REST support to download firmware and reboot SP. +- na_ontap_license - Added REST support to the license module. +- na_ontap_rest_info - update documentation for `fields` to clarify the list of fields that are returned by default. +- na_ontap_svm - new REST options of svm admin_state ``stopped`` and ``running`` added. + +Bugfixes +-------- + +- na_ontap_broadcast_domain - fix idempotency issue when ``ports`` has identical values. +- na_ontap_info - fix KeyError on node for aggr_efficiency_info option against a metrocluster system. +- na_ontap_volume - Fixed issue that would fail the module in REST when changing `is_online` if two vserver volume had the same name. 
+- na_ontap_volume - If using REST and ONTAP 9.6 and `efficiency_policy` module will fail as `efficiency_policy` is not supported in ONTAP 9.6. +- na_ontap_volume_efficiency - Removed restriction on policy name. + +v21.14.1 +======== + +Bugfixes +-------- + +- na_ontap_net_ifgrp - fix error in modify ports with zapi. + +v21.14.0 +======== + +Minor Changes +------------- + +- na_ontap_aggregate - new option ``encryption`` to enable encryption with ZAPI. +- na_ontap_fcp -- Added REST support for FCP +- na_ontap_net_ifgrp - Added REST support to the net ifgrp module. +- na_ontap_net_ifgrp - new REST only options ``from_lag_ports``, ``broadcast_domain`` and ``ipspace`` added. +- na_ontap_net_port - Added REST support to the net port module +- na_ontap_restit - new option ``wait_for_completion`` to support asynchronous operations and wait for job completion. +- na_ontap_volume - Added REST support to the volume module +- na_ontap_volume_efficiency - new option ``storage_efficiency_mode`` for AFF only with 9.10.1 or later. +- na_ontap_vserver_delete role - added set_fact to accept ``netapp_{hostname|username|password}`` or ``hostname,username and password`` variables. +- na_ontap_vserver_delete role - do not report an error if the vserver does not exist. +- na_ontap_vserver_peer - Added REST support to the vserver_peer module + +Bugfixes +-------- + +- fix error where module will fail for ONTAP 9.6 if use_rest was set to auto +- na_ontap_cifs_local_user_modify - KeyError on ``description`` or ``full_name`` with REST. +- na_ontap_cifs_local_user_modify - unexpected argument ``name`` error with REST. +- na_ontap_export_policy - fix error if more than 1 vserver matched search name, the wrong uuid could be given +- na_ontap_net_routes - metric was not always modified with ZAPI. +- na_ontap_net_routes - support cluster-scoped routes with REST. +- na_ontap_vserver_delete role - report error if ONTAP version is 9.6 or older. 
+ +v21.13.1 +======== + +Bugfixes +-------- + +- cluster scoped modules are failing on FSx with 'Vserver API missing vserver parameter' error. + +v21.13.0 +======== + +Minor Changes +------------- + +- PR15 - allow usage of Ansible module group defaults - for Ansible 2.12+. +- na_ontap_cluster - add ``force`` option when deleting a node. +- na_ontap_interface - Added REST support to the interface module (for IP and FC interfaces). +- na_ontap_net_vlan - Added REST support to the net vlan module. +- na_ontap_net_vlan - new REST options ``broadcast_domain``, ``ipspace`` and ``enabled`` added. +- na_ontap_object_store - new REST options ``owner`` and ``change_password``. +- na_ontap_object_store - support modifying an object store config with REST. + +Bugfixes +-------- + +- na_ontap_cluster - ``single_node_cluster`` was silently ignored with REST. +- na_ontap_cluster - switch to ZAPI when DELETE is required with ONTAP 9.6. +- na_ontap_snapmirror - ``source_path`` and ``source_hostname`` parameters are not mandatory to delete snapmirror relationship when source cluster is unknown, if specified it will delete snapmirror at destination and release the same at source side. if not, it only deletes the snapmirror at destination and will not look for source to perform snapmirror release. +- na_ontap_snapmirror - modify policy, schedule and other parameter failure are fixed. +- na_ontap_snapshot - ``expiry_time`` required REST api, will return error if set when using ZAPI. +- na_ontap_snapshot - ``snapmirror_label`` is supported with REST on ONTAP 9.7 or higher, report error if used on ONTAP 9.6. +- na_ontap_storage_failover - KeyError on 'ha' if the system is not configured as HA. +- na_ontap_svm - module will fail on init if a rest only and zapi only option are used at the same time. + +v21.12.0 +======== + +Minor Changes +------------- + +- na_ontap_cluster - Added REST support to the cluster module. 
+- na_ontap_firewall_policy - added ``none`` as a choice for ``service`` which is supported from 9.8 ONTAP onwards. +- na_ontap_svm - new option ``max_volumes``. +- na_ontap_svm - support ``allowed protocols`` with REST for ONTAP 9.6 and later. + +Bugfixes +-------- + +- na_ontap_job_schedule - cannot modify options not present in create when using REST. +- na_ontap_job_schedule - fix idempotency issue with ZAPI when job_minutes is set to -1. +- na_ontap_job_schedule - modify error if month is changed from some values to all (-1) when using REST. +- na_ontap_job_schedule - modify error if month is present but not changed with 0 offset when using REST. +- na_ontap_vserver_delete role - fix typos for cifs. + +v21.11.0 +======== + +Minor Changes +------------- + +- na_ontap_interface - new option ``from_name`` to rename an interface. +- na_ontap_ntp - Added REST support to the ntp module +- na_ontap_ntp - Added REST support to the ntp module +- na_ontap_software_update - new option ``validate_after_download`` to run ONTAP software update validation checks. +- na_ontap_software_update - remove ``absent`` as a choice for ``state`` as it has no use. +- na_ontap_svm - ignore ``aggr_list`` with ``'*'`` when using REST. +- na_ontap_svm - new option ``ignore_rest_unsupported_options`` to ignore older ZAPI options not available in REST. +- na_ontap_svm - new option ``services`` to allow and/or enable protocol services. + +Bugfixes +-------- + +- na_ontap_job_schedule - fix idempotency issue with REST when job_minutes is set to -1. +- na_ontap_ldap_client - remove limitation on schema so that custom schemas can be used. + +v21.10.0 +======== + +Minor Changes +------------- + +- na_ontap_cifs_server - ``force`` option is supported when state is absent to ignore communication errors. + +Bugfixes +-------- + +- all modules - traceback on ONTAP 9.3 (and earlier) when trying to detect REST support. 
+- na_ontap_vserver_delete role - delete iSCSI igroups and CIFS server before deleting vserver. + +v21.9.0 +======= + +Minor Changes +------------- + +- na_ontap_job_schedule - new option ``month_offset`` to explictly select 0 or 1 for January. +- na_ontap_object_store - new option ``port``, ``certificate_validation_enabled``, ``ssl_enabled`` for target server. +- na_ontap_rest_info - All Info that exist in ``na_ontap_info`` that has REST equivalents have been implemented. Note that the returned structure for REST and the variable names in the structure is different from the ZAPI based ``na_ontap_info``. Some default variables in ZAPI are no longer returned by default in REST and will need to be specified using the ``field`` option. +- na_ontap_rest_info - The Default for ``gather_subset`` has been changed to demo which returns ``cluster/software``, ``svm/svms``, ``cluster/nodes``. To return all Info must specificly list ``all`` in your playbook. Do note ``all`` is a very resource-intensive action and it is highly recommended to call just the info/APIs you need. +- na_ontap_rest_info - The following info subsets have been added ``system_node_info``, ``net_interface_info``, ``net_port_info``, ``security_login_account_info``, ``vserver_peer_info``, ``cluster_image_info``, ``cluster_log_forwarding_info``, ``metrocluster_info``, ``metrocluster_node_info``, ``net_dns_info``, ``net_interface_service_policy_info``, ``vserver_nfs_info``, ``clock_info``, ``igroup_info``, ``vscan_status_info``, ``vscan_connection_status_all_info``, ``storage_bridge_info``, ``nvme_info``, ``nvme_interface_info``, ``nvme_subsystem_info``, ``cluster_switch_info``, ``export_policy_info``, ``kerberos_realm_info``,``sis_info``, ``sis_policy_info``, ``snapmirror_info``, ``snapmirror_destination_info``, ``snapmirror_policy_info``, ``sys_cluster_alerts``, ``cifs_vserver_security_info`` +- na_ontap_rest_info - added file_directory_security to return the effective permissions of the directory. 
When using file_directory_security it must be called with gather_subsets and path and vserver must be specified in parameters. +- na_ontap_rest_info - new option ``use_python_keys`` to replace ``svm/svms`` with ``svm_svms`` to simplify post processing. +- na_ontap_snmp - Added REST support to the SNMP module + +Bugfixes +-------- + +- na_ontap_job_schedule - fix documentation for REST ranges for months. +- na_ontap_object_store - when using REST, wait for job status to correctly report errors. +- na_ontap_quotas - attempt to retry on ``13001:success`` ZAPI error. Add debug data. +- na_ontap_rest_cli - removed incorrect statement indicating that console access is required. + +v21.8.1 +======= + +Bugfixes +-------- + +- all REST modules - 9.4 and 9.5 were incorrectly detected as supporting REST. +- na_ontap_snapmirror - improve error message when option is not supported with ZAPI. + +v21.8.0 +======= + +Minor Changes +------------- + +- na_ontap_cluster_peer - new option ``peer_options`` to use different credentials on peer. +- na_ontap_debug - additional checks when REST is available to help debug vserver connectivity issues. +- na_ontap_flexcache - corrected module name in documentation Examples +- na_ontap_net_port - change option types to bool and int respectively for ``autonegotiate_admin`` and ``mtu``. +- na_ontap_net_port - new option ``up_admin`` to set administrative state. +- na_ontap_rest_info - add examples for ``parameters`` option. +- na_ontap_snapshot - add REST support to create, modify, rename, and delete snapshot. +- na_ontap_snapshot - new option ``expiry_time``. +- na_ontap_volume - show warning when resize is ignored because threshold is not reached. +- na_ontap_vserver_create role - add ``nfsv3``, ``nfsv4``, ``nfsv41`` options. +- na_ontap_vserver_peer - new option ``peer_options`` to use different credentials on peer. 
+ +Bugfixes +-------- + +- all modules - fix traceback TypeError 'NoneType' object is not subscriptable when hostname points to a web server. +- na_ontap_cluster_peer - KeyError on dest_cluster_name if destination is unreachable. +- na_ontap_cluster_peer - KeyError on username when using certificate. +- na_ontap_export_policy_rule - change ``anonymous_user_id`` type to str to accept user name and user id. (A warning is now triggered when a number is not quoted.) +- na_ontap_volume_clone - ``parent_vserver`` can not be given with ``junction_path``, ``uid``, or ``gid`` +- na_ontap_vserver_peer - KeyError on username when using certificate. + +New Modules +----------- + +- netapp.ontap.na_ontap_cifs_local_user_set_password - NetApp ONTAP set local CIFS user password +- netapp.ontap.na_ontap_fdsd - NetApp ONTAP create or remove a File Directory security descriptor. +- netapp.ontap.na_ontap_fdsp - NetApp ONTAP create or delete a file directory security policy +- netapp.ontap.na_ontap_fdspt - NetApp ONTAP create, delete or modify File Directory security policy tasks +- netapp.ontap.na_ontap_fdss - NetApp ONTAP File Directory Security Set. +- netapp.ontap.na_ontap_partitions - NetApp ONTAP Assign partitions and disks to nodes. + +v21.7.0 +======= + +Minor Changes +------------- + +- License displayed correctly in Github +- na_ontap_cifs - new option ``comment`` to associate a description to a CIFS share. +- na_ontap_disks - added REST support for the module. +- na_ontap_disks - added functionality to reassign spare disks from a partner node to the desired node. +- na_ontap_disks - new option min_spares. +- na_ontap_lun - new suboption ``exclude_aggregates`` for SAN application. +- na_ontap_volume - new suboption ``exclude_aggregates`` for NAS application. + +Bugfixes +-------- + +- na_ontap_flexcache - one occurrence of msg missing in call to fail_json. +- na_ontap_igroup - one occurrence of msg missing in call to fail_json. 
+- na_ontap_igroups - nested igroups are not supported on ONTAP 9.9.0 but are on 9.9.1. +- na_ontap_iscsi_security - IndexError list index out of range if vserver does not exist +- na_ontap_iscsi_security - cannot change authentication_type +- na_ontap_lun - three occurrences of msg missing in call to fail_json. +- na_ontap_lun_map_reporting_nodes - one occurrence of msg missing in call to fail_json. +- na_ontap_snapmirror - one occurrence of msg missing in call to fail_json. + +New Modules +----------- + +- netapp.ontap.na_ontap_publickey - NetApp ONTAP publickey configuration +- netapp.ontap.na_ontap_service_policy - NetApp ONTAP service policy configuration + +v21.6.1 +======= + +Bugfixes +-------- + +- na_ontap_autosupport - KeyError - No element by given name validate-digital-certificate. + +v21.6.0 +======= + +Minor Changes +------------- + +- na_ontap_rest_info - Added "autosupport_check_info"/"support/autosupport/check" to the attributes that will be collected when gathering info using the module. +- na_ontap_users - new option ``application_dicts`` to associate multiple authentication methods to an application. +- na_ontap_users - new option ``application_strs`` to disambiguate ``applications``. +- na_ontap_users - new option ``replace_existing_apps_and_methods``. +- na_ontap_users - new suboption ``second_authentication_method`` with ``application_dicts`` option. +- na_ontap_vserver_peer - new options ``local_name_for_source`` and ``local_name_for_peer`` added. + +Bugfixes +-------- + +- na_ontap_autosupport - TypeError - '>' not supported between instances of 'str' and 'list'. +- na_ontap_quotas - fail to reinitialize on create if quota is already on. + +v21.5.0 +======= + +Major Changes +------------- + +- na_ontap_autosupport - Added REST support to the module. + +Minor Changes +------------- + +- na_ontap_autosupport - new option ``local_collection_enabled`` to specify whether collection of AutoSupport data is enabled when the AutoSupport daemon is disabled. 
+- na_ontap_autosupport - new option ``max_http_size`` to specify delivery size limit for the HTTP transport protocol (in bytes). +- na_ontap_autosupport - new option ``max_smtp_size`` to specify delivery size limit for the SMTP transport protocol (in bytes). +- na_ontap_autosupport - new option ``nht_data_enabled`` to specify whether the disk health data is collected as part of the AutoSupport data. +- na_ontap_autosupport - new option ``ondemand_enabled`` to specify whether the AutoSupport OnDemand Download feature is enabled. +- na_ontap_autosupport - new option ``perf_data_enabled`` to specify whether the performance data is collected as part of the AutoSupport data. +- na_ontap_autosupport - new option ``private_data_removed`` to specify the removal of customer-supplied data. +- na_ontap_autosupport - new option ``reminder_enabled`` to specify whether AutoSupport reminders are enabled or disabled. +- na_ontap_autosupport - new option ``retry_count`` to specify the maximum number of delivery attempts for an AutoSupport message. +- na_ontap_autosupport - new option ``validate_digital_certificate`` which when set to true each node will validate the digital certificates that it receives. +- na_ontap_info - Added "autosupport_check_info" to the attributes that will be collected when gathering info using the module. + +Bugfixes +-------- + +- na_ontap_qtree - wait for completion when creating or modifying a qtree with REST. +- na_ontap_volume - ignore read error because of insufficient privileges for efficiency options so that the module can be run as vsadmin. + +v21.4.0 +======= + +Minor Changes +------------- + +- na_ontap_igroups - new option ``initiator_names`` as a replacement for ``initiators`` (still supported as an alias). +- na_ontap_igroups - new option ``initiator_objects`` to support initiator comments (requires ONTAP 9.9). +- na_ontap_lun - allow new LUNs to use different igroup or os_type when using SAN application. 
+- na_ontap_lun - ignore small increase (lower than provisioned) and small decrease (< 10%) in ``total_size``. +- na_ontap_node - added REST support for ONTAP node modify and rename. +- na_ontap_volume - warn when attempting to modify application only options. +- na_ontap_volume_efficiency - new option 'start_ve_build_metadata' scan the entire volume and generate fingerprint database. +- na_ontap_volume_efficiency - new option 'start_ve_delete_checkpoint' delete checkpoint and start the operation from the beginning. +- na_ontap_volume_efficiency - new option 'start_ve_qos_policy' defines the QoS policy for the operation. +- na_ontap_volume_efficiency - new option 'start_ve_queue_operation' queue if an existing operation is already running. +- na_ontap_volume_efficiency - new option 'start_ve_scan_all' scan the entire volume without applying share block optimization. +- na_ontap_volume_efficiency - new option 'start_ve_scan_old_data' scan the file system to process all the existing data. +- na_ontap_volume_efficiency - new option 'stop_ve_all_operations' all running and queued operations to be stopped. +- na_ontap_volume_efficiency - new option to allow volume efficiency to be started and stopped 'volume_efficiency'. + +Bugfixes +-------- + +- na_ontap_autosupport - warn when password is present in ``proxy_url`` as it makes the operation not idempotent. +- na_ontap_cluster - ignore ZAPI EMS log error when in pre-cluster mode. +- na_ontap_lun - SAN application is not supported on 9.6 and only partially supported on 9.7 (no modify). +- na_ontap_svm - iscsi current status is not read correctly (misspelled issi). + +New Modules +----------- + +- netapp.ontap.na_ontap_cifs_local_user_modify - NetApp ONTAP modify local CIFS user. 
+- netapp.ontap.na_ontap_disk_options - NetApp ONTAP modify storage disk options +- netapp.ontap.na_ontap_fpolicy_event - NetApp ONTAP FPolicy policy event configuration +- netapp.ontap.na_ontap_fpolicy_ext_engine - NetApp ONTAP fPolicy external engine configuration. +- netapp.ontap.na_ontap_fpolicy_scope - NetApp ONTAP - Create, delete or modify an FPolicy policy scope configuration. +- netapp.ontap.na_ontap_fpolicy_status - NetApp ONTAP - Enables or disables the specified fPolicy policy +- netapp.ontap.na_ontap_snaplock_clock - NetApp ONTAP Sets the snaplock compliance clock. + +v21.3.1 +======= + +Bugfixes +-------- + +- na_ontap_snapmirror - check for consistency_group_volumes always fails on 9.7, and cluster or ipspace when using endpoints with ZAPI. + +v21.3.0 +======= + +Minor Changes +------------- + +- na_ontap_debug - improve error reporting for import errors on netapp_lib. +- na_ontap_flexcache - mount/unmount the FlexCache volume when using REST. +- na_ontap_flexcache - support REST APIs in addition to ZAPI for create and delete. +- na_ontap_flexcache - support for ``prepopulate`` option when using REST (requires ONTAP 9.8). +- na_ontap_igroups - new option ``igroups`` to support nested igroups (requires ONTAP 9.9). +- na_ontap_info - improve error reporting for import errors on netapp_lib, json, xmltodict. +- na_ontap_motd - deprecated module warning and to use na_ontap_login_messages. +- na_ontap_volume - new suboption ``dr_cache`` when creating flexcache using NAS application template. +- na_ontap_volume_efficiency - to allow for FAS ONTAP systems to enable volume efficiency when it does not exist and apply additional parameters. +- na_ontap_volume_efficiency - to allow for FAS ONTAP systems to enable volume efficiency when it does not exist. + +Bugfixes +-------- + +- na_ontap_ldap_client - ``port`` was incorrectly used instead of ``tcp_port``. +- na_ontap_node - KeyError fix for location and asset-tag parameters in get_node(). 
+- na_ontap_snapmirror - SVM scoped policies were not found when using a destination path with REST application. +- na_ontap_volume - changes in ``encrypt`` settings were ignored. +- na_ontap_volume - unmount volume before deleting it when using REST. + +New Modules +----------- + +- netapp.ontap.na_ontap_domain_tunnel - NetApp ONTAP domain tunnel +- netapp.ontap.na_ontap_fpolicy_policy - NetApp ONTAP - Create, delete or modify an FPolicy policy. +- netapp.ontap.na_ontap_security_config - NetApp ONTAP modify security config for SSL. +- netapp.ontap.na_ontap_storage_auto_giveback - Enables or disables NetApp ONTAP storage auto giveback for a specified node +- netapp.ontap.na_ontap_storage_failover - Enables or disables NetApp Ontap storage failover for a specified node + +v21.2.0 +======= + +Minor Changes +------------- + +- azure_rm_netapp_account - new option ``active_directories`` to support SMB volumes. +- azure_rm_netapp_volume - new option ``protocol_types`` to support SMB volumes. +- na_ontap_igroup - added REST support for ONTAP igroup creation, modification, and deletion. +- na_ontap_lun - add ``comment`` option. +- na_ontap_lun - convert existing LUNs and supporting volume to a smart container within a SAN application. +- na_ontap_lun - new option ``qos_adaptive_policy_group``. +- na_ontap_lun - new option ``scope`` to explicitly force operations on the SAN application or a single LUN. +- na_ontap_node - added modify function for location and asset tag for node. +- na_ontap_snapmirror - add new options ``source_endpoint`` and ``destination_endpoint`` to group endpoint suboptions. +- na_ontap_snapmirror - add new suboptions ``consistency_group_volumes`` and ``ipspace`` to endpoint options. +- na_ontap_snapmirror - deprecate older options for source and destination paths, volumes, vservers, and clusters. +- na_ontap_snapmirror - improve error reporting or warn when REST option is not supported. 
+- na_ontap_snapmirror - report warning when relationship is present but not healthy. + +Bugfixes +-------- + +- All REST modules - ONTAP 9.4 and 9.5 are incorrectly detected as supporting REST with ``use_rest:auto``. +- na_ontap_igroup - report error when attempting to modify an option that cannot be changed. +- na_ontap_lun - ``qos_policy_group`` could not be modified if a value was not provided at creation. +- na_ontap_lun - tiering options were ignored in san_application_template. +- na_ontap_volume - report error from resize operation when using REST. +- na_ontap_volume - returns an error now if deleting a volume with REST api fails. + +New Modules +----------- + +- netapp.ontap.na_ontap_cifs_local_group_member - NetApp Ontap - Add or remove CIFS local group member +- netapp.ontap.na_ontap_log_forward - NetApp ONTAP Log Forward Configuration +- netapp.ontap.na_ontap_lun_map_reporting_nodes - NetApp ONTAP LUN maps reporting nodes +- netapp.ontap.na_ontap_volume_efficiency - NetApp Ontap enables, disables or modifies volume efficiency + +v21.1.0 +======= + +Minor Changes +------------- + +- general - improve error reporting when older version of netapp-lib is used. +- na_ontap_cluster - ``time_out`` to wait for cluster creation, adding and removing a node. +- na_ontap_debug - connection diagnostics added for invalid ipaddress and DNS hostname errors. +- na_ontap_firmware_upgrade - new option for firmware type ``storage`` added. +- na_ontap_info - deprecate ``state`` option. +- na_ontap_lun - new options ``total_size`` and ``total_size_unit`` when using SAN application template. +- na_ontap_lun - support increasing lun_count and total_size when using SAN application template. +- na_ontap_quota - allow to turn quota on/off without providing quota_target or type. +- na_ontap_rest_info - deprecate ``state`` option. +- na_ontap_snapmirror - new option ``create_destination`` to automatically create destination endpoint (ONTAP 9.7). 
+- na_ontap_snapmirror - new option ``destination_cluster`` to automatically create destination SVM for SVM DR (ONTAP 9.7). +- na_ontap_snapmirror - new option ``source_cluster`` to automatically set SVM peering (ONTAP 9.7). +- na_ontap_snapmirror - use REST API for create action if target supports it. (ZAPIs are still used for all other actions). +- na_ontap_volume - use REST API for delete operation if targets supports it. + +Bugfixes +-------- + +- na_ontap_lun - REST expects 'all' for tiering policy and not 'backup'. +- na_ontap_quotas - Handle blank string idempotency issue for ``quota_target`` in quotas module. +- na_ontap_rest_info - ``changed`` was set to "False" rather than boolean False. +- na_ontap_snapmirror - fix job update failures for load_sharing mirrors. +- na_ontap_snapmirror - report error when attempting to change relationship_type. +- na_ontap_snapmirror - wait up to 5 minutes for abort to complete before issuing a delete. +- na_ontap_snmp - SNMP module wrong ``access_control`` issue and error handling fix. +- na_ontap_volume - REST expects 'all' for tiering policy and not 'backup'. +- na_ontap_volume - detect and report error when attempting to change FlexVol into FlexGroup. +- na_ontap_volume - report error if ``aggregate_name`` option is used with a FlexGroup. + +New Modules +----------- + +- netapp.ontap.na_ontap_debug - NetApp ONTAP Debug netapp-lib import and connection. + +v20.12.0 +======== + +Minor Changes +------------- + +- all ZAPI modules - new ``classic_basic_authorization`` feature_flag to disable adding Authorization header proactively. +- all ZAPI modules - optimize Basic Authentication by adding Authorization header proactively. +- na_ontap_igroup - new option ``os_type`` to replace ``ostype`` (but ostype is still accepted). 
+- na_ontap_info - New options ``cifs_options_info``, ``cluster_log_forwarding_info``, ``event_notification_destination_info``, ``event_notification_info``, ``security_login_role_config_info``, ``security_login_role_info`` have been added. +- na_ontap_lun - new option ``from_name`` to rename a LUN. +- na_ontap_lun - new option ``os_type`` to replace ``ostype`` (but ostype is still accepted), and removed default to ``image``. +- na_ontap_lun - new option ``qos_policy_group`` to assign a qos_policy_group to a LUN. +- na_ontap_lun - new option ``san_application_template`` to create LUNs without explicitly creating a volume and using REST APIs. +- na_ontap_qos_policy_group - new option ``is_shared`` for sharing QOS SLOs or not. +- na_ontap_quota_policy - new option ``auto_assign`` to assign quota policy to vserver. +- na_ontap_quotas - New option ``activate_quota_on_change`` to resize or reinitialize quotas. +- na_ontap_quotas - New option ``perform_user_mapping`` to perform user mapping for the user specified in quota-target. +- na_ontap_rest_info - Support for gather subsets - ``cifs_home_directory_info, cluster_software_download, event_notification_info, event_notification_destination_info, security_login_info, security_login_rest_role_info`` +- na_ontap_volume - ``compression`` to enable compression on a FAS volume. +- na_ontap_volume - ``inline-compression`` to enable inline compression on a volume. +- na_ontap_volume - ``nas_application_template`` to create a volume using nas application REST API. +- na_ontap_volume - ``size_change_threshold`` to ignore small changes in volume size. +- na_ontap_volume - ``sizing_method`` to resize a FlexGroup using REST. + +Bugfixes +-------- + +- na_ontap_broadcast_domain_ports - handle ``changed`` for check_mode and report correctly. +- na_ontap_cifs - fix for AttributeError - 'NoneType' object has no attribute 'get' on line 300 +- na_ontap_svm - warning for ``aggr_list`` wildcard value(``*``) in create idempotency. 
+- na_ontap_user - application expects only ``service_processor`` but module supports ``service-processor``.
+- na_ontap_volume - checking for success before failure led to 'NoneType' object has no attribute 'get_child_by_name' when modifying a Flexcache volume.
+- na_ontap_volume - fix volume type modify issue by reporting error.
+
+v20.11.0
+========
+
+Minor Changes
+-------------
+
+- na_ontap_cifs - output ``modified`` if a modify action is taken.
+- na_ontap_cluster_peer - optional parameter ``ipspace`` added for cluster peer.
+- na_ontap_export_policy_rule - minor doc updates.
+- na_ontap_info - do not require write access privileges. This also enables other modules to work in check_mode without write access permissions.
+- na_ontap_interface - minor example update.
+- na_ontap_lun - ``use_exact_size`` to create a lun with the exact given size so that the lun is not rounded up.
+- na_ontap_lun - support modify for space_allocation and space_reserve.
+- na_ontap_mcc_mediator - improve error reporting when REST is not available.
+- na_ontap_metrocluster - improve error reporting when REST is not available.
+- na_ontap_software_update - add `force_update` option to ignore current version.
+- na_ontap_svm - output ``modified`` if a modify action is taken.
+- na_ontap_wwpn_alias - improve error reporting when REST is not available.
+
+Bugfixes
+--------
+
+- All REST modules - will not fail if a job fails
+- na_ontap_cifs - fix idempotency issue when ``show-previous-versions`` is used.
+- na_ontap_firmware_upgrade - fix ValueError issue when processing URL error.
+- na_ontap_info - Use ``node-id`` as key rather than ``current-version``.
+- na_ontap_ipspace - invalid call in error reporting (double error).
+- na_ontap_software_update - module is not idempotent.
+ +New Modules +----------- + +- netapp.ontap.na_ontap_metrocluster_dr_group - NetApp ONTAP manage MetroCluster DR Group + +v20.10.0 +======== + +Minor Changes +------------- + +- na_ontap_rest_info - Support for gather subsets - ``application_info, application_template_info, autosupport_config_info , autosupport_messages_history, ontap_system_version, storage_flexcaches_info, storage_flexcaches_origin_info, storage_ports_info, storage_qos_policies, storage_qtrees_config, storage_quota_reports, storage_quota_policy_rules, storage_shelves_config, storage_snapshot_policies, support_ems_config, support_ems_events, support_ems_filters`` + +Bugfixes +-------- + +- na_ontap_aggregate - support concurrent actions for rename/modify/add_object_store and create/add_object_store. +- na_ontap_cluster - ``single_node_cluster`` option was ignored. +- na_ontap_info - KeyError on ``tree`` for quota_report_info. +- na_ontap_info - better reporting on KeyError traceback, option to ignore error. +- na_ontap_snapmirror_policy - report error when attempting to change ``policy_type`` rather than taking no action. +- na_ontap_volume - ``encrypt`` with a value of ``false`` is ignored when creating a volume. + +v20.9.0 +======= + +Minor Changes +------------- + +- na_ontap_cluster - ``node_name`` to set the node name when adding a node, or as an alternative to `cluster_ip_address`` to remove a node. +- na_ontap_cluster - ``state`` can be set to ``absent`` to remove a node identified with ``cluster_ip_address`` or ``node_name``. +- na_ontap_qtree - ``wait_for_completion`` and ``time_out`` to wait for qtree deletion when using REST. +- na_ontap_quotas - ``soft_disk_limit`` and ``soft_file_limit`` for the quota target. 
+- na_ontap_rest_info - Support for gather subsets - ``initiator_groups_info, san_fcp_services, san_iscsi_credentials, san_iscsi_services, san_lun_maps, storage_luns_info, storage_NVMe_namespaces.`` + +Bugfixes +-------- + +- na_ontap_* - change version_added from '2.6' to '2.6.0' where applicable to satisfy sanity checker. +- na_ontap_cluster - ``check_mode`` is now working properly. +- na_ontap_interface - ``home_node`` is not required in pre-cluster mode. +- na_ontap_interface - ``role`` is not required if ``service_policy`` is present and ONTAP version is 9.8. +- na_ontap_interface - traceback in get_interface if node is not reachable. +- na_ontap_job_schedule - allow ``job_minutes`` to set number to -1 for job creation with REST too. +- na_ontap_qtree - fixed ``None is not subscriptable`` exception on rename operation. +- na_ontap_volume - fixed ``KeyError`` exception on ``size`` when reporting creation error. +- netapp.py - uncaught exception (traceback) on zapi.NaApiError. + +New Modules +----------- + +- netapp.ontap.na_ontap_active_directory - NetApp ONTAP configure active directory +- netapp.ontap.na_ontap_mcc_mediator - NetApp ONTAP Add and Remove MetroCluster Mediator +- netapp.ontap.na_ontap_metrocluster - NetApp ONTAP set up a MetroCluster + +v20.8.0 +======= + +Minor Changes +------------- + +- add ``type:`` and ``elements:`` information where missing. +- na_ontap_aggregate - support ``disk_size_with_unit`` option. +- na_ontap_ldap_client - support ``ad_domain`` and ``preferred_ad_server`` options. +- na_ontap_qtree - ``force_delete`` option with a DEFAULT of ``true`` so that ZAPI behavior is aligned with REST. 
+- na_ontap_rest_info - Support for gather subsets - ``cloud_targets_info, cluster_chassis_info, cluster_jobs_info, cluster_metrics_info, cluster_schedules, broadcast_domains_info, cluster_software_history, cluster_software_packages, network_ports_info, ip_interfaces_info, ip_routes_info, ip_service_policies, network_ipspaces_info, san_fc_logins_info, san_fc_wppn-aliases, svm_dns_config_info, svm_ldap_config_info, svm_name_mapping_config_info, svm_nis_config_info, svm_peers_info, svm_peer-permissions_info``. +- na_ontap_rest_info - Support for gather subsets for 9.8+ - ``cluster_metrocluster_diagnostics``. +- na_ontap_security_certificates - ``ignore_name_if_not_supported`` option to not fail if ``name`` is present since ``name`` is not supported in ONTAP 9.6 and 9.7. +- na_ontap_software_update - added ``timeout`` option to give enough time for the update to complete. +- update ``required:`` information. +- use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. + +Bugfixes +-------- + +- na_ontap_aggregate - ``disk-info`` error when using ``disks`` option. +- na_ontap_autosupport_invoke - ``message`` has changed to ``autosupport_message`` as Redhat has reserved this word. ``message`` has been alias'd to ``autosupport_message``. +- na_ontap_cifs_vserver - fix documentation and add more examples. +- na_ontap_cluster - module was not idempotent when changing location or contact information. +- na_ontap_igroup - idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase). +- na_ontap_igroup_initiator - idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase). +- na_ontap_info - Fixed error causing module to fail on ``metrocluster_check_info``, ``env_sensors_info`` and ``volume_move_target_aggr_info``. +- na_ontap_security_certificates - allows (``common_name``, ``type``) as an alternate key since ``name`` is not supported in ONTAP 9.6 and 9.7. 
+- na_ontap_snapmirror - fixed KeyError when accessing ``relationship_type`` parameter.
+- na_ontap_snapmirror_policy - fixed a race condition when creating a new policy.
+- na_ontap_snapmirror_policy - fixed idempotency issue with is_network_compression_enabled for REST.
+- na_ontap_software_update - ignore connection errors during update as nodes may not be reachable.
+- na_ontap_user - enable lock state and password to be set in the same task for existing user.
+- na_ontap_volume - issue when snapdir_access and atime_update not passed together.
+- na_ontap_vscan_on_access_policy - ``bool`` type was not properly set for ``scan_files_with_no_ext``.
+- na_ontap_vscan_on_access_policy - ``policy_status`` enable/disable option was not supported.
+- na_ontap_vscan_on_demand_task - ``file_ext_to_include`` was not handled properly.
+- na_ontap_vscan_scanner_pool_policy - scanner_pool apply policy support on modification.
+- na_ontap_vserver_create(role) - lif creation now defaults to system-defined unless iscsi lif type.
+- use_rest is now case insensitive.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_file_directory_policy - NetApp ONTAP create, delete, or modify vserver security file-directory policy
+- netapp.ontap.na_ontap_ssh_command - NetApp ONTAP Run any cli command over plain SSH using paramiko.
+- netapp.ontap.na_ontap_wait_for_condition - NetApp ONTAP wait_for_condition. Loop over a get status request until a condition is met.
+
+v20.7.0
+=======
+
+Minor Changes
+-------------
+
+- module_utils/netapp - add retry on wait_on_job when job failed. Abort after 3 consecutive errors.
+- na_ontap_info - support ``continue_on_error`` option to continue when a ZAPI is not supported on a vserver, or for cluster RPC errors.
+- na_ontap_info - support ``query`` option to specify which objects to return.
+- na_ontap_info - support ``vserver`` tunneling to limit output to one vserver.
+- na_ontap_pb_get_online_volumes.yml - example playbook to list volumes that are online (or offline). +- na_ontap_pb_install_SSL_certificate_REST.yml - example playbook to install SSL certificates using REST APIs. +- na_ontap_rest_info - Support for gather subsets - ``cluster_node_info, cluster_peer_info, disk_info, cifs_services_info, cifs_share_info``. +- na_ontap_snapmirror_policy - support for SnapMirror policy rules. +- na_ontap_vscan_scanner_pool - support modification. + +Bugfixes +-------- + +- na_ontap_command - replace invalid backspace characters (0x08) with '.'. +- na_ontap_firmware_download - exception on PCDATA if ONTAP returns a BEL (0x07) character. +- na_ontap_info - lists were incorrectly processed in convert_keys, returning {}. +- na_ontap_info - qtree_info is missing most entries. Changed key from `vserver:id` to `vserver:volume:id` . +- na_ontap_iscsi_security - adding no_log for password parameters. +- na_ontap_portset - adding explicit error message as modify portset is not supported. +- na_ontap_snapmirror - fixed snapmirror delete for loadsharing to not go to quiesce state for the rest of the set. +- na_ontap_ucadapter - fixed KeyError if type is not provided and mode is 'cna'. +- na_ontap_user - checked `applications` does not contain snmp when using REST API call. +- na_ontap_user - fixed KeyError if locked key not set with REST API call. +- na_ontap_user - fixed KeyError if vserver - is empty with REST API call (useful to indicate cluster scope). +- na_ontap_volume - fixed KeyError when getting info on a MVD volume + +New Modules +----------- + +- netapp.ontap.na_ontap_security_certificates - NetApp ONTAP manage security certificates. + +v20.6.1 +======= + +Minor Changes +------------- + +- na_ontap_firmware_upgrade - ``reboot_sp`` - reboot service processor before downloading package. +- na_ontap_firmware_upgrade - ``rename_package`` - rename file when downloading service processor package. 
+- na_ontap_firmware_upgrade - ``replace_package`` - replace local file when downloading service processor package.
+
+Bugfixes
+--------
+
+- na_ontap_firmware_upgrade - images are not downloaded, but the module reports success.
+- na_ontap_password - do not error out if password is identical to previous password (idempotency).
+- na_ontap_user - fixed KeyError if password is not provided.
+
+v20.6.0
+=======
+
+Minor Changes
+-------------
+
+- all modules - SSL certificate authentication in addition to username/password (python 2.7 or 3.x).
+- all modules - ``cert_filepath``, ``key_filepath`` to enable SSL certificate authentication (python 2.7 or 3.x).
+- na_ontap_disks - ``disk_type`` option allows to assign specified type of disk.
+- na_ontap_firmware_upgrade - ignore timeout when downloading image unless ``fail_on_502_error`` is set to true.
+- na_ontap_info - ``desired_attributes`` advanced feature to select which fields to return.
+- na_ontap_info - ``use_native_zapi_tags`` to disable the conversion of '_' to '-' for attribute keys.
+- na_ontap_pb_install_SSL_certificate.yml - playbook example - installing a self-signed SSL certificate, and enabling SSL certificate authentication.
+- na_ontap_rest_info - ``fields`` options to request specific fields from subset.
+- na_ontap_snapmirror - now performs restore with optional field ``source_snapshot`` for specific snapshot or uses latest.
+- na_ontap_software_update - ``stabilize_minutes`` option specifies number of minutes needed to stabilize node before update.
+- na_ontap_ucadapter - ``pair_adapters`` option allows specifying the list of adapters which also need to be offline.
+- na_ontap_user - ``authentication_password`` option specifies password for the authentication protocol of SNMPv3 user.
+- na_ontap_user - ``authentication_protocol`` option specifies authentication protocol of SNMPv3 user.
+- na_ontap_user - ``engine_id`` option specifies authoritative entity's EngineID for the SNMPv3 user.
+- na_ontap_user - ``privacy_password`` option specifies password for the privacy protocol of SNMPv3 user.
+- na_ontap_user - ``privacy_protocol`` option specifies privacy protocol of SNMPv3 user.
+- na_ontap_user - ``remote_switch_ipaddress`` option specifies the IP Address of the remote switch of SNMPv3 user.
+- na_ontap_user - added REST support for ONTAP user creation, modification & deletion.
+- na_ontap_volume - ``auto_remap_luns`` option controls automatic mapping of LUNs during volume rehost.
+- na_ontap_volume - ``check_interval`` option checks if a volume move has been completed and then waits this number of seconds before checking again.
+- na_ontap_volume - ``force_restore`` option forces volume to restore even if the volume has one or more newer Snapshot copies.
+- na_ontap_volume - ``force_unmap_luns`` option controls automatic unmapping of LUNs during volume rehost.
+- na_ontap_volume - ``from_vserver`` option allows volume rehost from one vserver to another.
+- na_ontap_volume - ``preserve_lun_ids`` option controls LUNs in the volume being restored will remain mapped and their identities preserved.
+- na_ontap_volume - ``snapshot_restore`` option specifies name of snapshot to restore from.
+
+Bugfixes
+--------
+
+- module_utils/netapp_module - cater for empty lists in get_modified_attributes().
+- module_utils/netapp_module - cater for lists with duplicate elements in compare_lists().
+- na_ontap_firmware_upgrade - ignore timeout when downloading firmware images by default.
+- na_ontap_info - conversion from '-' to '_' was not done for lists of dictionaries.
+- na_ontap_ntfs_dacl - example fix in documentation string.
+- na_ontap_snapmirror - could not delete all rules (bug in netapp_module).
+- na_ontap_volume - `wait_on_completion` is supported with volume moves.
+- na_ontap_volume - fix KeyError on 'style' when volume is of type - data-protection.
+- na_ontap_volume - modify was invoked multiple times when once is enough.
+ +v20.5.0 +======= + +Minor Changes +------------- + +- na_ontap_aggregate - ``raid_type`` options supports 'raid_0' for ONTAP Select. +- na_ontap_cluster_config - role - Port Flowcontrol and autonegotiate can be set in role +- na_ontap_cluster_peer - ``encryption_protocol_proposed`` option allows specifying encryption protocol to be used for inter-cluster communication. +- na_ontap_info - new fact - aggr_efficiency_info. +- na_ontap_info - new fact - cluster_switch_info. +- na_ontap_info - new fact - disk_info. +- na_ontap_info - new fact - env_sensors_info. +- na_ontap_info - new fact - net_dev_discovery_info. +- na_ontap_info - new fact - service_processor_info. +- na_ontap_info - new fact - shelf_info. +- na_ontap_info - new fact - sis_info. +- na_ontap_info - new fact - subsys_health_info. +- na_ontap_info - new fact - sys_cluster_alerts. +- na_ontap_info - new fact - sysconfig_info. +- na_ontap_info - new fact - volume_move_target_aggr_info. +- na_ontap_info - new fact - volume_space_info. +- na_ontap_nvme_namespace - ``block_size`` option allows specifying size in bytes of a logical block. +- na_ontap_snapmirror - snapmirror now allows resume feature. +- na_ontap_volume - ``cutover_action`` option allows specifying the action to be taken for cutover. + +Bugfixes +-------- + +- REST API call now honors the ``http_port`` parameter. +- REST API detection now works with vserver (use_rest - Auto). +- na_ontap_autosupport_invoke - when using ZAPI and name is not given, send autosupport message to all nodes in the cluster. +- na_ontap_cg_snapshot - properly states it does not support check_mode. +- na_ontap_cluster - ONTAP 9.3 or earlier does not support ZAPI element single-node-cluster. +- na_ontap_cluster_ha - support check_mode. +- na_ontap_cluster_peer - EMS log wrongly uses destination credentials with source hostname. +- na_ontap_cluster_peer - support check_mode. +- na_ontap_disks - support check_mode. +- na_ontap_dns - support check_mode. 
+- na_ontap_efficiency_policy - change ``duration`` type from int to str to support '-' input.
+- na_ontap_fcp - support check_mode.
+- na_ontap_flexcache - support check_mode.
+- na_ontap_info - `metrocluster_check_info` does not trigger a traceback but adds an "error" info element if the target system is not set up for metrocluster.
+- na_ontap_license - support check_mode.
+- na_ontap_login_messages - fix documentation link.
+- na_ontap_node - support check mode.
+- na_ontap_ntfs_sd - documentation string update for examples and made sure owner or group not mandatory.
+- na_ontap_ports - now support check mode.
+- na_ontap_restit - error can be a string in addition to a dict. This fix removes a traceback with AttributeError.
+- na_ontap_routes - support Check Mode correctly.
+- na_ontap_snapmirror - support check_mode.
+- na_ontap_software_update - Incorrectly stated that it supported check mode; it does not.
+- na_ontap_svm_options - support check_mode.
+- na_ontap_volume - fix KeyError on 'style' when volume is offline.
+- na_ontap_volume - improve error reporting if required parameter is present but not set.
+- na_ontap_volume - suppress traceback in wait_for_completion as volume may not be completely ready.
+- na_ontap_volume_autosize - Support check_mode when `reset` option is given.
+- na_ontap_volume_snaplock - fix documentation link.
+- na_ontap_vserver_peer - EMS log wrongly uses destination credentials with source hostname.
+- na_ontap_vserver_peer - support check_mode.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_rest_info - NetApp ONTAP information gatherer using REST APIs
+
+v20.4.1
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_autosupport_invoke - added REST support for sending autosupport message.
+- na_ontap_firmware_upgrade - ``force_disruptive_update`` and ``package_url`` options allows to make choices for download and upgrading packages.
+- na_ontap_vserver_create has a new default variable ``netapp_version`` set to 140.
If you are running 9.2 or below please add the variable to your playbook and set to 120 + +Bugfixes +-------- + +- na_ontap_info - ``metrocluster_check_info`` has been removed as it was breaking the info module for everyone who didn't have a metrocluster set up. We are working on adding this back in a future update. +- na_ontap_volume - ``volume_security_style`` option now allows modify. + +v20.4.0 +======= + +Minor Changes +------------- + +- na_ontap_aggregate - ``disk_count`` option allows adding additional disk to aggregate. +- na_ontap_info - ``max_records`` option specifies maximum number of records returned in a single ZAPI call. +- na_ontap_info - ``summary`` option specifies a boolean flag to control return all or none of the info attributes. +- na_ontap_info - new fact - iscsi_service_info. +- na_ontap_info - new fact - license_info. +- na_ontap_info - new fact - metrocluster_check_info. +- na_ontap_info - new fact - metrocluster_info. +- na_ontap_info - new fact - metrocluster_node_info. +- na_ontap_info - new fact - net_interface_service_policy_info. +- na_ontap_info - new fact - ontap_system_version. +- na_ontap_info - new fact - ontapi_version (and deprecate ontap_version, both fields are reported for now). +- na_ontap_info - new fact - qtree_info. +- na_ontap_info - new fact - quota_report_info. +- na_ontap_info - new fact - snapmirror_destination_info. +- na_ontap_interface - ``service_policy`` option to identify a single service or a list of services that will use a LIF. +- na_ontap_kerberos_realm - ``ad_server_ip`` option specifies IP Address of the Active Directory Domain Controller (DC). +- na_ontap_kerberos_realm - ``ad_server_name`` option specifies Host name of the Active Directory Domain Controller (DC). +- na_ontap_snapmirror - ``relationship-info-only`` option allows to manage relationship information. +- na_ontap_snapmirror_policy - REST is included and all defaults are removed from options. 
+- na_ontap_software_update - ``download_only`` options allows to download cluster image without software update.
+- na_ontap_volume - ``snapshot_auto_delete`` option allows to manage auto delete settings of a specified volume.
+
+Bugfixes
+--------
+
+- na_ontap_cifs_server - delete AD account if username and password are provided when state=absent
+- na_ontap_info - cifs_server_info - fix KeyError exception on ``domain`` if only ``domain-workgroup`` is present.
+- na_ontap_info - return all records of each gathered subset.
+- na_ontap_iscsi_security - Fixed modify functionality for CHAP and typo correction
+- na_ontap_kerberos_realm - fix ``kdc_vendor`` case sensitivity issue.
+- na_ontap_snapmirror - calling quiesce before snapmirror break.
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_autosupport_invoke - NetApp ONTAP send AutoSupport message
+- netapp.ontap.na_ontap_ntfs_dacl - NetApp Ontap create, delete or modify NTFS DACL (discretionary access control list)
+- netapp.ontap.na_ontap_ntfs_sd - NetApp ONTAP create, delete or modify NTFS security descriptor
+- netapp.ontap.na_ontap_restit - NetApp ONTAP Run any REST API on ONTAP
+- netapp.ontap.na_ontap_wwpn_alias - NetApp ONTAP set FCP WWPN Alias
+- netapp.ontap.na_ontap_zapit - NetApp ONTAP Run any ZAPI on ONTAP
+
+v20.3.0
+=======
+
+Minor Changes
+-------------
+
+- na_ontap_info - New info's added ``storage_bridge_info``
+- na_ontap_info - New info's added ``cluster_identity_info``
+- na_ontap_snapmirror - performs resync when the ``relationship_state`` is active and the current state is broken-off.
+ +Bugfixes +-------- + +- na_ontap_volume_snaplock - Fixed KeyError exception on 'is-volume-append-mode-enabled' +- na_ontap_vscan_scanner_pool - has been updated to match the standard format used for all other ontap modules + +New Modules +----------- + +- netapp.ontap.na_ontap_snapmirror_policy - NetApp ONTAP create, delete or modify SnapMirror policies +- netapp.ontap.na_ontap_snmp_traphosts - NetApp ONTAP SNMP traphosts. + +v20.2.0 +======= + +Minor Changes +------------- + +- na_ontap_info - New info's added ``snapshot_info`` +- na_ontap_info - ``max_records`` option to set maximum number of records to return per subset. +- na_ontap_nas_create - role - fix typo in README file, add CIFS example. - +- na_ontap_snapmirror - ``relationship_state`` option for breaking the snapmirror relationship. +- na_ontap_snapmirror - ``update_snapmirror`` option for updating the snapmirror relationship. +- na_ontap_volume_clone - ``split`` option to split clone volume from parent volume. + +Bugfixes +-------- + +- na_ontap_cifs_server - Fixed KeyError exception on 'cifs_server_name' +- na_ontap_command - fixed traceback when using return_dict if u'1' is present in result value. +- na_ontap_login_messages - Fixed example documentation and spelling mistake issue +- na_ontap_nvme_subsystem - fixed bug when creating subsystem, vserver was not filtered. +- na_ontap_qtree - Fixed issue with Get function for REST +- na_ontap_svm - if language C.UTF-8 is specified, the module is not idempotent +- na_ontap_svm - if snapshot policy is changed, modify fails with "Extra input - snapshot_policy" +- na_ontap_volume_clone - fixed 'Extra input - parent-vserver' error when running as cluster admin. + +New Modules +----------- + +- netapp.ontap.na_ontap_volume_snaplock - NetApp ONTAP manage volume snaplock retention. + +v20.1.0 +======= + +Minor Changes +------------- + +- na_ontap_aggregate - add ``snaplock_type``. 
+- na_ontap_dns - added REST support for dns creation and modification on cluster vserver.
+- na_ontap_igroup_initiator - ``force_remove`` to forcibly remove initiators from an igroup that is currently mapped to a LUN.
+- na_ontap_info - New info's added ``cifs_server_info``, ``cifs_share_info``, ``cifs_vserver_security_info``, ``cluster_peer_info``, ``clock_info``, ``export_policy_info``, ``export_rule_info``, ``fcp_adapter_info``, ``fcp_alias_info``, ``fcp_service_info``, ``job_schedule_cron_info``, ``kerberos_realm_info``, ``ldap_client``, ``ldap_config``, ``net_failover_group_info``, ``net_firewall_info``, ``net_ipspaces_info``, ``net_port_broadcast_domain_info``, ``net_routes_info``, ``net_vlan_info``, ``nfs_info``, ``ntfs_dacl_info``, ``ntfs_sd_info``, ``ntp_server_info``, ``role_info``, ``service_processor_network_info``, ``sis_policy_info``, ``snapmirror_policy_info``, ``snapshot_policy_info``, ``vscan_info``, ``vserver_peer_info``
+- na_ontap_interface - ``failover_group`` to specify the failover group for the LIF. ``is_ipv4_link_local`` to specify the LIF's are to acquire a ipv4 link local address.
+- na_ontap_rest_cli - add OPTIONS as a supported verb and return list of allowed verbs.
+- na_ontap_volume - add ``group_id`` and ``user_id``.
+
+Bugfixes
+--------
+
+- na_ontap_aggregate - Fixed traceback when running as vsadmin and cleanly error out.
+- na_ontap_command - stdout_lines_filter contains data only if include/exclude_lines parameter is used. (zeten30)
+- na_ontap_command - stripped_line len is checked only once, filters are inside if block. (zeten30)
+- na_ontap_interface - allow module to run on node before joining the cluster.
+- na_ontap_net_ifgrp - Fixed error for na_ontap_net_ifgrp if no port is given.
+- na_ontap_snapmirror - Fixed traceback when running as vsadmin. Do not attempt to break a relationship that is 'Uninitialized'.
+- na_ontap_snapshot_policy - Fixed KeyError on ``prefix`` issue when prefix parameter isn't supplied.
+- na_ontap_volume - Fixed error reporting if efficiency policy cannot be read. Do not attempt to read efficiency policy if not needed.
+- na_ontap_volume - Fixed error when modifying volume efficiency policy.
+- na_ontap_volume_clone - Fixed KeyError exception on ``volume``
+
+New Modules
+-----------
+
+- netapp.ontap.na_ontap_login_messages - Setup login banner and message of the day
+
+v19.11.0
+========
+
+Minor Changes
+-------------
+
+- na_ontap_cluster - added single node cluster option, also now supports modifying cluster contact and location options.
+- na_ontap_efficiency_policy - ``changelog_threshold_percent`` to set the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour.
+- na_ontap_info - Added ``vscan_status_info``, ``vscan_scanner_pool_info``, ``vscan_connection_status_all_info``, ``vscan_connection_extended_stats_info``
+- na_ontap_info - Now allows you to use vsadmin to get info (must use the ``vserver`` option).
+
+Bugfixes
+--------
+
+- na_ontap_cluster - autosupport log pushed after cluster create is performed, removed license add or remove option.
+- na_ontap_dns - report error if modify or delete operations are attempted on cserver when using REST. Make create operation idempotent for cserver when using REST. Support for modify/delete on cserver when using REST will be added later.
+- na_ontap_firewall_policy - portmap added as a valid service
+- na_ontap_net_routes - REST does not support the ``metric`` attribute
+- na_ontap_snapmirror - added initialize boolean option which specifies whether to initialize SnapMirror relation.
+- na_ontap_volume - fixed error when deleting flexGroup volume with ONTAP 9.7.
+- na_ontap_volume - tiering option requires 9.4 or later (error on volume-comp-aggr-attributes)
+- na_ontap_vscan_scanner_pool - fix module only gets one scanner pool.
+ +New Modules +----------- + +- netapp.ontap.na_ontap_quota_policy - NetApp Ontap create, rename or delete quota policy + +v19.10.1 +======== + +New Modules +----------- + +- netapp.ontap.na_ontap_iscsi_security - NetApp ONTAP Manage iscsi security. + +v19.10.0 +======== + +Minor Changes +------------- + +- Added REST support to existing modules. + By default, the module will use REST if the target system supports it, and the options are supported. Otherwise, it will switch back to ZAPI. + This behavior can be controlled with the ``use_rest`` option. + Always - to force REST. The module fails and reports an error if REST cannot be used. + Never - to force ZAPI. This could be useful if you find some incompatibility with REST, or want to confirm the behavior is identical between REST and ZAPI. + Auto - the default, as described above. +- na_ontap_cluster_config - role updated to support a cleaner playbook +- na_ontap_command - ``vserver`` - to allow command to run as either cluster admin or vserver admin. To run as vserver admin you must use the vserver option. +- na_ontap_export_policy - REST support +- na_ontap_ipspace - REST support +- na_ontap_job_schedule - REST support +- na_ontap_motd - rename ``message`` to ``motd_message`` to avoid conflict with Ansible internal variable name. +- na_ontap_nas_create - role updated to support a cleaner playbook +- na_ontap_ndmp - REST support - only ``enable`` and ``authtype`` are supported with REST +- na_ontap_net_routes - REST support +- na_ontap_nvme_namespace - ``size_unit`` to specify size in different units. +- na_ontap_qtree - REST support - ``oplocks`` is not supported with REST, defaults to enable. +- na_ontap_san_create - role updated to support a cleaner playbook +- na_ontap_snapshot_policy - ``prefix`` - option to use for creating snapshot policy. +- na_ontap_svm - REST support - ``root_volume``, ``root_volume_aggregate``, ``root_volume_security_style`` are not supported with REST. 
+- na_ontap_vserver_create - role updated to support a cleaner playbook + +Bugfixes +-------- + +- na_ontap_net_routes - change metric type from string to int. +- na_ontap_cifs_server - minor documentation changes correction of create example with "name" parameter and adding type to parameters. +- na_ontap_firewall_policy - documentation changed for supported service parameter. +- na_ontap_ndmp - minor documentation changes for restore_vm_cache_size and data_port_range. +- na_ontap_net_subnet - fix ip_ranges option fails on existing subnet. +- na_ontap_net_subnet - fix rename idempotency issue and updated rename check. +- na_ontap_nvme_subsystem - fix fetching unique nvme subsystem based on vserver filter. +- na_ontap_qtree - REST API takes "unix_permissions" as parameter instead of "mode". +- na_ontap_qtree - unix permission is not available when security style is ntfs +- na_ontap_snapshot_policy - fix vsadmin approach for managing snapshot policy. +- na_ontap_svm - ``allowed_protocols`` added to param in proper way in case of using REST API +- na_ontap_user - minor documentation update for application parameter. +- na_ontap_volume - ``efficiency_policy`` was ignored +- na_ontap_volume - enforce that space_slo and space_guarantee are mutually exclusive +- na_ontap_vserver_cifs_security - fix int and boolean options when modifying vserver cifs security. + +v2.9.0 +====== + +New Modules +----------- + +- netapp.ontap.na_ontap_efficiency_policy - NetApp ONTAP manage efficiency policies (sis policies) +- netapp.ontap.na_ontap_firmware_upgrade - NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk. 
+- netapp.ontap.na_ontap_info - NetApp information gatherer +- netapp.ontap.na_ontap_ipspace - NetApp ONTAP Manage an ipspace +- netapp.ontap.na_ontap_kerberos_realm - NetApp ONTAP vserver nfs kerberos realm +- netapp.ontap.na_ontap_ldap - NetApp ONTAP LDAP +- netapp.ontap.na_ontap_ldap_client - NetApp ONTAP LDAP client +- netapp.ontap.na_ontap_ndmp - NetApp ONTAP NDMP services configuration +- netapp.ontap.na_ontap_object_store - NetApp ONTAP manage object store config. +- netapp.ontap.na_ontap_ports - NetApp ONTAP add/remove ports +- netapp.ontap.na_ontap_qos_adaptive_policy_group - NetApp ONTAP Adaptive Quality of Service policy group. +- netapp.ontap.na_ontap_rest_cli - NetApp ONTAP Run any cli command, the username provided needs to have console login permission. +- netapp.ontap.na_ontap_volume_autosize - NetApp ONTAP manage volume autosize +- netapp.ontap.na_ontap_vscan - NetApp ONTAP Vscan enable/disable. +- netapp.ontap.na_ontap_vserver_cifs_security - NetApp ONTAP vserver CIFS security modification + +v2.8.0 +====== + +New Modules +----------- + +- netapp.ontap.na_ontap_flexcache - NetApp ONTAP FlexCache - create/delete relationship +- netapp.ontap.na_ontap_igroup_initiator - NetApp ONTAP igroup initiator configuration +- netapp.ontap.na_ontap_lun_copy - NetApp ONTAP copy LUNs +- netapp.ontap.na_ontap_net_subnet - NetApp ONTAP Create, delete, modify network subnets. +- netapp.ontap.na_ontap_nvme - NetApp ONTAP Manage NVMe Service +- netapp.ontap.na_ontap_nvme_namespace - NetApp ONTAP Manage NVME Namespace +- netapp.ontap.na_ontap_nvme_subsystem - NetApp ONTAP Manage NVME Subsystem +- netapp.ontap.na_ontap_portset - NetApp ONTAP Create/Delete portset +- netapp.ontap.na_ontap_qos_policy_group - NetApp ONTAP manage policy group in Quality of Service. +- netapp.ontap.na_ontap_quotas - NetApp ONTAP Quotas +- netapp.ontap.na_ontap_security_key_manager - NetApp ONTAP security key manager. 
+- netapp.ontap.na_ontap_snapshot_policy - NetApp ONTAP manage Snapshot Policy +- netapp.ontap.na_ontap_unix_group - NetApp ONTAP UNIX Group +- netapp.ontap.na_ontap_unix_user - NetApp ONTAP UNIX users +- netapp.ontap.na_ontap_vscan_on_access_policy - NetApp ONTAP Vscan on access policy configuration. +- netapp.ontap.na_ontap_vscan_on_demand_task - NetApp ONTAP Vscan on demand task configuration. +- netapp.ontap.na_ontap_vscan_scanner_pool - NetApp ONTAP Vscan Scanner Pools Configuration. + +v2.7.0 +====== + +New Modules +----------- + +- netapp.ontap.na_ontap_autosupport - NetApp ONTAP Autosupport +- netapp.ontap.na_ontap_cg_snapshot - NetApp ONTAP manage consistency group snapshot +- netapp.ontap.na_ontap_cluster_peer - NetApp ONTAP Manage Cluster peering +- netapp.ontap.na_ontap_command - NetApp ONTAP Run any cli command, the username provided needs to have console login permission. +- netapp.ontap.na_ontap_disks - NetApp ONTAP Assign disks to nodes +- netapp.ontap.na_ontap_dns - NetApp ONTAP Create, delete, modify DNS servers. +- netapp.ontap.na_ontap_fcp - NetApp ONTAP Start, Stop and Enable FCP services. +- netapp.ontap.na_ontap_firewall_policy - NetApp ONTAP Manage a firewall policy +- netapp.ontap.na_ontap_motd - Setup motd +- netapp.ontap.na_ontap_node - NetApp ONTAP Rename a node. +- netapp.ontap.na_ontap_snapmirror - NetApp ONTAP or ElementSW Manage SnapMirror +- netapp.ontap.na_ontap_software_update - NetApp ONTAP Update Software +- netapp.ontap.na_ontap_svm_options - NetApp ONTAP Modify SVM Options +- netapp.ontap.na_ontap_vserver_peer - NetApp ONTAP Vserver peering + +v2.6.0 +====== + +New Modules +----------- + +- netapp.ontap.na_ontap_aggregate - NetApp ONTAP manage aggregates. +- netapp.ontap.na_ontap_broadcast_domain - NetApp ONTAP manage broadcast domains. 
+- netapp.ontap.na_ontap_broadcast_domain_ports - NetApp ONTAP manage broadcast domain ports +- netapp.ontap.na_ontap_cifs - NetApp ONTAP Manage cifs-share +- netapp.ontap.na_ontap_cifs_acl - NetApp ONTAP manage cifs-share-access-control +- netapp.ontap.na_ontap_cifs_server - NetApp ONTAP CIFS server configuration +- netapp.ontap.na_ontap_cluster - NetApp ONTAP cluster - create a cluster and add/remove nodes. +- netapp.ontap.na_ontap_cluster_ha - NetApp ONTAP Manage HA status for cluster +- netapp.ontap.na_ontap_export_policy - NetApp ONTAP manage export-policy +- netapp.ontap.na_ontap_export_policy_rule - NetApp ONTAP manage export policy rules +- netapp.ontap.na_ontap_igroup - NetApp ONTAP iSCSI or FC igroup configuration +- netapp.ontap.na_ontap_interface - NetApp ONTAP LIF configuration +- netapp.ontap.na_ontap_iscsi - NetApp ONTAP manage iSCSI service +- netapp.ontap.na_ontap_job_schedule - NetApp ONTAP Job Schedule +- netapp.ontap.na_ontap_license - NetApp ONTAP protocol and feature licenses +- netapp.ontap.na_ontap_lun - NetApp ONTAP manage LUNs +- netapp.ontap.na_ontap_lun_map - NetApp ONTAP LUN maps +- netapp.ontap.na_ontap_net_ifgrp - NetApp Ontap modify network interface group +- netapp.ontap.na_ontap_net_port - NetApp ONTAP network ports. 
+- netapp.ontap.na_ontap_net_routes - NetApp ONTAP network routes +- netapp.ontap.na_ontap_net_vlan - NetApp ONTAP network VLAN +- netapp.ontap.na_ontap_nfs - NetApp ONTAP NFS status +- netapp.ontap.na_ontap_ntp - NetApp ONTAP NTP server +- netapp.ontap.na_ontap_qtree - NetApp ONTAP manage qtrees +- netapp.ontap.na_ontap_service_processor_network - NetApp ONTAP service processor network +- netapp.ontap.na_ontap_snapshot - NetApp ONTAP manage Snapshots +- netapp.ontap.na_ontap_snmp - NetApp ONTAP SNMP community +- netapp.ontap.na_ontap_svm - NetApp ONTAP SVM +- netapp.ontap.na_ontap_ucadapter - NetApp ONTAP UC adapter configuration +- netapp.ontap.na_ontap_user - NetApp ONTAP user configuration and management +- netapp.ontap.na_ontap_user_role - NetApp ONTAP user role configuration and management +- netapp.ontap.na_ontap_volume - NetApp ONTAP manage volumes. +- netapp.ontap.na_ontap_volume_clone - NetApp ONTAP manage volume clones. diff --git a/ansible_collections/netapp/ontap/COPYING b/ansible_collections/netapp/ontap/COPYING new file mode 100644 index 000000000..20d40b6bc --- /dev/null +++ b/ansible_collections/netapp/ontap/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/FILES.json b/ansible_collections/netapp/ontap/FILES.json new file mode 100644 index 000000000..6d2b6162a --- /dev/null +++ b/ansible_collections/netapp/ontap/FILES.json @@ -0,0 +1,7019 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "execution_environments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "execution_environments/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ab031084649c1857b4f90b7ed68ee3f530d51892ca81846bfbdd4657550cccc", + "format": 1 + }, + { + "name": "execution_environments/from_galaxy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "execution_environments/from_galaxy/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4a1200215a7804d7f5407d160f23bfb6f5da7d3b15fd06df5bc7b0820e35879", + "format": 1 + }, + { + "name": "execution_environments/from_galaxy/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1b767ca2979ad5896c5c47f8982a0c6e361009fae5addd38c05a6fc6666e553a", + "format": 1 + }, + { + "name": "execution_environments/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "670d7411a313c2d52ae76df7045fdd7c3476ab6fbeb68b6c4e3c4f0bf1d518ec", + "format": 1 + }, + { + "name": "execution_environments/from_github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "execution_environments/from_github/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4a1200215a7804d7f5407d160f23bfb6f5da7d3b15fd06df5bc7b0820e35879", + "format": 1 + }, + { + "name": "execution_environments/from_github/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "aee3dd26819eea1223c95657f1abedd6af88c823f596e26a95998b64e2ed2da6", + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3aeb49e78fdcbc7d398d15911124447bb5993a7a2f8598b3dca9677c5ef684a9", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1e7fbae8521ef7961beab2fcb7c1b99a0cada7fa16ed5b67970cf4d274466d5", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec34efdb000ccbfa336b4f9f09229dae445132884bca6ab83903e20273e620d9", + "format": 1 + }, + { + "name": "plugins/module_utils/rest_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41e555cb2f33a437a53aa2661d8dc967460138b2363f2484c783bad754e65975", + "format": 1 + }, + { + "name": "plugins/module_utils/rest_owning_resource.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c409d30c00004cd3138e3e9aa385081319318757404e6df19ec525885e53b932", + "format": 1 + }, + { + "name": "plugins/module_utils/rest_response_helpers.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "525a486f182786d6fca33391ea4aec04f0684822402d01741d4b051d85cd5a25", + "format": 1 + }, + { + "name": "plugins/module_utils/zapis_svm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ded53b78e030deb34304678cf75ca48cb5a46432dddbac3f1698271be91eac9", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "8820dff0fa21f3c4c352746613f82637551e734f25483896d8fc3820990f9e0c", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_elementsw_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a4578593c172f86e9867a04609ba45ea65d3ba715fc6d469bca04f043932b2f", + "format": 1 + }, + { + "name": "plugins/module_utils/rest_generic.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1fc9400481984dd81be5ae955dc79f649d946a66dc84f980966a2888aa900ce5", + "format": 1 + }, + { + "name": "plugins/module_utils/rest_application.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7eaf84f59cee761a3557db2e7bc8f67a0df50f00916cbbfedd4d7def0ec7f1ab", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_ipaddress.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bbd40a7ea14bfa0249cb14c1d070cac6a4cf98e0eedfc70ebf2964be27eb720", + "format": 1 + }, + { + "name": "plugins/module_utils/rest_vserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dcd8f9ff54db8fd3372b2251919ab5a1f3b5574aa0242e99efd7312b39fac582", + "format": 1 + }, + { + "name": "plugins/module_utils/rest_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4218e8dd9d5512341a58e194d89f30473261d070f8317c5be500a618d73615ea", + "format": 1 + }, + { + "name": "plugins/filter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/filter/iso8601_duration_from_seconds.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46e5af04fedefb84547500f0aec10883288a832a360db53ad7a6a248efe98828", + "format": 1 + }, + { + "name": "plugins/filter/iso8601_duration_to_seconds.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba1aa921a4afe660ca7223a2d3de0bf62e4c9699698b78e3457e26a768de69f0", + "format": 1 + }, + { + "name": "plugins/filter/na_filter_iso8601.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "3dc2edc7b875b6509892f273f1f0c7e5d09bcf30bb7f7998eaf3f66470e5969d", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_lun_map_reporting_nodes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c242cdda8ed0d6909df6850414f5b669d0c0f6bfa5f734c0d67d0190918db78a", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_file_security_permissions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "24e9bf4de23b8e91863c850f6da3ebf0d7bc621526e8b28435790a5c0879e927", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_job_schedule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28489900c7b923f4255a10b589e9f62e1a59c047b908376e2c7225d3c7c838c7", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ntfs_dacl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fbccbddc3f6fcbe7aeb17ef3825e4aca084c6e35e5df5fcada810cc6fa63a069", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fdsd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a145d8ef8df3095d051c279be7d812c405674da939a4b3882720a50f92da5e67", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_efficiency_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "63d7d2efca9d2feaf604012ea35c1d2ac16ee83437617cb5ab2a62cdbef22052", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_local_hosts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7e7b9126791d44c1a232b12041574b809c6fd10a0f6727532f89685d374df81", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_quota_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db0c2fbc7284db807caf3a94a4fe949811177a649917c1b90ebe74dba090594b", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fpolicy_status.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd12d57808eb21ae700d3fbf58ff0c760d9abe8fef73b34fa96c131edb664c7c", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_flexcache.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "24e6dd82e74d4da3b2a6eb0880bb022d7a486e81ed39162bfaff648994763b24", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_service_processor_network.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "477eecc2645a7b155d187cb0d7d0090c516844661b11fc5bf9b6da627c9128ca", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_snapmirror_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f241091d2004430814c8d7b97353ac83b8f4283b3bf4e62358ad5b7c25910da", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_vscan_on_demand_task.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67d192f7b0cba01fda9de0ed00009a1890f444b76c8103d9432d1c981b22f915", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_dns.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4fe57a0b4047c049393d27cda4e811ef882a14f6d31a1d0281fab65f68e98f23", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fpolicy_event.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "060819258a190809e30c859f855e9fc1f628170e3e94af9bcd4634f55c164487", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cifs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18c64fc05c7e46c19552e044d825704244dd07d308b0aa34cdc488436b727095", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3ee1b2a761551734cc35fefca8ab1ae77da4030a43de2681a2665c30ed4866e2", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_export_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"3e15d6f37c9a516d7ec213d9806fe1cebd130a24dc16e5f28b1da0b39e452fdc", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_metrocluster_dr_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7821aae8ec47badbfc7fa7f21eab3b22e76919e4ea3f9569519558b00c603fd5", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_publickey.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e94c6d17ec2411b2683ce033b0f8af61ed30824a178147168d04affba58a597d", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ports.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e1e0729f9855a93283e280703c4a21c38b6175da19856fd306cd9197668f23f", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_log_forward.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72e2fc321f2ea74789c8a63d5b8e13b235cd669529f685f53b6a9e3e6be1df83", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_restit.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9c29e8a5dac84b2dda82f8456541055f0c2507c7d3ba160bb9f656603e2012f", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cifs_local_user_set_password.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f59a02384faa7e48f5324c7c42c4e7dfea500b86a3403dc8a784e410df5b007", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_volume_clone.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0827f72ae8ec63976da5683b184076adf7a301e9927437958d8aee6f37b8507c", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_security_ipsec_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5991c48d8b70898412ae6f6accac319b1ee649520545674b2954a486edb3cbb", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_volume_snaplock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c717e53bba4484c2aec5d67bcc8c8dfb98eca9ed4927d99ed7a6f6b1059cd471", + "format": 1 + }, + 
{ + "name": "plugins/modules/na_ontap_partitions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77fd02245a662c35c06eb3bd081697ddd22ac497926439c17a4f118119091b43", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_net_routes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef3dec2614690756eec2a3d5cefc4bb89d338b40193ff32c51ae38ab14274b38", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_wait_for_condition.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67904a0f3ab4dd3d6ccad00a3f792555640fb80c8c8c5583d27240529044d258", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_disk_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dec8f4e8234b6cf8054fdba8b2d618373359b30d7cd0ba1d508bfb08ace4328b", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_security_ssh.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22f0ff74dc6c93e76a802d935a65810217838f55f43c01c241ab35acbc6be12a", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fpolicy_ext_engine.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5fcfef2981a3437d0f52a67836f6ceee3e7b52cfce4a76aff4407fd44d4d2f54", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_nvme.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "887bbdfad5e8d344bfac7869e8db1e9217032e526b49974c7e611bc10f28996a", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_net_port.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d9066785ee7075af0857d0060c2b99ad555533cf68de16c01373adced972eee", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_lun_map.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "715e1b085b5efca021341bd7acfe60dbe6f1b1b6aa085cb1fda43a273b7137c3", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"734c38050413783ac404c333cffa25012eb236e80e58c2fe12377f3833df8ab8", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_qos_adaptive_policy_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9947e0811644a115e67f51088a5e871f75f09bba98a7d89eb6d1e70acc38eb10", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_security_certificates.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86b25e1b63997f6b23ed639afc080e09dd8990314b4a7aa55878d5aab77cfe4c", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_autosupport_invoke.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0735c98e23ff56e72c9d771493f1b65ff03913429ea45022aa0e7040acbaa5c", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_quotas.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08c33094ac72558694e35b2387a302642331ed3cd5abcb22355de12ce3956ef7", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ntp_key.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05100998a1d1060624e244328a5b6f1a98219efc07a7f59f131f4b216571e079", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_kerberos_interface.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49d0a3e9cb122c19d590c38a1d409093b395298eadd3aab79ddcf279c61ee96f", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_mcc_mediator.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8868d811d19da7339f936d6ac2d413c882b7fc3961529319721b98543db8f022", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_unix_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c22c0a02d035bfd19c8406837e8d47dd928da9ee024618f8d6e7cf46d6ffb084", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_s3_users.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67a73877eab35459f95d84c1d0c488db3f339de2a882d457f6e988f2642d8779", + "format": 1 + }, + { + 
"name": "plugins/modules/na_ontap_wwpn_alias.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28f68b6a317435156239aa09bd0d194e3577f340c7dcce9a736b6ffeccdb0322", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_iscsi_security.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27a50cf3b57850f1379a7c2dd72e6ab0fa73eed251a41435bbb7f6429871815e", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_snapmirror.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9514d382448a13c71a3ef577a9947937261f53c4f3d26249fad0e6c169244c9e", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fdspt.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43b14be7396b822046b5741b42983e57c415324646dab946e0da8ed84ab9f9ba", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_command.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bfc5778733d87532e36930bf103dc3c95edaf146455274f553a0e65627366e57", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cifs_local_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "439a343185042c742054423e53dd61012bc7aa87bd59730dab59b9be95abcb34", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_snmp_traphosts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a2337f0861ca407cf20ce882001691704e637cdf60727b39957e7c7c47f18eb", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_snapshot_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d950f392c6da9173d4a222357e2f36417739c9881a7592a70ef3b9d69c28429c", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_nvme_subsystem.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0df707d04f2f61b7c577fff8bd4aabff7f276c9c853de1cc542f9c96bd94ed43", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fdss.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e3ff587283b21b6c6668c7a6b640e8249fdea8f946e7bbd0efdd5b6c5562a60e", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_s3_policies.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2bb9007b1a8fe58b411972c2334ce95e7b4c9f6be2fcca75f7adb632f3a042cc", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_domain_tunnel.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86489a673b9f975ca9b02d8ecabf2bf94a8a01337db6d7bf037e2debc06bcfb6", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dfae740aaf3e8c19be26310632565a603aac77a5baf757516dac453dd1bff900", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ntfs_sd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47578f1e44fa10c688f7663980d79ede4f3e015b1801f976804eec94f1b634d3", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_lun.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d44070a28af9662c623e174e9699de994fda2ce6b8626bcd6d3e61e6867a705", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_zapit.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "294c391daa01eb7eec025c1d706a8ae072b31499b42db5789532df980926729f", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cifs_local_user_modify.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98e24f6c4fdb0de3bb3afe753fc0b94ff950bd1da9128d2b6d64829a0103a138", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_security_ipsec_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ecb00f9cdcb0ad1dd6633086de0c753b608683615d41544499f5d68a397a2a8c", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_storage_failover.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aafb1c7c767d546b907f66a5ed5729e9f99fe9111d1eb5f51cc43957eab09697", + "format": 1 + }, + { + "name": 
"plugins/modules/na_ontap_ldap.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3998d951ace155e655e9d78b57f94d5728554caf3d950ed105d45f2b8738d605", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ldap_client.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8dd8fbfaabee3f155b8d357e0e8664b3e6d172bb8ff8df31f0be9cc22ec4671", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_metrocluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f03edf8f8d35facae1087e66bb5e93e5619f67127e183f38f9d3748a60631a8", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_net_vlan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "794b7f4fdfecf01e5f95fc94dcd954e956aaea32ff4d678de22126420d0d320e", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cifs_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4287377fb2c9f65556b9dbf64ff6ec467f81fc5d2cc82ebb4c2c16ade1dc1de2", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ipspace.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c20e5ae3e7266591727d67bb3ccf9139cdb6582ec5260535d3275fbb283e108", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a1b8844f0d0f5438c159b44b7d11c6d8e3f2394aa0f6e033b7757b7b9a061b2", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_rest_cli.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab45340f30ceac2a1f5954a2aedf18ebe12be687312a97827dc7ac2aa12bc0fb", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_unix_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "911f0e81f9df60c07e1ab3eb29b4dfb2bb193c245376bdc145d732acb716286a", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_security_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"8a885621b50e69e52bee2588a3c940b52a0c70a46439617790b20f3611b58b89", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_service_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ac05dab425cafd7de913f81014eb10d856b5b99612bb063cdd7ca4e5a0d063d", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ssh_command.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "033213f3c4f1570a832e62dbcd02cd91547a3705f9322a2bcb6e88dd046138da", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b02d35878a56eb92ef14f3c93490fa24a620f0e0e51f9474d51e1202ecd361f", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_login_messages.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb1da6df48e95a15f43e615f7d36a230f7540adc96aead7dc0d132682956fba1", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_file_security_permissions_acl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee2a8e5945e09845ad851ba4448083d9d6751f0f4ae98ff04372b15052c36ce7", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_security_ipsec_ca_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3ad2ab4be07a019dd1ec36636a0f5c3d6ced41e394d13d943041be1bf32aed0", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_vserver_cifs_security.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8636394cac6b7f0dd6627c8ee2022ee23cde397ff72437cda700236987606fa", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_s3_services.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb9b2ecd506650c6277c7db50399776c44de92a479085ded69e4d4be45fbb06b", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ndmp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf6e8193c76318336daf2b5f72c57f68e8406a7569385dda438454642b1d52c0", + 
"format": 1 + }, + { + "name": "plugins/modules/na_ontap_object_store.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4143d9ef5e737ebd658229da1bd0c35b83abcc74760a32d8d2a0c7627b2118b", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_storage_auto_giveback.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "24dffb7ebb9eacb949cd7dfbc9cb9ec35da18faa3176a239f1214da8282b66ee", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_name_mappings.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83bf5f628ba9cdd88ba87d7af2344d97a290272b44a0acd1a658e46c86a6ce19", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_qos_policy_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b68a80bfac9ff1437326a1ac17bf296988ba6cf356d178970414623f608a9a1", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fdsp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7f77d9ef96bd752ead3f1cbcd6618d623e72cd44dc7d7467c09ef7d915cada7", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_node.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67b359d9c72f3768342f5ec483cf2d201e0b7c4cb02815d5f5e2e886d1411adc", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cifs_acl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "721897c0266b55dfb803f966b5130815893c8adb6e93f3a04d16746e45ccdf56", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cifs_local_group_member.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b22bf5f5712efd4aaf94e77e36ddf0e93de98ef7bf7f33d5d7a8f02d5aa4959", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_s3_groups.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d6da5e1b688114ca6190eb68020639ad6c1871b900e992ce1ea59d9c240b776", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cluster_peer.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "439ea7093e16deca7b055bb8b4577ee49fe1579c56560261a9a618a853756d37", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_vscan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f915dbc096d0a587e1d29bb9f21a949c59f0c4e77130ba296e1e245189cb471", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_nfs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3f57bd199e8bb2bf6fc88de81540f4def926757d083abfafcefc3737885c674", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_igroup_initiator.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e0219793ff679105e7cdd12fb92ed998d7d0286fa4616881c35b48c20f93ca3", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_svm_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04381694d29a462dfafbd9da8d6f52ef049040dd1163f26e58b8236509e50caa", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_broadcast_domain.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6249740f48db9713cce0f3e639284a9cd58f589d13188161218fa36710589850", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_volume_autosize.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3422d91449b47b99e93c9a1ad1abfc4fecd6b145b7f7510879be913d2dfe74f0", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_iscsi.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2e7b91f4cfb6be15022116fa3a93ceb54f1022c4c11ad8a707091a3f220990cc", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_s3_buckets.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a97982a8406b0fbf366d4200ec17fbb8b2bcfd10b09356153e2dedc8985f258", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fpolicy_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49131f89a89b6b273c6c726b270ebda313c32da542a66c83728935adf3018d7a", + 
"format": 1 + }, + { + "name": "plugins/modules/na_ontap_interface.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f96b7d6a266a5c6871e6e512e2ee7ea77a2e559469bf7714df09d361333a97c9", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_firmware_upgrade.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de1c31346a9f8292186779eb5cbc2bb49db459ca07b1cc6611c2cabc65ca0f00", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_snaplock_clock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db97b8134647db5389d095736ec4e486209f24d3ab71455e8451e022183cc14d", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_bgp_peer_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0105a4cf2f70479304fdbe47dbecfe929e1c9a0d319c2bdb801c10dcc0facff3", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cifs_local_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21edd1b6fa460bb3a0e429ee2fbcd4f786bded96934b5c16dd7c61bfb5cbeea3", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_motd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36788820e3272aebc7d148217e8ab632ca35fd2be980e314d9347245f30ae4d0", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_svm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5807c3522331a413d68c8c6a1852065ea500b4890a48e65708765431440edea1", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cg_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50138c1b5feda5cac7f79cfba886bb22837ee33c851f5e9975d3951df3c8927d", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_volume_efficiency.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fa5c0d9073153b4488488a5d1d2cf88fa5da128beb6aa2317b725cbd0e75ed96", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_name_service_switch.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "fa860755cccabe4eea3b8a64204a5e98d9fd75c706808e4d08f04323cf8536d6", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_qtree.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d1dc7716248addaeccd57f50cee498a1e7257f5d0601709ee310429757bbdbce", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_vscan_scanner_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3852ac303caf42cea6b6ff90f851fa78b51d400690edf3acc833ade94c63f365", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_firewall_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4b9247ac4f62bb4e70d9ba87c7ed1efbbb103b4c18e77d4c923a0b3ed332dcec", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45e9d6197ea7263e1a211b50cf63cffe0826e098d4b262c37b8d47eb2814f05f", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_security_key_manager.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4eac750c0d059a681f2e6d933611684267bfbb434898d701029a259276fffc64", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_debug.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "856190a449db8166395fe0480a933bb8c99d5fba53a4073db784d21716986076", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_net_subnet.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df862ec324f221d7af4a2f2a87e903a097dc6c322b31de1eb618cf9963dc677e", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_autosupport.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1852d5b146cae589732b96b72c9b13676ed752d9b760a7422625bd55bc1846eb", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_portset.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49c011be8a33ed75a1160c22e209e8f4240b45b0bcac3c1578f5b8483b2bb1ee", + 
"format": 1 + }, + { + "name": "plugins/modules/na_ontap_rest_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2856157fe4f1c49354d226279e8b48e2307e85ab7c4aa2fb06d74406f4e6ff17", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_vserver_peer_permissions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16a75e8328941fa824ff98aaf4cd4c8ca26b5df10998bf8d12d02b8cfd979daa", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ucadapter.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "985997c26d599ac30ef04d77981e7c71fce387a83b5a4f7e19b74df663dcb31f", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_broadcast_domain_ports.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7a4574e536575aba32fcd820cbd7bb4d7bce7734d42fd65407032d2c02a3394f", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_export_policy_rule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6268758ca9e6bf19ecc2ec77874a5de0c52deeeb47a278b4cf59a10aa999333f", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_igroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8cc2c270d2387e426d02fbae6b6211b6b36f0e24c438f32b57b6b673a4370815", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fpolicy_scope.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "07ab2ab2a439588c7eeddcd1a1b6671b7636e427948dc2c985c20217f0676ec3", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_net_ifgrp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "700b1c21b2fceb7004f7e21525df82b9bf75e5a0b0b09b743b880ba7ae9c88ac", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_vserver_audit.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3f265624355e196e82aa049d1c9877cd0ee522672ed2ea32adf0fa1ff131c05", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_software_update.py", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "13bef30174fdf41cbac92879e2ad3b6d9354aefadf7bff8b771c7a59a8d0a7cb", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_kerberos_realm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "80e39927bb3588e6d457eb6fe30d7693ebd174c5984bc04f289a317ce727bd19", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_nvme_namespace.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d4b1810fdee8ab79d43f09e51a7abc5075892fa372be590019e2d88c9a3f7119", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_vscan_on_access_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9a27f3d5e0f47bbcce43e1b4725e7d3bf70f53a93295a0ad5a574badf4a95b4", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ems_filter.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab8386552e4c7a9b42cc436fb452086767d10405fbb4b1110e12b88f1d6a71d1", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_active_directory_domain_controllers.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98f7b22fff92d432fe032a71ee67db00c38e1333601ea1a92e41146284955766", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_vserver_peer.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47ec3d718b88fbe09abfc49a2d7c4896a11d8b1d2158ce1e55d636221f70f7d7", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_lun_copy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2718b2d03122311e723fc849d9eaefe0e0c60c50885aad032a1c63f718efeec2", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_license.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69bf1f7c5b83c10c847b0c8585d37ce6bfeaf8b77910d136f5883a29d886f441", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_snmp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"bbb6b7ded15cacef91b478955e289e1dd3baa6a9723e7605872eaaf61d0fc56d", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_file_directory_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e037fee5b5d1d9766f1520913abf366f0453b7146db34b49cf0ee483824ed09", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_disks.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91deee7578b7957604f57e3659ddd69e9eea92e27580a9587d0e7d34e2ab0188", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_ems_destination.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1388814e0a0917e90b444c4e80339b90be71b0114af3c501312741cd7155b54", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_aggregate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8489dcdfdb928faed12e8cb49ec415022b0981d05060de7166b3097acb3e55b4", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_fcp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3fa605fe5e84beb48bed1c9c46a428887f57ce6c705e595ea65b0718dbe4fda5", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_user_role.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c216dd1120864f19e66761d9dcfc425d6dcb667510208d8ec98572718c17433", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_cluster_ha.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f91ff7a0078eb26574cf5eb41fef9a5ec2767484f572a28d2e02c65a18d4672", + "format": 1 + }, + { + "name": "plugins/modules/na_ontap_active_directory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6579eb6a5031f6837c8fa766470aba1c5c0ae86e2015e4a590d57ec19ff47b0", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/unit/compat", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat/unittest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cba95d18c5b39c6f49714eacf1ac77452c2e32fa087c03cf01aacd19ae597b0f", + "format": 1 + }, + { + "name": "tests/unit/compat/builtins.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b54e00187b28316aa1b05ba149eb716b8352882519b9ca657d5d6df07eb989c5", + "format": 1 + }, + { + "name": "tests/unit/compat/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/compat/mock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ac960fc2165599ec9e2d99c070898121aab8950f09e39b36f075985ceba49a8", + "format": 1 + }, + { + "name": "tests/unit/framework", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/framework/test_rest_factory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d817599e67413b1424951c2f1d19176cff7e2cbaa1b9420f9ec4982d3dc963be", + "format": 1 + }, + { + "name": "tests/unit/framework/test_mock_rest_and_zapi_requests_no_netapp_lib.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5c89dde6d2af3dd82eac6c7ac3308bfcc12e3ed81efe2cf2ffad63042140c66", + "format": 1 + }, + { + "name": "tests/unit/framework/zapi_factory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a053ec8ab0da643938bd50e24e62cf1e998456c7b993b7b1997b50de4769d117", + "format": 1 + }, + { + "name": "tests/unit/framework/ut_utilities.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"375d01c7c1058490d252784902caec463219d57135b4f4bb5d2a68f75d2b7672", + "format": 1 + }, + { + "name": "tests/unit/framework/rest_factory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c33645d9ae73491b4096e10f2ab3c45396fd0456a6de2d2fa39795119a717fb9", + "format": 1 + }, + { + "name": "tests/unit/framework/test_zapi_factory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289ab36088db0d1e14b8bb7e574821d95a8ee5f3db43cd082d96665399fc131", + "format": 1 + }, + { + "name": "tests/unit/framework/mock_rest_and_zapi_requests.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2eab2ef537fe8656e0e4f30d510050c41e3a2aec1635f15637569c6813e6d65e", + "format": 1 + }, + { + "name": "tests/unit/framework/test_mock_rest_and_zapi_requests.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "95c84af27c17a57365436e69ae3ac0a94a9e43f631d0632723741e0171cebe8f", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_rest_vserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd38e8bd6315597080e51727391cc2dd9968582d6c175e2abd49e6060f79d975", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp_zapi.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc7915c8e5613daaa3b4605ff00b4fd70dfbb7bacb1367e9f1c69cd471d5a84b", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp_ipaddress.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "176da32770b61471fb61fc324e4077ea558c2ea2a97b43e6fcd30115177db17a", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_response_helper.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "821f641988c639eb832bab359d4ccdaf77113d72682e2d50331b29c4f2cd7993", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/ansible_mocks.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f2e8aee2f8ae45bde86713ff4736e83b8be47688edf3d40c048d9de51bc31cc", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp_send_request.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "95999678cb80fa6c7f2a2ff0b71ea18c2810f9ba4c5e417cefe8ec2159f88d20", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37c08c3614f960da5a0038a2fa66e5c8720476e64d71340bf7f0e46ee40c631c", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_rest_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "89f20f81dfdb5878c367315b6c6a55287bf6163a084e805be523947eaeff870f", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_rest_generic.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7addae07eedc1811c2bcb43000337f79398a7a7dd8a8db2715bdacc2ba93ec03", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_rest_application.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7d08d84807ee05b2d25c6df566ee7a02f2cbdf50a5f2d2cdb51744c985b1c0a", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_rest_owning_resource.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec5b1942cf5a0d60514faecdd95b4ce01041e9a22f52fd44fa3a8277435489bc", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35b43884b70e12c3d4922ffc558caf7de328a2955a9c6321a2ab4674b546ef4b", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp_module.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "b9f4b3373f242bbf0287f405c500a500905bdcd00d320ea2f614521dd7f842b4", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp_sf.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ef2161fa8de9fa22a77629663b459cd6511fbb660e4c288463d8bcbf0879093", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp_invoke_elem.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d0f5ed8b08a959ae34bd1d20889a7cfe08623001af510de96e7605b8c7fa93b", + "format": 1 + }, + { + "name": "tests/unit/plugins/filter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/filter/test_na_filter_iso8601.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9abe21559386c004559c78b7fc5f228e67290c3b9359219911b9c1fff4ef975f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_firmware_upgrade.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7a11eaf1634009219de9ac1fea1876e3f645779e679574d6926a063afef86e4f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_rest_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72fa0742c19761dd2be646cdbff2b537eb8659eb4be46a13082cd422c649288d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_ucadapter.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77fceda7a3d5c70bf910f461774ec76cdfba34430762f8fabccbc162beaf6bed", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_debug.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc38fa1c21613a00308c7c8c092fbc35dc77a170f4ecd4a77ab8b986cdc2d42c", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_na_ontap_ems_destination.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a70b09add9149c9ad2c74f344c0f5bb7c8e2806c2989bb722e74395637e6e20c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_wait_for_condition.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ebf8d2d5dbad32b52ae39fe184256842bad503e06a23ddb0b480b87b8dee672", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_snapshot_policy_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28d0650df1636d66970ee053297cdb23a3bfc6477870c7a439c21f1c28e30572", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdf9b1ee67ec49c2df619de50d4e2fe1ca0068792020e1abd6a3cc5101f1f4aa", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b4f315e47e3dcf313b0a3f631683c69f694c2afc3351b8376920c124ddf1a37d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a58612b79e4f2311d9a3f95b9dbf2f0d5586fa4078b1b523cb1982c49462d7e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_name_service_switch.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c88fea1c893a1ea80f71c9f342b6742003180e0f168dd85ac0ebbf66cd64a26", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_service_processor_network.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a86d5f8fddd37f27fb360f58b3fef7cc8971d9b92ca8bdf5c51b8725d323191", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"3d79a63129448a442110e2118f7df68aae0613d5509aab5514a31e4ddf55b2ea", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_fpolicy_ext_engine.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf17272c65a2bddcc15ca111934920a7f1aa95a857bc8a653a02d9c0644fd2e5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_rest_cli.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94920730f19376bf5c56992f84471791137f2f43528efe8c585df545be41a82e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_software_update.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5783ed8b8d64461fb580fdf6edba5ecbe728b86c6586b44878520e8440538a7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45d8138a1a1d0e3f5b0145dee9c94da791ce6ea59e9b85a0c37fd28d7ffd0b26", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_iscsi_security.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40cca8291e9a73ae162a5659c5a0ba6e061b15b2ed1a49faff1ec254b921d4be", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_kerberos_interface.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e26f3f0cd614530ea1daf14d99261d01f7f4efaf44c8b6c80a7542e12fdf352", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c882a166465a3b80cf6edf4b333646a12a7fd78e5045ea59702d1c4458ecd6fa", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_svm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68d2fd805bd1adfbf387c926f4a7023644130be7cbd9b27a89cfa4b59456521c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_qtree.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "135e3ce73832884c3dc61db8a3f828a84c6aeb7e0400c3e635755edcddb67edf", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_lun_app_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6adf9786a92bf2be37c60329063992de4250d32875ec30425acb1b68a573a33e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31c88e617cb79d4efa45c61d4903cad0ec354207097db42591b14a5caac5beef", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cifs_acl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "723fb0d0ac051b252a6895f8e768c7e4212c3e395d4952eac7d72ce273151e9e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cifs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "239fcf51e3ecfac1fd76009f17474b18ac9fd997c2cee8be8001015d118306bd", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df59cc1f7a7d44771d3aad931aa70079e2ae80655444f8123b78cf27744bfdd2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_nvme_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45b88ec5b42e05cc55a1621268b485333a507bfc37810675a00a8e6d4428de9d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_portset.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9cccb8d314fb62dbc8f490f31c74be4be801a58ecca92f796e20c392f43c68bb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf2fb066ccce6bb9cf50e0c0a646109cef0f5171a1f7d9a67f64b4319f7bf06e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_aggregate.py", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31ec36a0875f3362d8f1c09c3afcc9f1aae8079682d75c526e36d6cb201e6903", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_lun_map_reporting_nodes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d0f335cf9f1ce1d7a1d0a5428160e4576a95eb64c46e4020e671b8dca6c71869", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_name_mappings.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d6a37545df65d493f0f2f2f198de1aaf192a02478defc4b4ed5ef3ae74ebc5c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "637d6977a9ad1258222b8201dc194e1ee712f81899fa7ed77376a2c3d524f759", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_license_nlf.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c540877e6488a5c38b2b1ac024390cff649695fa4c22b67a608a942132f074c5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_user_role.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6084eb9f347977adea709022f05e7f0badf04faf5ab4e51df8f3498f122e9898", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_disks.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d140e85b8cf2a1ca5f3c0a051bbc31bff5a8e4f523faee04f2d89f234f7733a2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cifs_server.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20b53adc4d0ae871cd6c7431fb766bb7464d3210b79ec318dc44c9dcc3abe42e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69ed0a60e6f2e1cfb23fb94b45aa891913a0f10c7eedc3408fd882053e4d0703", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_na_ontap_s3_services.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eac0a91c90b250a0e5d4093d22e8837767199c4de718dc70854d94302ba85284", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_user_dicts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20a669de5baf92a8df5c0dd79166f7974e2ed3987265bfa43bba9a5a0029c7b8", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_fdsd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90f9440c0f84a627acc7f25a37e2dc9dd928868075d71694c53c8c07653de29d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_file_security_permissions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a58ae40b25888fd02e09cdb5099cee7890d7b9a8ec802e7033746c3d6cc6814a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_volume_clone_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6ea04ba30c9c2a8245a4c5d8b8d43750afc1732e7128695fe3a9be48060d6d03", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_vserver_peer.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff86723b2387f8c3dd0dbe0d92722b480d8ef393a58ab719045e62d529db7e67", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/.pytest_cache", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/.pytest_cache/CACHEDIR.TAG", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37dc88ef9a0abeddbe81053a6dd8fdfb13afb613045ea1eb4a5c815a74a3bde4", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/.pytest_cache/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73fd6fccdd802c419a6b2d983d6c3173b7da97558ac4b589edec2dfe443db9ad", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/.pytest_cache/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3ed731b65d06150c138e2dadb0be0697550888a6b47eb8c45ecc9adba8b8e9bd", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/.pytest_cache/v", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/.pytest_cache/v/cache", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/.pytest_cache/v/cache/nodeids", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b02e3eef79464263dbecb6fba2aa5ac26a25116d9269c21332ac857bd9d26d5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/.pytest_cache/v/cache/lastfailed", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba6bcb107e0114b5becb0d3250b74c5e53a06d793b0ca532b3e97c1467ef027d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/.pytest_cache/v/cache/stepwise", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f53cda18c2baa0c0354bb5f9a3ecbe5ed12ab4d8e11ba873c2f11161202b945", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_volume_efficiency.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4b717f91eabb228b774bd71c267987e0aa838cd800f32dac9f6390cb333f66d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_license.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2ee7600fb69ba7bcedddae7a697b889c4764c6a2d934851a48fcbf0867afd9e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "412441cecdf2680060f3a3a37af66b33a271a924b636d4e541ca239d96c6c53c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_firewall_policy.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "2fe899d36596309359be7bee5484c49dcb9b1ad7412fb1b6d89397e5f449cc6e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_snapmirror.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f9a591c29a0560042a34c108dadd408dad7e053020f3fb79b08570fa0b28304", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cluster_peer.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23f42d9f923f12a2ce87fe0982d85ec2e6c663c174f6964bd89bc821db014e39", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_storage_auto_giveback.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f1f1f2d03af48a8b88487af5928de9488ee9ada36b092ad11016763a608f2a0", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_s3_groups.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81912e2350944af67d19103b39bd1c4bf054da383ab0d7d6e2dc8ef6847a5bd0", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_active_directory_domain_controllers.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ae8997702f02cb2e7ddad09e5661302d3c20d5fb2435934c78b6011629446ba", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_fdss.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b9ab089073bdb60b15a3194e75b1d59f273414dd3efa7aff85373f100360163", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ontap_fdspt.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "518b33fb319eb89ab9b0c7ae1348cc75c7ea43d9fe7abdf4e439ca5a9ee7a7c7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_ipspace.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e15b980270eab97f543b8b49104cb26de350d1c83b0560f8842ed9a68fe9a443", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_active_directory.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "432ddd2e2a22ace67b7bff290488f789b2f76f6279d43b032fadb8f2ec2f7336", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_object_store.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f7ff9257ea9f2f99f2ad9b20fc2c644c627fc9ae121582b1e6851000d2689612", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_user_role_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f96d0517f647dcf3aa80acb9f64c6fef0a6e9c6d3ca45c517ab869dc07192e4", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_template.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba7d1539a6e59287219f286157a3e1304fc0345e3db59a69e2ae928c35bdf3dc", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_fpolicy_status.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6522b6152dfd30cac8f5683dbb404ba0552d99aea937245add1447c338fecee9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_volume_autosize.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6176d28feff6c7b67c3f83c4bd3447e2e0ded2822509507e95391b5159e5e9db", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_security_key_manager.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b01b9bd8f7064590f1450647b2dba19cfd616cd6a452b2db095cf179216bdf15", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_vserver_cifs_security.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d27200187ee28b282c7a97443ef7ab0eefe776c84f1c16b6101f2aa2014aed5b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cifs_local_user_modify.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9888de8ef7e9b8edf8e672b500c4a96f928865d4c5aabe1b2f633da81c7abad", + "format": 1 + }, + { + 
"name": "tests/unit/plugins/modules/test_na_ontap_lun_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce572f17103f660e31dfda9bc5ece970aa6cf4a4f349e080ea5c8552d1dded9b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_lun_copy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8ae05d8b13f2cdd400a613d3d1ddcf3f25eaabf735042bb865b8d3b458542eb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_partitions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8bbd97e8d0280091b5c99ab9850ef427bd68aa656ea9cb35cafaad440ad323b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0aa7fcecd68b943423a7e5d9d353133e1040de8715698414e3a21d3ee97326c7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5e41920b0d3e3138969c589859a0bcc218653781094d9d66c04e9924750333a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_net_routes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa0e5e4cc2ea9629c6b716fece534ee0ec322408c148e1229086beebf795366e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_interface.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43edebe1ffd56436b42f4c74c1cbe8e0f7aa62a5895d9a1d406cfb865311ec83", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_vserver_audit.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74071e0c975872d9c41060957acf337e8571db9d82f77c5a75445ee0c436b5be", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_fpolicy_scope.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"1dd7fe0501b3c18d5ba44f5fe59543295869d3a12945d5dc508da8f67292e5c5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_security_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f7e45b1801a4cb0850dea363c85fd8ee3d83fb3bccc8c52fc261dd33a234d29", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_vscan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a863d465ca2159a316134ecd907bbfb4e3e0e13fc780ee3f4c78be7ec451091", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_autosupport.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9f89586e4b5b342fd037444c5fcd506b3daf46a6253efe20c15284552cc03f5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cifs_local_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc4a68a6655f8fe92c2b1d65daa80a151c7cf7f3058ad386ff308ef979d3ea31", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_igroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dac63bbb045ce05287f426b9d210ffed506d5aa8dcda29d9becd7123f4de3b39", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_nvme.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23b7ed25be938e848eb65d1bb4fae8dd84b59ca06d3470964bcea990236959a5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_nfs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a6e90748ad3e6e053bdb8695e1dfdc81fa081b58805a22f2ed98e5881495021", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_iscsi.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccb8fe96c6478358b62c9b8b966a729218fce8ddd599f525cf1a9cc8216e52f2", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"df09217ac59f84918fe65375d1f68632172b0ecf0783117add3ac19037c1a688", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_lun_map.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9fc8c916ee6fd10162f721ebde64fbee731b5b3bd40f7201d8ac99395cca70a0", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_log_forward.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3132d815d480100cfeb27fc5b399e5a5a718125ae5a12105d13a356f22ad3bae", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_restit.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "338d1436f4e001717dee8e2a7276c6f8d061c4ce5a56aa2bdac670da41642e8a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_volume.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23c30dbaacad0f9ea68d368fd88b0459ca4b7ea458fec5c4592c945aa97ebfbb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc944b129acf4b822b63a4cce82fe0595626f29b01af89f6107110cb5255684f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_net_subnet.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d6de7ca708b3df99819b934d700d45fe68c98468643f29526e40b12d2a921d6c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_unix_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4b357baf60f8e6594c903fb35a759f8f3d66fd66b7f1a0db576e80be2ff9817", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2617b874ed509ed38d09b4a64c38cf130fe307a1546f011d56f14ca7b9ac88c5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password_rest.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "e76cfbd5fef74d5dd211032a21e0448166803f17284bb940a9ba6ec0223c27d6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f20bcf49ce34ac78b43e89543d9e0f63a06ab291cc4e820be8121345ff6400f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45a10a5f869386b7a112d2d10da5bcfa9679537714fad526e170fbe883befb06", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_export_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67ceb754cc9bf0da15d235d04040fe23b625e13422177d7ea1d1a1dd0d3fb6da", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cifs_local_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7e2b14d970b0d72f866d60668995246573502df8433585c79e7d4dcb227ca64", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_security_ssh.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "939375929c05712361d8619e1fe01b0e85fa3deb9f6e64baee3229120cfdce52", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_fpolicy_event.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc80bf623c8fb0ee6abaed3f84b4a52b75391ff5ab3ae1fa025d1d4e6283598f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2815dff8ebb32b473e88be21a1f189849fe13955f067eee0bcd67d0ea683160", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_disk_options.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cea4952dafb1c18075b93e8e20c392cb99bea743699fd6ee0153efa5c53fb9af", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_na_ontap_ntp_key.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec21399281f0ec9c9733e77b45666cff24cc635bd36f0c8f22de6c739af955b9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a9d073514e94f07b484c8478e5529a18168a1b40e09dfb08c0a7e7d88d811435", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_security_certificates.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc1820aeab587ca1f867c83ec5cbbffdbe04e949434b42cb70736e21008c9638", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_metrocluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d9eead8fc8645486c85cc5ad90a0ee854190c6c623857f1a7e0cf5b51a28670", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_ndmp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b50b8aa8717423467c3c8d6d6967b71d0879c2f1195fc90b6fde7d76c0d501c4", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_node.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9293b325d46ce8b2d455dbe8910d9d0819529f7f6b9640f8f7a9d46c65f87a64", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_command.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "75a681adee6831f0bb12e64b8cb9ee95296d2c5f2b1da030b36c416546cf2980", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_lun.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "497f8b65494da99ea6bfaf2b4b26d1ebeb610e9d86ce9dc0ce08af19fbb0c51a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_vserver_peer_permissions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e16f4689c12fb1c98b14f7761532e717b85adb41df54058744621dbcb01e87d", + 
"format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cluster_ha.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37720400cb1ad01e9ee39db222c6a91eaab11a7007210c54ae40de2346570756", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_fdsp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f50438a845b5aae6529c31089ac6864a039df4daa391eba3cee516a41f4fa19", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_zapit.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "865e998158298b4ef517adadf9b02024d240993f733b58ce089edf9439380dc9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c6913f2d53c36600336c7f99078e0c5fc24741bdc90d37977ad8fdf11d9658c6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8678c5e1a85471dd68958c49d4a88c36bb1a51729d40758b343a71c132508bb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_security_ipsec_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3ef66fe0804deaf2960130ffc900e8ce9b603237faee3b474bde21bf36a6a5b6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "260c1c6a7eab1af937ba0364f015209ce0a510782e8534f94a2e01c8dcdeac2e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_lun_map_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5894c88ebe8590d813e4e815e165fd6b9615b0355f998ce15a8a1eb828ddd21", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_nfs_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b1a7e4e3f5a7f7425b1b5ddb001e1e81e3e4d8f8364a9bc3fdcf056287a66b05", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cifs_local_group_member.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54376372c7bbc4f5977d55fcf056ecd40f589b64282f16bb7609bfc1f93b0cae", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_local_hosts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1400c9176b16f8125aef3ac43685b5aa400c91e3aae195cefa9183d61db6c16", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_ems_filter.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6bc41fcd86c0af36511a2a59f3565fe8d3bffb6c6acd0308a2096d1bfcbc8ff", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c02973d6713c388c561b3394c4fb4967f4c5dbdfe5a88cf19356e503c2b51989", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_security_ipsec_ca_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3544b126e26197ac08864ff30312a3c3b646c8ce3e1914049ad2dacf6780d0d0", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_file_security_permissions_acl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ebb279cd1d998c64317aa566f192a682d5cd0612364c899daf00433c609b760", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_volume_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6703a6c2f9c4a9f23a33f04a03692d8ade5f6401c6eff2a71f22106a2379485a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_fpolicy_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "485d4eb82d17c655b9231120f60e579982fb1b55668e737351c68aadb27cbbdc", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_na_ontap_snaplock_clock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36acd3ba4f32e66211e7b9bc70db168643a0b3b70ba755a25027034a88d689b9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_snmp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2745af5002bd12d7f4fe61c270684cce7345e251aca858ac4307ed867febccce", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4c15253d2c7e67fab1045aa1dfddef4f28a0254c83e7e8e9fd7ebe84650d023", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_net_port.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be7a7e4158d702ace1cfe51bed974a409d5bca5fc9f01b973d07c8ba6c1da6df", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4b0d9d7dbbc5f721884e0348dd65c5391b2b2fd7836b84c3a82ef4fcb8fa82be", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_domain_tunnel.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9fba8cb7a4fbe42282a84328be287a382313cc8642b977065bea1dd59cb38f77", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_bgp_peer_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4afea74d0cc7177f673f78732e43bd89312e1a84eb84c7f745bd2c0e77fcea48", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_storage_failover.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f8d3e1f71c942eb1b11abcf15cbaca0c95615bbf203ad079e5a9cd2fb795c981", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_fcp_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a9654ad266cd800cdbdb5600665dbfc8af46b7819a719473542b83fbe985140b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_nvme_namespace_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e0023e6e3acea503c5818785c7099f3b212c2bae7547305e76b12211d2d74a5b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_job_schedule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e6dd97b5adebeb5e12ac3e62c476c0770b2f820623da6bd7e90e1ab468a48ab6", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76fde0148108cf969f4eebdf176b3d68fcfb3633d916de970e5e0311780839ec", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_s3_policies.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f728307aa894d3d977707819adc128986128eabfddf46e2b88087afdf771e50e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_aggregate_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "298c0aa147cf45e941ee0122d48ad031836b4ed3c0929e45616731bb46b60e29", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_s3_users.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "00800e72e1cb93aca34f7e4e99a000ac52de794388f8f7b44cc1b9fff0bb5fdd", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_unix_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ceccc639b11b87d36f89cdd4b8a63eee769576f68164ac7929217cb41d45b347", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "075a8c6ec74938d53150a44c98d631cc94b6cd9d65be180651d8f03e93855828", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_ldap_client.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "70d71c546a3b5f51da0205cbd014237587688ff5f5b0a612e7171504c368f242", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b635ef174b9a7209602a0297efd2900c9149eaf7766c1a7e0cbb041645b68844", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_nvme_subsystem_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a230069d225bbddc77a8c183293cc016c1dbb4e4109e54dec037e164cca0d4b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_security_ipsec_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82882da8e7ffdcb7687737f815392928359b921a2c25178c8b30501237150e4b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_s3_buckets.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee6062bce396b3445bbe84779f736e92f94096ce637a2e7dfa0cd9b8219d625b", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76fcb403ba9d1af5cb6e1fcfd17b2d9d99311e91a734540ca14dd424e15cc8d8", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_volume_clone.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cfb35d889ea2cebb162eba16c21fb536cfd89afc9ef339be341423b3b8b7d2aa", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_motd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dca7ef081a87947ec9bdfc1090fc4d38f02334710cafcb6aac69d52300992b4f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_service_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed42584253a2226777576cef42af7944a54c8a6d21916594abc2b4c0e48ae647", + "format": 1 + }, + { + "name": 
"tests/unit/plugins/modules/test_na_ontap_net_vlan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd174978e7c425ded0f64f1834c747552006fa1486777daa17ba4d374d8ffb17", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_publickey.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69687762a3ae3694e24895c86d391a3a48cb433d514913b46bd06a6299225a20", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_quotas.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c0f7f14ab2dd8b0a4ac9ecf8689478a1132140c5b28c445ecb2da4afbc18482", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_dns.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d9a65c36d2f55ca4cf0f36774515f5dda2c1c1e697a9d3e5ce640710da29fb7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_flexcache.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ab58224aea87a2ad113901e4361900fc41a053063e0e288f951137c9786c5e5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_ports.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3413e9274da220c47209e8d018190f8006f9beebfb568bca560579809c7d4473", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_export_policy_rule_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1cfef621bfc8ff5c0df531de7851a5fbf26fdc521ce0f375553c32e4c1d8712d", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "850b01d0b92601c8081b43600950d4dc6700c5cea9630ab1e73c10dc30c2c268", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_login_messages.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "abb10ad51ea24ebe88976ce2246df3ba0ab68deb0a6ed29842db35a4cf631699", + 
"format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_ontap_quota_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b239afc2e810c9fc000416a11118999c3a62439b822d224e1b952891311c9629", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.16.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a17a6c96cd4f8f2420149d6fb45af4d638afdc20b0cc2b190128df219dade42b", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.14.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a17a6c96cd4f8f2420149d6fb45af4d638afdc20b0cc2b190128df219dade42b", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.15.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a17a6c96cd4f8f2420149d6fb45af4d638afdc20b0cc2b190128df219dade42b", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a17a6c96cd4f8f2420149d6fb45af4d638afdc20b0cc2b190128df219dade42b", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a17a6c96cd4f8f2420149d6fb45af4d638afdc20b0cc2b190128df219dade42b", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a17a6c96cd4f8f2420149d6fb45af4d638afdc20b0cc2b190128df219dade42b", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.13.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a17a6c96cd4f8f2420149d6fb45af4d638afdc20b0cc2b190128df219dade42b", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3350343c36b48033fcf34615d9079d5787e33d7d4772d97b0acaf77cec5c0dcd", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": 
null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec1875745063d448e6d9ad63042abac6f55d1e627f1237303b1944cec7c38bdc", + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1038889fbba0e97c200632faf2549ec6194fc13ffb08af2402cbf52e3b6e6419", + "format": 1 + }, + { + "name": "roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_san_create", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f33de859beaa674b84ab00ac10df175d949078a82548dab9f144eed2be4cb5cb", + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "407ab2176c0ab9b7a76cd211ef521707d7e235c54541675dded3072cb7986396", + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c39e1a82c97fa762f3a69e75a0ce6a94fbb9495644ee13bb5317946cfdd38ff9", + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/tests/inventory", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc", + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d246e4414698cea9a28832538e8d758995a82139ddabd071ac774b5d5535c5d5", + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6374967f52d5b9d7590312ff195c489ab09b5b13c9331b53be9c11646f174f2a", + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09a1aed78d518f0c1be75e821a8e3ebfccea529830af65c9b8277ec93c6f28e5", + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_san_create/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b20386ed515535fd893ba13348c997b5d8528811ca426842a83ed43a584ee3af", + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "634e7647fe6c58c818988f7cdf5201c54a9bd5fbf2b5fb27c106db57a6da2b31", + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/na_ontap_cluster_config/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68cda4e885dbc0136c38d63fcc51e5e0500dda6d0dffbf38be0ce00712400e00", + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c53a65c2fd561c87eaabf1072ef5dcab8653042bc15308465f52413585eb6271", + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "39d037fdb3cbfa64bab9014a130c8c4444e783223ec54c1fc8f820b4532e5d62", + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc", + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38945ac5667c2c76acd3fda9fb32e4964cb8c9d33e66186083c4db75ea3608fc", + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b973da5144f6793f6fe7ed13da22bfd31b3617b2f47d413864c6aadf52ffbdd5", + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94221b229375a7d7b223cf93c1c1310a42e7279673cf03ac0ed97ea0642c54ae", + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/handlers", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_cluster_config/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "278b21b418e9627f0bcf6ab128f23e111bda653cd8a946d07de51b627b6c3a80", + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "58d3b6fa8aeb9f3ef16ada64ff7d847abcaef096fe230210ae57ddf09c2d0e66", + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18f3a09c48bc14d3ad330abece58b9c1eb7fa9e2374ac187b4133f95f6869cd1", + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1dfd1a3fb85a2bd47a3c077dd35d478e9549448e309a5f27e67559bf214b598d", + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc", + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/na_ontap_nas_create/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b4c026ebfffba631ae8cb19a8de3f037e67a849f2baff3a9cf9d612b2bdb0ec", + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9746759cc2df9ca92cacc1ec402d8ddb764ee22139c88094710aa10d82cdbad", + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e32c08abf9649939f0e9d7b385ef233a5e8f6a181a96edf93d2b27e5a9e7a77c", + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_nas_create/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f8d115b56b4ce540f702dfa9ff1ddc83ff9e73860184c6a925c1686af8b4e0d", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "616506f6a57d844368d2af0b9a85f35525f55f18286ebe876dc6fc04bad1a198", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tasks/get_interfaces.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a489626a3d8d64824a3741857a3e07e148f6871d8c5a7b688ead2e1b69351f4", + "format": 1 + }, + { + "name": 
"roles/na_ontap_vserver_delete/tasks/get_volumes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fc185724ebf93470100d3482bfe7337ffba9a83c8be193f2975d8d23915d9cc", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b21826f2e58fd3f6b822135c7b15997b7b63f4e0e4a274eb1535f051398843b8", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes_retries.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68afa59e1e98c66725a3c2390fe6d245f9d2759c2c8a3569b41fe517e06d5eee", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tasks/delete_volumes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec2a50bffcab0065cca9636a8dfff0e32f3d911ba850409fdefda3e87da98d6e", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tasks/get_igroups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "17a9b1692e7799c2327cc17dec0ae1810999f5b5e9cc319580f18de8fc159f74", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tasks/assert_prereqs_and_vserver_exists.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf8b86a843e3a59f2a7923db60ee0fde5f6839339ea8278d35eadb38276952a1", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99b87c05316b8f7e80d67d878a7d416476a27856410c8cf099740f245987c03b", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tasks/get_cifs_server.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35731ff9b9bbf85cac91012690b79b82ad67c82d459196845b856fe4d5412fd9", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/na_ontap_vserver_delete/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a02a97ea2d19e67f3fb1b435a3009074d8fb4533d6c20a6189450b5c0687b07", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f204e27fb7d09fe69e28b7ce3df5aa1beb91934eecd789df7e9c7bc8f535121", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1df10f30deac837d9b3d4cd00868409c2b82b8e7ff0b2dc00ee85c12fce4c3c6", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d490eea67f251e6e7c0c5e9ae4958ec52cf5cc94cad3f50c4a021f37944bd373", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/.travis.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "78d857103340db3d01f6ef7c14f6d269f6d34c48097adcea49736b17b80b1389", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_delete/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f8ebe36043811fe05ac20de7072d909f234bf93d59bc2cbc727463594609f5b", + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3484ddc02f7be4609e5fa13f8d746fda5ffd3f92ce8f4ec3510964aa6022c41d", + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "993b6014bf3780a1a9d771e0716150ed674d93e51ea786f55beebe440e743fc0", + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "39ca9dcf929801ce7e791ab6e36502851d501fa25afa3b2f252d9bef54318940", + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc", + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3257c32777454f16ceb5e2b8a4ec523658754aa2e343c6a19df00d74bed5824d", + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87c82405910802bdcfdba3020fced7bd62589ad055995bb81a42c4721c1016f6", + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/defaults", + "ftype": "dir", + "chksum_type": null, 
+ "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84ffc0ebe72a90d4092d78be05d47f2b657d0eb0a8d9f8b16c81c8a84e512212", + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_snapmirror_create/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f57afbb0a8418c33b9e55c519d3db56cc4b0655f8f7fb19d3b3c52636661e781", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "42762512b6823fa77a353e61475ad8ad0f810a9a485c33d1ede4771fb3eb40b7", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d25e6c8828289e3073167dca2741d25b1e2e511df23f7ba7ea502136e91ae5f1", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/tests/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f18cd2124560f42363fd9a46b52cc2ed5bc847b6164fdb81bdf97c6257480cf8", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/tests/inventory", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74395723cb202397f3155578320e213c51fd5ec4762691951e354fa931ab7880", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1df8af3622b0b165fbf5b40544e44bd9cd5cbed5ac3703cfdf75bd82780f4696", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b745757621d33a2708c3453c006879893fa42b7b3f41f41cd8d0f5f9afb7c3a", + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/na_ontap_vserver_create/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f39b3ebf505478da94f656e5445b94d05367874257131ef48437ea31ac1864f", + "format": 1 + }, + { + "name": "playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/examples", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/examples/rest_apis", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/examples/rest_apis/volumes.yml", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81d54fc296e125c444929f21e5841729e8e5fbf175f555b0baad1fbbd6b7e9ac", + "format": 1 + }, + { + "name": "playbooks/examples/rest_apis/list_aggregates.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "224273a3706c4c72acb643882d3380b8f644fb7157dd5aed7148cff7d0de8dce", + "format": 1 + }, + { + "name": "playbooks/examples/rest_apis/clusters.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b654dc68e9b7648301875c1604a9ab7053c1597d76b2c031d56a2a7f62d33b3", + "format": 1 + }, + { + "name": "playbooks/examples/na_ontap_pb_upgrade_firmware.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03a18f5b77651445299c076ec34d9c95d6270a6c830bbacbc38244fa91e8d17a", + "format": 1 + }, + { + "name": "playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "220e4b0d9ff653104391e3fdedaf34595585166ddc146fa9999705dc55610a76", + "format": 1 + }, + { + "name": "playbooks/examples/na_ontap_pb_install_SSL_certificate.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a1444027676f29656e0c959088690b95c17158d57d63ec1249a8a3893915cda", + "format": 1 + }, + { + "name": "playbooks/examples/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0bbbbcdcaf3f53f77f87b598cd922a66d96ebe94a2a21c06bd0e1bdc344ed6d8", + "format": 1 + }, + { + "name": "playbooks/examples/support", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/examples/support/debug_connectivity.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d65ed69e3c57eecf07e303a39d65d9d65fb9a719729c3946f7c77d4e76b7641d", + "format": 1 + }, + { + "name": "playbooks/examples/json_query", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"playbooks/examples/json_query/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5285071aec96c88c8ebe63c0e6fab224918e8b91637efa60bd9705e6bd777fa", + "format": 1 + }, + { + "name": "playbooks/examples/json_query/na_ontap_pb_get_online_volumes_loop.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3528fb06fa2acf9c19a769189e1e4983412adef386d721379788426b4c30154f", + "format": 1 + }, + { + "name": "playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f0d6a78d4c154ab11b6a5f509eca2324e032ace0b7ccf99a9ecb31d3252c407", + "format": 1 + }, + { + "name": "playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "999817851e3b18bccee8a7d668c12988f5c95116c21e3d6050bedbc1d3b8d545", + "format": 1 + }, + { + "name": "playbooks/examples/ontap_vars_file.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f54ac0344e9bd529f398d75d111b7b9b8312fc753514362a51d4ab7812ad4bb", + "format": 1 + }, + { + "name": "playbooks/examples/filter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "playbooks/examples/filter/test_na_filter_iso8601.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5793b54109f5a156e7ddd01f3cdc8be9a21a1620777baf1bef512eff56dc239", + "format": 1 + }, + { + "name": "playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f7086d9ae639bc6d450529719704eae383faff589a7cefeb4f253ba953de596", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/.DS_Store", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a92e6026ea49ee8ad153d99f377c2aca1fb94d42160045c980f4193d2f5671dc", + "format": 1 + }, 
+ { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5910.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46a85c7ac44419efc60f9dd8b4829b90870fc97b724b8e161f59068087c3ac81", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5506.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e47c147b5f81fdd2b4e328acf77af2c16237da8ca626eab15a05acec26a949bc", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4747.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4b43fb48623069ed72f87e0ae07b8c3727a649a7f0c670261f4081f636d847d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3939.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba0d468ed6187b8d44e488b01314555e744230733e66cf5a68de7386e1ca1d4d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5414.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ddac5c817a0d021eb80ccbdaafa8e852dd69e24507b08cdc1939d76b88637743", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4340.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d6be9925950e7569ddd759e3b3eff26ae28e09bad381380da88f88babb59b6d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4439.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bbb33260e524db7f407ec9d1d2f84f67e1808a48e98d33dc6ec07c18cdeb0eb4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5678.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "512d3a8c839f781c6b73c5acfb8b06bb8d58f397849c69740a20ef0dc29dbfe1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5228.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca1c8294ed613c016d3b0a3b8712baf9a845a913a81a6f8b8538cb64d3e7c9b6", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4985.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1027f266cd7bbcafd83036beff691c699f89a8236f2c69fc6c39e2f7af8f27ed", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6015.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e6412325a7886be3dcc6a55747516c1f6dec9f4741b8fa1b67e75749852f8dc", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3515.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "55d52590652945846ff843110c27395de5ed5a151ed31a28ad0e235f2e4100bb", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3329.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0be94b8c59f1eb415a4e4e6c987772c0395a67064f8660aad11e97d665c2813", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4116.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d513da938172df229302c6cbf537b739fe5e6b667dd4c497a0aa9aa1c4ba0de2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3811.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9fd3bc8933335170323e25fe690fdd48e9ad35631ca7fa2d85df8d2531c24248", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2353.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d55f3a6741628c4050feb409a8e3330776b79852bed3167e1f8a41e03f140744", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3542.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb86b5e86dde128f23e7ed61427134054c92fa62ceb299b7bfc6b84509082eb4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5479.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "966f175a77cecf33bbe57e5d0a71fd87e268885d1fdbf448e7739d32a5242bb3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4312.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"4ad263cbf0da69ce4afc5ed5676378b357e4861b95751cbc11bf989f9d24ea85", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6193.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb5bed19fcd87fe34191001b0ecbdd0df93d1424926f00840ae0449427284c42", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3807.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca7fd152ec48251f0bd04cf06411255609279556ae5362ea89f961fb2774a35c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5195.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9bb1ff2fd6875d6ac62f4b86af310227d77a39159a1cb3ec011262e6957af505", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4784.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "adab996eae03e15ad03bcdea0c6c90f9e35c46dab538df4d6437f4a57e22711d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3368.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c6d7b713bdd8043c514e11416eee71a4372be1979b72396ec630771aa0df9d82", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4157.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4dd3a6bc9944b6ca4f50aa0df7ee788672e9dd60afb0db2053bfb21a0bab206f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3850.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb4d84ff11853f26b167e8a036361bb07fdfce5e62d58020109ca1845dc9a9ca", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4679.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7efaa197b70e561ac4e6afdba3fa7ad0395bd11b529ff0a10d4def2bb2fda420", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5068.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e14fce6735e0777691506aeddfb92ea613e9a3da8289767683d52f78ec06ffa0", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-5592.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ab22ee69eaea7ad025e8b08059c00d2ba950d661e3bdea94ed9c6511d2a8df3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5711.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a29d2709bcc53e0b2c74b42b6055f749642924e89f2d247a34f6c683e79ae76d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4803.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "000f39747e17042884f3af48d32fa724aa6ff5992d54387518c258172708ae64", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4415.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c7086d98bfbbc68997477e4d9df5cc19604b6567fc61152cf1f117248f2066b", + "format": 1 + }, + { + "name": "changelogs/fragments/20.3.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d33814b718f1d3a86b69c4723a1f4e68ea28b184ef561643c80212bf62ba9ab3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4177.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4313a3349502f8888c923df54b14680413b2b2ffd150d000513a4a292e8bc316", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3718.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9dde982ff650ab2c08ddb5fe8e2c6f9de0cebada5cfa402c7a283b518680fe0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4527.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ff0b584f24e45518e4face6111d840f7c17f0a4f4a9d9e5165cba4ad3de04d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5859.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86e117afe1148ee8bcde0867848df8d24569a1dccd5f2a8fed9975698663f843", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3870.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"2a74e0d0fc204a2a07787b8ef895d24daf0274aaaba778689f420d26eb581a03", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5662.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9a60a534e4bbe973d2e127e5dab1b7b3fa36a1b6996929250e84d22d4e6a4e4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4338.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aaa405cbb67d09ed83f5798c57431a636e0bb768c823dc223d74e6dc7de98f50", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4566.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36055285110730212e361b7c949ef77dd64ca8f396d113ff354dcf9db4277c35", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3535.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "932a4bb0b36245a81e4fe99093737d412d90b2128c28ea49b4d7b3575faf9c49", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4161.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69aa68b5ebdd9ce1ebad370a7b2d651d06c5f93b1f489e385e469b00fc232943", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4862.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "492e85bae5597972def6ba628afe0fcad3b5a0cf6178df7b30173fe6966913e9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5137.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64e21fa1a807af894afe3867ece0b3482fd735235be11b520cc06de676b7f0e4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3149.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "069204db1e6da702e07111fac767a5044210fbbede5b9ecb30da7031356b6704", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4399.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1b4cf4fb3890c096a0a57a3ab88b530dadacdafeb6d8a30d1b02de9a71006b6b", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-3667.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45dd51940d9d61bdcdf76f5fe180ad4a84693a12173df3b63b13fe19a970c8d6", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2426.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26a62f18ddac258e5f97032e42c4d801115c0b03c76654918267c2a4d18777d1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4771.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3fb0ac0baa0c2e130cc5a51a955a08cbfa0796475180ae8622ee3d5b84c336c6", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5926.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "113d233601f7758df61f774e7ee37b9098098f9597d2cd919ddb8900f76aafb2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3671.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dcd404fe1bcba0fb65e2b8902f18131fb8b3d2cc1d1065a982db86263dca3649", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4337.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "acd01e5f8bc6e8dac1d63808b171c9b201f050272e4d602753789eaeb6d2e577", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4767.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d33df9a5ccb435701f9e6bd0de098ab1d5350e5ddb18b7ed935fc12d61d4764", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4788.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "acb366b173883eeb62ee6aa9194993afb45a2aed8753937a0208f46823da03d0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4049.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5cbcbf96d4c3c560eefa6f39692c9c525a129bd91ac5c9f2d4c198732ed5ce2f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3626.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"725f6a70a2ef5ad6c84f02b2882e0485b7243f35ae8a42775fc483f82685037a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5121.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "505a43594f0bc7eb27b384bb08bdc3f027cf8020ecf776b4c55c7af4b072fa7a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4729.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "776f256485b36dca92561c51a327250072d9035a9cb746d66decbdcbc95dfc62", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4731.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e2b914a8d9d19be24bff7aeee6e7058cbd3bfb72f233347dcabe516518b80ab4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5065.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3a4176e2ab73d521f6486a92a30631ef57c40ebc0f68c5d947f9b3a51475ded", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4048.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "062c8124b32cb28d07686e9d1852182403b719beed51b3e291d4b5b3e18d8d54", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5659.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11c7809cd386026df9ae46c6bd76237bd3121ba686a87b11f13baeeeafaf2c14", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4336.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "574c556353b0b56aaa1e49ce77c70259f92dafa707b77bbf9b754f4bd659e390", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4789.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66eece6833fe00a201568814c4647d9ec664865c4ebc20449103843e1da7e219", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4623.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92e5195c9677461f0b5faee8dd8c1d616ddcefedbc1acba198da6c503bd98ff4", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4770.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d210f3a3f97b4a082f677342345ede1d8d337b8220de39057997aad5fa057484", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4320.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21c7843cd68f2afaf5b0af138ad378023fa1621ccdbddf9686d1d3d62b585073", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5161.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "161723f7f6ceafe893ac5b5128359317047b76e47f1746868f1d86464e7df94c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5531.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6308447de5f46a840f8b9e309a97eaf0a0875e3bfb7ef93c24290e5ecbcd6cba", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4459.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a40f5cf5ba576ebb466d5dbbaaacc4ee320069720ce006603226e28d306f49af", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5136.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "529f7aeeac9a72e0e8f57fd5cf6bd576c490f8b2e58fdc2e93568279529f703b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3148.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f04821e53d5a4a50f488f75c5f80bf3dac061ad546980283e428d4bd2e85735b", + "format": 1 + }, + { + "name": "changelogs/fragments/20.1.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52a66cdff25de9b14c4be8365e3130f5ada9ab7b9f20c40b8635c1f552291b1c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5589.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e5f0360eee42e2b2c182f21b4f00c49662f14706b67bf9552c260874d46bbde", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4818.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"890646c654bf1dc901c4252eaa3098a923cd73c10ecec2cf9ec33c7651078909", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2459b.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14d1b4a16a8c825cb4c5b1844e18344589f72966f81f76c65d54dc38378e48f5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3830.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ced3f81bda12968c9138ae833586fdf977bbebc5ab60c5b6845f6af9d97197d9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4863.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "263628cb17df5dce45fc27e9c2cbeac80b6057f76e86b4744c1d5d7d3d9b3b75", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5819.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87b78d6cd0ecb891e5d6e17979f4e9a35660a88df8f854db92e419797aba8d47", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3534.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32879079f92100656beff0ac0fd1e7659b0a36df702f9a53eae973adfad9f233", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4588.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7748be15b82931efd48d172263eac0d6b3d854606da0741b1e54e4f9ca23afb4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4834.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c77fa02565e0408790db51731a1c1dcee6b01387940c0f23723d4f0cc422b02a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6235.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f72d8514ced2d9cb39ca6a156f4343b4ca8e8171bcecea6f1f630aadf1a1477d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4121.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a8cd30ff6aed2c0b2d3e513e6efcd953318dfc464c4847dbb225bbae2360fe1", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-6262.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc37e94bec6c6441eacc739a1820fc9e1e3934feb0ded9f5e7aafdda1298fb63", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-1661.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25e0ecb72da920acc83b48cce1efb94ac0aa2841eabf586145471a3a14199030", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5788.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b47ca1b1fee006a0b3018d51153b18f7eb6d8f902a8a0cdc23bab7cdb5a7f0e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4526.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc34427a695f99f79575dd1bc707cad03a36aec561d3e118ca593ac37b2592b7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4802.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a80430b67647765359d311d058f92b7d2cbe7fd069bd18a797657163af680d20", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4228.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b145461db91df8b7f6ce0e4903860b7d15a015cfffc597879455344e338c8b98", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3369.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f1762d7b528ab3861fcc4c056ee0fb841600bf50711fd9ba760726b8612a310", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3386.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e6dbb60010475fc16a75a30f87938cf1e8683e272d1ae4249789d036cfa28de0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5481.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aff9afd58cb33a9fea59f06bb364f83c5f7954cb8f6c9f75a31750ca67c02cdd", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4785.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"be99d091c9be277efb065a9c362a52bf5454d767b70ad6ebf5636a322045607d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3685.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8b97bf44db4444435cc8f27155b5019710b89a1544fbd8116d8995c1cef3507", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4140.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2341a5303a415abe747b3c987d0f0b3e2a860b3fc8b945b1b4160a518d54633", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6192.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f02bb2f7c68235c76cba79a7565e0bf22b45b63b6c04f4a9528cf1489f9e408", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3390.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "822fed342ba51d797c0d796b1c9f4c4cf376c3ced2834ea101a8177bd42485ea", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4005.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca874a3d6a36eaf7574f51d8df4b462c7825f49786056623496ea051e63a53fa", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3113.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "885ee5ca962cdf4dd7c888a936f057c0a401414c65571a4030628fb4cdf6b32d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3543.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8b5724eaa1db66e9dc12bae760ded0341eb4f0ed159a9cc452501d695add870", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4114.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0bea56e3ad16941e20c18dcad393abbb15173cb10e31772294c6bc4c186be46", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5090.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6e76f8cc7c843e4d652c9315f5dee2dd8f5d755db914e9c148705f5794663c5", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4394.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d7b43ec4cf989a41c69a8b9d111403440fa1d4432e4a153745dcbd5ec6ea627", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6014.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c3f9fc50fa8a60eb32af40abdb626589889dfdc22bf6f966615b441903f925c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4479.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1d1025315e2413f4919a6f8a6a0931fc29ed55da9de47735886c0d23c662798", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5268.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "866ab77bbccb57a15435ccf6ed3dff0836b2522485493ae0bafee7ab6b599f02", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5287.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c88441e05196d5457796c2b27b18a6d34bf4c7009d95af7defec4d43cd2d2573", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3580.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f9da0d4aef96ccbdcd4323b943a8b479805b763816dc1412d8e8b79b201b97b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4300.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8768c16ccfdec53753f5753ee420f246c3126452e46ad287a496d1cb5f900c09", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3241.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "adb1bbb03de5934c542ecb11e8b8f1df211ebc7e8e0f5b0a0a7b0aa4a65c61c2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3754.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8795ef9f837dbe0c437ff91b8fedb1888c77323785f07239ba0d4c6631ee1a25", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3304.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d2abacb569d9adbd7b7587b7190f5e03fd88bbdc1c246d8bd73d13ad82b4413f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4465.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "12d2dabb7d1b654b882b58728153e06a80d0d5304107615eedd4938e6bb1f147", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5229.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50b2f37d6fab5e5b903d123b11c657f4e6115b76d3988736c8a960bf4fc04049", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5696.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b68bdb177e8fa5c2f9d65b46173c15ae8b797c58f194cd3400206eaa42195a1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4984.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a6dabfc37c202c0e9d9bad3c4f293b78c92e6f5bbf16d85274cbe1b545114c4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3312.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "629aa011160d1d211417076d19b82f46c782b4c977d82405b65a4afb6a9f8c0c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5415.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72a13ddc79220f215327cc96e136d9229e35bd7908a08eee79d13489e63bf599", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4711.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bbd8a2b0be7ad0692f70b190db50deac94bc0234a38fed18a55b49c7a20ce2bb", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4341.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7a4489286b395764daa2622c942d5f04cd608cd66c5c82684a0408248cefe72", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5784.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d55b1144ddb847118a019e72cc9c23fdaa011fb2e3d3e1a8bb0a4d4b0dcfe1e4", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4879.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d9bb503cd9d63fdbaaa607b6dda93ddbc3956077473c435d464d56f8c7f2572", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5507.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52e5456129525d958bfdbe2ae55820743eddfa8a120177140a394116a8ece210", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3579.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "575eef7bf43a311f9e616200f1b8f8106c98c830c88422bdadfe86eb78addbe7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5275.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f51e006ad0a9c2ce06fcf389a1ccd27332e869651cc29abcd9e2735950600ad", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5760.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "268ef22281d39444874788251b5711e682543719d2cd511574fe2fa033bf3481", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4872.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c039439641a6d9eaa47121e7bb215510aa21375dc1b620ae4f9ce30e684c0e32", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5019.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08653c5e744fc97187024439c3b0577210836450b1c772c04b703565153e8d6a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5367.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "648c381fabd1ee890df3246fbeca2a91358b8fb39fde6b0894b6b6523bc148b8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5737.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "58093b7d6606aa8d172da3695af80017376ea978835cb3affe046aa35ad330a9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3175.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"6e09c16f66dcb15a40224dd49a85e3761d0b813ff5ded2f66256492e92e2cfb9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5808.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "291ab81dd54f4746c902d5e4674a0d3ea04b51756688b967efe8acf8959bef6d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4022.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d2e60928ddc1e63b8e9251d74089a7665ae0a4fe6019ecd4b8d08f963ed8bf9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5263.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f07d0eb40bc8ec9a5a1c0d84364760eb74a8d4ccf163e4b1c9348feaa1f8d4d3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4864.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2339d8f88cc5734234c7bf9fa0cfcd69482d32e12bf6c6a3bafb9934ee4b4c8f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3358.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae8d980c571479c22756401c2dae46fa7526ac8e1bd071ff6a3c8b1d160e207d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3812.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18fedee21398e8b76ad54c94a8388d688eee426ce28a983d23079c0b68a4fbd9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4832.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "678ecc63b6ae75a446112809b600f5a006f1295e3948ab3af59e2fab4be5568b", + "format": 1 + }, + { + "name": "changelogs/fragments/github-56.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e2f10c30cb7f7e72a5cc918a8c70a6d4aa46b6427131569c565af246ec5075e8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4235.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6cb8d965dbc9538dbccda2e41c5e396e1a3a103081bbee12efb8693166f7047d", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-5536.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "113dd5c5366515dc919d9de9be97d0e6a08e6cff30cc974bbc0796bee4b2608f", + "format": 1 + }, + { + "name": "changelogs/fragments/20.5.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a45f0c9e764458cff6d07469a5bff424024e43f74a4d7960235ef6a8d06d972", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4798.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c51fb65d098942356cde1aecad4a5a689e5d6bbd70657a09ec27102e1e236844", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5189.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5412d9f1d9a0a879222f856cd2a352240b89b1585e4fbed2527edcf997e1e6db", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3677.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2cf63d84adb809f720121d2cd41899fe90f6bf0e44d1a3c258757f8b4f49e054", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2965.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e0c8170836c1972e5928a16e65535e63fd479bc4e197b77d7a3ee5616d6e2c8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4331.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "108a273c13de030c00381cc73f18acec4caee490d9874cfe8899558ae9cf1454", + "format": 1 + }, + { + "name": "changelogs/fragments/20.9.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68a3a7e3ae54392aecfe21feac211fc5539cd10d3526a24eec36c05d740bb393", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4809.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "048f9de47f5415a03f8ceb4540e8b2a5ce2355ffa3d156ea18da1bd2c772e6ba", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5127.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"2986a94928fdaf96027d7dcae35adae32b2b54a40b4733a3b67b0aec115759c5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4736.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f656d0e95204a0e5678e62b1585138726889b690c8505e6092a96e3128c09ec1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5062.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "013ce4d086fdd418c75053d70380ba4da2bfedeb4fd2412a4994f658a0ccaa79", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4604.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bccf100466e1d2e8def0099332144337b9e46afae08a9829e8bca7487e43c5c9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6005.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64d7106665a91e865cc13f8cc7cdc6478f0da133ef715b18b61e9e983c54c38c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5629.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef719a7fbd54ccb864836ce5f2a6079fc204dad9b3792b5ff293ab39305c3c85", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4487.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb45069479eca895c2d8d1e4ba512b31d0cc0977e50d54d81e027a02aac7807c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5412.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7abfa76d3978b0b2d2bed36adead969cef8d176090363dcfb889156e3524bd4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4716.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c17c93eba62480fe5275cea815c6585c575caf8309268f5ac584d61403f5d1e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3483.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59d427f1209a589c6355342d9a812aef13df4cb072b8dac72a03df47365ad793", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-3883.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa0f58ee7b31ac2f710e5c72e939696bd645ff97d9271b0b2689bde4d39396d0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4645.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "088fc6b24c8c8bf20fc87d4220a24fc1cd56ad0234883add73089037aab5e24e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4350.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7539e7cbf0c21a24553136a5c17130894cf9652886bf0873da2798cea074be3f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5812.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "459e4224a55664177e07d4f89128d2646d55dd7d3d184ebd3677729c3d532e98", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5079.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f29433e75f2f6e26759304a4998ff0a30f37a6cd11b844d0db519503ec61556", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4079.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86af72b119d036819c6b0b03e4b88ebd7aadb65c389c4c5830b77f0617539e8e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4612.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a04a7569df53f7cddadd7aa08bb0c24e6ed5858645910b4aea23f6ac0f7a5052", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5453.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73b277654dffbc93a35e27161c66fa7caa4cee9b9ae69f3f147620e2e7579e45", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5845.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd54adfc8f84bce14fc49b96c96ebaad63de6d9e1cb10d891e2a47fbce7f225e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3354.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"ee64020d610ab58bb7b01814c1f251898393c1ac439fd0e7eda3086175b912f7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4393.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4a3fd91566343261728a42217215de5d6044a93089e0bf60ae89218db12b012", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4540.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e395344f80eebf0c6ea81e459be92f441f79d113f285b8ab68d46a230ebf0788", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4813.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76d087beb160bf2a94525611c039bb82b396662ca5818aa3391d55c8d562e677", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2928.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4972a6c0cfe2ff311a002e76a41b05262a5885cd8fe2bffd65a8a92be7c46595", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3952.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f585f7549052e722378fa2c561708bca14147316142341ea2be5f3f3cfa19d07", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4794.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38842bfbc23d8f0933025d9da00669aa96e460edd8cc7e1f3c34cdf1fdb32228", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3401.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d4c5c41a09e649bd883b7bf7e9282aae03c8c694b6ba826dba750bff9fe35a5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6195.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59626ae42fc6405108996f7a7ce603428643bd5656f414a7738a52b30da66555", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5243.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46547cd16e7c36c5f094f913f762e01b4b0b8fc5c2c4db98ffc8d07074d7541b", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-3801.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71d777c50692fa7bacdbaa60af9daa6df9720817f0bd797a75f40f08cdc73c49", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4501.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "145791d3ef284d3169c0a58d2805d6f80edd174a087066727a1ff56144920baa", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5310.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ffbe08cf026c1242a1d4d39cc34ae1a1b1ea7578a2c6a67288cc33b3842b888", + "format": 1 + }, + { + "name": "changelogs/fragments/0-copy_ignore_txt.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03509fc6d9459f8d28ed772b43edf0572721c42bc841706a66ef8b0dad9e8547", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5594.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de81c8bf51645946645e6bb57e2fdc4c5992aa141d9ab5e21236e88418da7956", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4804.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5a6ca034e4e9ad00c8dcbc73595162c25e8e4fc936a785a71bca25c0d11d555", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5983.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f34e8d78e4c7ebb1b247c252d8125608984880bc00767710522b520f2150e58", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4691.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a11fe5e7fbe5ac6f56e08e2108f7de0a1afd814eeda8071907e0c2f6f6160792", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5595.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6cf897e53829b8b93197a1c7c550ec911d8e6bfdb26896258f44fc42ea1dcb62", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4150.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"c682432460e011158025ab840c9d60fe420817101273dad1c65d59b0412352e8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5604.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8892adf418c27dc01257030324b6955e3742a564663acf25b7ab7b7e2f5255a7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5487.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b42f7152b88325f09b3878f441e4bccaa286d3d9ae241b4eb8e4d76d36223f29", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5757.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57f04cc894563360fb93da11957bd2d01b2b2da97ee46f20a9a91f1e0b04a282", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3400.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8c3b9dde1166cd5e04faaec18bfddf6b79d0c2e9e660ac7fee4c05c8e6720cd", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5215.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e849d44879e80baf8a47529af9bb770fd891421a7e84e27172b24c4fd5bb77b7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4404.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca5cc1d4b08850eab0103cfaa80eb7218671e899e417bca36da3f630777432de", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4392.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7695cad7e10bf8e8661e552c6a611603ccebaf6186feecf4ea23615383ef6844", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4243.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff10dce2d887c987913b38c0e28c90d45d13f8458d4fb0ec799b687662a0b47a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5844.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3117221b638ba1d95d869f84e291b52e503e10ac3e95d6b13045d2ac5f86636", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-3139.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7efe12ee6eebd037d85cd27de039581aa661646b6da344393ef77dec0fa2207", + "format": 1 + }, + { + "name": "changelogs/fragments/19.10.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6017cf3e9df1bb5e888813d13dfd69e8cf62a8bffa0dc326b7ed1cdc10d4136b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4644.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "440968d1eecf54a919892b56c280ee107bae36edd80ee76c96fca6059b3e2f47", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3181.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c1b5f6c2d7f52b2d7835a95a3e928fa7fe0a8188bec3604ddb6635ec36cec24", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3494.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "047cb40415a2ed6391172c573ca7b0ced8e9e42277eee30c150d581c16078247", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5540.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6a3e77590b685362b8938b251e72f1b64e9b15a14bb146fafb21dd31b1a1fa1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3251.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8eeab9b764a6d8b4d70b21ed516793faeb8e0dae62a44b51046aa6affc9f0838", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5413.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2266bbe0eea5f1ebb56572f104e5f9d1af119a1b35eb05e73a83f8fbe36aaea", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3178.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ae05f4b30ee077e947c9c17f1110fc38a1fe347e2519493ccb39a46ad0fe227", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4347.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b0ecef13ea1958ece11e33ef2c9457fa887e4147628cf24cb3be0ab81b720856", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4039.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f6de1ea71c2984a682783e69ff2dda77a2dac5a3de7dd9c6bbc225801cfd9c0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5628.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c3c04a951b7c22275f919001c79032ac411b0b6096a43012cdbaea050543db1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5297.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "06ccde7af6da5d8122598bb5739feb7c26708e8aaf169043cffa26450adf90e8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3969.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec840d73f2bd0eba15e9da0939c459305b25f4006445a22a50290d868b2b4c49", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4255.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "841553fc824ccffbfa9d0b72ba78720e537c3dedc4d4eebc869e1b03cfbbce7d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5917.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09a2a889f9dd289133b99b701853acd1358f4645d5360f226abf6556245c6777", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4605.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ad2d67fc5a9e220886f219e615989278dec75c48fd28591b40e6c9128f0f43d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4737.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a4710f657ffa5da2977fbe9beb166ba69787ec18c424d0d8180771ca6857e139", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4367.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c5e430b174f27ba09ff05da704d8dce689b5f0132a83eedd3745ec71b7c8bde", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-5960.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "409985476064525ed65cbb1a6605478fa0fdae2888c6ec39a976228c65ca1dbf", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5063.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae0121bd1ad247c93bf31108cbc5d51e393906baec30fdc2c6a544761a2d49c0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4808.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f475309d53ba3a6520ca2ec8f41e470c7c8a573cce499d21debd47f29403bc65", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5034.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f9242e4be34989d7d84f262e57e76d121d157a67999f9c30afa24fb5e3a8e0a5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4449.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f068db95484272e5a62377595276cbfe0d9506f639088e2e5e0e35a64adecb8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2964.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91e53b1c94a0925984b409d46ac2c50704d7eee14de2ea67d5be1360de7965de", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5537.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7aeb8424c7d45c497c76377267caf52361330850bb1b963f817059a5db13830f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4776.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "45723175aa9e40b7ba1ca7e19f812fad729e3c2c79cc605a5219c131dfc982d5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4799.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af8baafc5046b52743909dc3f6fc587c1ac3155b9350ee31ebd3ef21f14ee885", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5188.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"913f74900985f3c7c06172c9cc0ce73c2c5d93313c26abfe27b9f01b95661eb5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3230.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d319b49f11bd16e1b86223038e51d8b8c1c366816becc6f4e0bc3d7f633f067", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2972.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "125d1e91a23c398f76730d703c5aa05450a3a6728b395816de4a960af6ebe12b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6209.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3e18af82cf100958090b49e78a3091bcce7e33309b3a15afe3649ff14f69648", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3772.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a36a2155253e82ce39248b054f4ec87bae9733a02645cd66fd4b9330b36f6a8c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3973.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1b2575790ee1e00096d45f02d046f465854dcf4ba1c7ea67c4a8c4041abf7a02", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4648.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3f4cbd7bc0e8f8d281d8ef354a0128bbfb913cabc1bc17f75c11c10716f2ed4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4218.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "774d1d3f253bd0f406bfee8d3fdb91d8c7ebad7824187e5f40d6a01dc514ce30", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5409.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "216b65b677b4eb42b09948fae17d0ce4c9c2b6589c561a16aefaab232b3b9da2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5665.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68f81775174864dc0781c65e3cbadbeb3b1dbe033c86cf0470339f9a0b3074b9", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4998.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7abf193033bdf049acd7e82f3f95f7bf57c927c589aa87e41a6a5086cba78280", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6233.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9aaeeb383fc77a93bdc331ae6056eb9f4bc85ea29040b24c832739ed759fd852", + "format": 1 + }, + { + "name": "changelogs/fragments/20.7.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6184c68f2cdddda83afdd95f7b9523aa9679a701ff8a2189e7c61cb1edd1025d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5223.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20322c4edac290e1e693a71ab990f024a7ee47fea878e3ef43a171adb1e49744", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4577.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c31822cac48355aaa1ee9ed1643d39bffc8268f0746e8ffb6f8fea9db4f8802", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4609.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c21c059f4197f9fc7f998ef0f9b689db572d86c439d0ed3f960a18391513911", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5761.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "285e5e797ae459add154e95aba467f854e07ec649f53d471bf48521f2473e6bb", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5430.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "507f1ae7972770ae4167cd25e4610696c91312980768a468e4dfd1b04bfea288", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3767.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bc1e0687b22f53b0ce0ddfeb0920a90b4df3b8a1da00894aaf70876c4f5f54f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4333.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"af4bb8d7e6de34ba660722e995f6d1b4947852180c33b2d1eeff4316b510f82e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4763.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3e0758ed57e554705cfe19415948ea16c791710656d557537ef2ed275d23bb0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4775.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fa997e6ca72260dcd5c721e2df10612b3b0242df4c3fb83b5a37cf981fb4700", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3399.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29a4caa1c9decc1eaf4442b59a5cba4f6bf5e41e3b0107a70db053a00e2a3162", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2422.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3862999d0bb436b7615be931902c51f5590ae53525ea1573a1f83318b5f45c7c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5426.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa56478b4d003658ef53b2030e5331752538ea080c72343d4dab6cf6b5934fba", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5774.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d0160fac57dcf89debb071af340992b45dff854a53506a93a4d37bec25bae42a", + "format": 1 + }, + { + "name": "changelogs/fragments/20.2.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fefcc121464cde10b40963deaf29159fcdefe108ebaf8b72adb4d1525290a0dc", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2459.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4260a8454db93da6f6cfa0b573e1fe83aabf84cc3cc4cd97c5c89403d78392cf", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5666.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9180ec64ba907fe9152d0e862f9cf144bf996d46a6e8de4c82729fd79f1f10c5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5109.yaml", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74db81c85d74272f9fcb6f49e49fe09da3b68925a5d7b5181c8f7b330aab8618", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4348.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a2745a849eb718e1e961dd3e13430e973f1f880913fd79a560b887a35651b3f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5735.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb8f1aa52d5ddd3b468fe2344adf5bddd9c7ce95ac1c53f94f5327b83b99d58e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5220.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14a1144490ec476da5dd8cde6896c771e90c58c260d0dd98453bb039c1a80b68", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4554.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1643a9d2c86e0b18f2bed0ed163536fcf8333c3dc9e73c2d1687db63355ab4d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4807.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "39ec8e2db3485503e1842396c45ebdc7a5a2f187b1ec5e0ab81be86b86c4e7e5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3442.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0730c6978d44995f2675e0a2b6a3c8d73f0d7e3c3fb1ef758a08607ebd6c4256", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5596.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bb3634da2cb2671ef16d2c3d1ec773268b956cc5789490ca7ec121209488ecf", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5312.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c593f33126c5c996ad3b4f10957db71611eeb7dc3bd8d4ae4badab857fe4371d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5938.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23c2ed9d3436ff6b327077f6cfd27dd8e400993ef3624438240c4f4da9181341", + 
"format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4780.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1682c614bb5b1009ad82cac555425b8da934a89b9d9f66c301272cf19243d27d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5892.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "815f05dd922d3c127f96f3f548de8455649a2b17c3f2e4d2790f0f77ee943f92", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5484.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93154928c485b07730d586621a809a6d5a484f750f3e7d7f7743fb45f887e11f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5241.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff969a93f182bd36bb5ee60bb28364a0aaa78cb09b5cbc251b65f7cedad5013a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5611.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "606bf46e0b0189bd523c8597bba242d8184b3bbbdc6d988127f854b305d1f652", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5304.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22a835c904ed1140ea523980a5860817bdbdbb234f5178476ac86dd58e2abc3c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3950.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a9fbbddd08f9126f98cefbac316d42a69da97feacaa63397d8a13744e98cdf2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5168.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d433e333737312f784e6c78bbc3ebc8e823f938c6d776f97d68dc288e87fd3f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4329.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb9a6401a2ffb5a0678a76dbe6ab7aa87902a1a44eec8acf5ea857547334dbbd", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4779.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"0973b4d9f42398a4ad2c6fc0f1ca5ac2d7631c1958241b8e4b416393b220507c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5216.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "336110dd04b06b8bdea936ebf64b21423fd249650a26b127964e83d364fd2820", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4391.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9395e66bd2f7b598df0c15fccb8553c6c9405c5410de475b5fa0e4ffb055864", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3454.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e537dc1c387c40fbe90e4c014cd0e53e0359bdf179154d8ddfc3f17fcda1c5d3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4179.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb5a5f91d8f1cda5e6b47e4cde029eeaf85550ecc5b00cb6eb329d37fbcdf62c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3497.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3be43b8b81039b5c6930ed5279373c0b62977e83f127d6c27bc1bb99f2f3b3d0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4435.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae22a553f14597ebf62c37c0cc7969c135e9f0468c2cc45428e3cbe1c928d348", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4981.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2e84a5f69e9458d12d9a242f221b85406b24d863f0b5f734245306696e8c2c9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4344.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ad62280bb726b103cbc0840dbaeb746eddb23b9dc863ed3b40b50effdf235a4c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3194.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f421581196d13e4737cf90233dde8c1791c90ce706ec84ca926e889bbac582c", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4190.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0da3872c03264161ed4502a7266fa895d76b6133538400d9b15c7021caddd1c1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3655.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7cf7edf2a3e28f10a7c13d5e7b58687f7f598eb8fda17ea00c00aed661a3c5c4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5152.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1b63f611399ff4981df78ed98675b7af5a5d7f72915e6b3bc0b1194aea144bd2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4743.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2732c37f8e7fd1e686e8b72fd93fcf125fbb0dc98356a2e9178852456ddc4fa6", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4256.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ce198a8a9f1c6a9c654613e7a5a0a4ee31b283929ccb474282f4c8a2cdbfda6", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3439.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dd9dc6d333ce1ed05c443650b28f534015adbdbc04124de0441d5d3fff3b2e83", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4606.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a98a0c14d26c263b13be225fbf26b992a94e9d698f265b420038efe688cfcd36", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5017.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "63b6b2d06a7079d433ea3e35a2426f8d839e0ed016b29060537755e77cc5daa8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5503.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d96ec7bd2bdce1f14f0a7e05502e05862ecf33f22c0afbaeeefd924132392a30", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5016.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b0139a21350c289501f08e3ecd3f15ca3144d5820f4002fc5757b23ca89fe915", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3654.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d60a311cef6da305a6c0fdab62375b3fd257a692cafc665387339146b580a087", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4191.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0372b1e4ca9eba5db6430f773536b07ec89ddfa89fb6df6fa1084fd9eb4d3c8b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5807.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "290c983fe0a792b1c180deabeaece639f03e0f68bd13d5d84d3fb5032a36dbc7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4345.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0bd2371a0f1e2a62aecb494cc0b78ba9d20c28adbbbfeb16306e14eea6e0593", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3480.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d7f4711e587bb08683bb9fa92bdd216ded5e0a14445e254396171795b340f2b8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5738.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48b8bf5a4c2fccc72527aa3fc718c241744098573e4927f3a1fc93d8ad5e2a7c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3479.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e41e4503cfdbc1c3fa4db6d8733c4630ed69994bc7f82b505c6c319d56989dda", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3615.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "80e7d18eec2bae11d8fc3a359ef460c473e53f893aef8ed36a09c31d8edac0e8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3510.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c29079056af2538fccff352ef3e6c55934f609d62ec88a263a47ee554d3d463f", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4113.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "041f052ddceba545590a97f67ebbdd91c477fcbc60f455b62feb634c2d31121a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4342.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28613112e2d8704a1c7bef9d5995563b00d9ca1e41971f0b0520fc7d6061ccfc", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5190.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d60a6f1d2bb749c5af7d30a6b6d865e84a93b3def3e129912811bb2f506c1fff", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4781.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88628edd4b069ff20689a18172521282620f2ed295de6cfebbc783e4e7937efc", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5485.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a61d4ead5bfb8043e88f1c21c1d36416d2a32e7505924ad9dc95c390319e05e7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5606.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c6d65ff18bceb957eca93fc315f7fd083565c21a5540e7caaf4ee6b39034738", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3443.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9a4d56ad3d75b37f72420eb90d516e2d8cfed5796b5cf96967e3e011db0023a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5082.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e1a7ff89d52f94d499978cf346e00bcef4e6c1ac37dabdb6699d3014f31f85c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5344.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac5b800057b7baec5378238fffddb32c3e366a284b80cc6293ed530a5a7d44c7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5299.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"4ddec3afae21eac2484fe41135bda6a0a36e30023be206e51b02320c0676eb61", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-1665.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2dd98c61e47086a235079378fb52aa1c876b80e5a1ee3d3dfddae0a4f31d92c5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5626.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e42b5c2764048d3ff90980b1f016c62e3c4ccbe183d8544396dc9278f103ac4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3571.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72aecd041004bbb620f42a9ba10c500e49226b7175e273e4019a417f72fa9b38", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5919.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "230fd6dd1590c64a9e3f656c8ffd4f1a617a4eb22b3762803253447a9764e163", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6266.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69c4e9a3bc9f39c45dba1a57dd7fe3051e04e9156f24fc8de272e4d05f2b19c1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5734.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f81b77c69a62d357a761de1dd51a33afa6a29310d1821046696250fe186417d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5671.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05ee6de5cc4d3ecb09276b52f911422ee874f4f5064353aafc6cf6f501a6d5e2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4060.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1630000a5cc7e12e772e3b381b20e167f8be2d05661b44cda44173cb0954b0b4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4349.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5acbe60ba9cc9fb800bf7ba605ebfffdde3d9ca7ea5288fa3ca993b4f7501ee4", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4830.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60cf401aab846a39010d87aaa8fc10ff3e054e2179aecd13f18599047345bc07", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4975.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab5d6f1d98bd1f70627af752c5e03416b2c813261975e31b5ff98badff01aae3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3926.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ffdd1243003227a8e8bdc50c02be8a949ffca028a8510437ea1cdbfedf7c0a6a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3137.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aca8c98df772735b978376dc3998b1c9b85fd25475964bf34749f1c22ac29044", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3971.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f102dd8195fc2b8a5787dff9a0d8623c22a99e29e65054e3cfbc4afa67a3a55a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5427.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e6eedb19206e474db4248b757df62feafc39f3a4841a8d89a3d19b4f7380bda", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3662.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "324e7ce43ade545c826d145927bd57d12398446233ff311f2954d813af759016", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4774.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "00b23a2b51461e4ee9bb1b889903453699f8c91de99fa5e32aa2f4abcfc44998", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4332.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "daac1fb3757f3a5891f4de7ec54c83d8d6fef41819d49b1196d5293e9e19c4df", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4762.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e3ef233cccbdd62481bce984ba6a49c59f86e6cfe0121e96be8e6d4a8654dfe8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3623.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8878e23ed8e92a89dccdf434ff93553c0e4c26dd1ec1afc8fa1e2f406f462a01", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5431.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "719a134efd48ff3c9e50b5a6246b59c3f36c814b1666a92655cf66f31a6569b5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4735.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67123dc7fabcf9954972ea4a615b8e8d740e58281b40ae780e27b03a98b53c4a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3628.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8be0a84e377f7f257ab92986fac9bebab47918062f4d4d195b4dd9da72636d17", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4417.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ceb3905c08630f6a83e4fd28f10d75f96ae7b98e7607ae8d0d03b41b855872f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4801.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d24f00cf54816690468d761a890a4671ac341286e21ccb47cb9c3ca81a0ae1ee", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5713.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b813b3546daa6b835624f0da6c3d8e5e24ee7e80848c08fa743c1a6096b9f633", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5986.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb162412add3a4133256cf891dce07ffaad570f887d5aeda9c5bbd648d00af19", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3501.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6293d7e0a9b5d299b9021780efa96d7650fe68219dc8bf69b27212eecb1a09d8", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-5085.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a11c9ab438381868ae90732e3979e1f43894878df202b75905525c273335683", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5251.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7f110e54f0e7932b596adde06ce8518f84d12b3381188bfb11d2edceaa9579b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4010.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3b332c01a976f6e6444165f4a6b76a3c405486781a61125b7f4f28e816e1692", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3385.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d081c6cd36ad9bc682598ab7970998af3a27506625315da10e8074c1f18749dc", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4786.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "601af436680b836904e38eba3c8726eb286574aed07a2825de2828b558408a54", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5894.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b17508b710df1e5d2a5cc54d304f102cd94cbe6f339194220eea3164c0f8290a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4769.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "971c5ef4db7f2a6bf867af69625804bc6878d7221999f3c662dcd1e89e8f0360", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4339.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ab60c265e4089a98ba6cde22028d3726fefd9486334a17e63ed57fb90e9d3bf", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6191.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7f45bae636100689f917e56d87c316b6196a58e02bad3f46e12d1af617a79ec", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4790.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"32f991ec7080aed7044beac05329f80a6d455ec99cd6109b877437040a44366e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3540.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a62f9ce4bbaafa0544b481923edc1fb2a8ab64449836f7f8112e80ac2c8ab6c9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4401.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83b13b1bfbdf277ed4634e49bbd3d6a0827ee0d5f24be019f9574004c538d307", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2668.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb81b50d40454aa4816b265b805103e1e965449e33bf47c8893bd8795c78e4ae", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5457.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef883429009ea79f7bb3d71990685a9feddb2c3256b0a95f7a918624b51fa409", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4568.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd6e79a5d31792b48b2c6ee3274521497c764d9cf9f64b7a5095688114a391ac", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3757.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38d4a439e6349c7cfb6d5887d09e71a5da5e4f7bbc02c7fe20f246ad8f809626", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3242.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b1096e49d2040250fbb0c6338840d4b18e7c01297f34b46c701bf171a4b769b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5816.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0da313863a5fb830b2e383129cda8cc9d68e4ccda7e2505a3fb492d5861f555", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5380.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a752a11ea409f65b6ae3d5dbdb602f3fd0cfd742e7f21a3d029458ca7de1686", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-5338.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "687f5f8dd4c2afbf84f13382f3f7584d0a6b541a5bfa68770845aecbaff52762", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3716.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1dc281488b2ffe58a7607d41befc00b0a952c7a83dfafeb2586a9e0287e3672", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3346.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1543aa1015730c0d62906cd43b15d75f2272892b17960c648a0a0e72dd50b36c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4196.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10eabdf0268bb31e0d1454d90fa721ce9f5eff0bab830d49eb1ebb6d2bc0b442", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-6001.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e035886f7f5b475a2a35c9708fe82f725b91209d7689a742be1f05e77a17882", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4745.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "775004c083bf2b01ab83a0dec477918db440ee2dd2c826e7bc38ecd05f543fac", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5504.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef8bac14dfe61ee6013d7a5bf3f2c27812ce4e8f0a9679e2d36d7d7efaaec4c0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3983.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41586ee48835a177e9aa66654382ee1c38ee2e5cc0de36035657e17112790b4f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3595.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "577d510f29503fd89a36a381d36afc8e0677ad967573e9dbacb081babb154e80", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4227.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"535a6c15aad900d6cadcca67765d8ad9c109a551d8db483a206bb1888d319a1e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-1926.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd2692c07a29d9d24c5c1fbfb0cd03015fd91f3b503745eb1a2f4f83c25d8b6e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5820.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a4b14cae141bbca9c118c2715303bbd4093ba55c9f2eedcfa246a4ce88ce90e", + "format": 1 + }, + { + "name": "changelogs/fragments/20.4.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e898205ff917c45fe4214fc93efc6d1d9dde2220fcbe84df45db6b8c6c607f3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4270.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f55cd9645e36bd5785d5abec72d9bc764c4e8ceaa3731437f4910f483c246a33", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4335.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4711cd82cb5748598d7e2a23fc9e30cc4c3f032519e85afd52023b68a9586ceb", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5174.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9db9219d1ac5eea502ce44d42285a1de7f733b84df4e932e498dd9455c4151b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4159.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a306ef0946a6f6f7d6aa71e89497a3579cdab2fd8e25c448bbf5eade9e974240", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3366.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c883fec44f63c4e61696177246a87a7889570febf9938c56fb7aeef0b24037f5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3310.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d30f60360c127c3223186e9b93acb5293d846698e0a8759907fd6b99f8c77081", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5532.yaml", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4dc3becc28b1813b09fab983dd4383e618afcce31513d02e08de10f6199dc302", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4773.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "596ab8e13d02ab50834525b915b9049ba8ec1c41faa708e45486816f3f66df30", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4325.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de297190d075e1d7120cba9db91ca31dfb8e93d92d39c991ee0ec34e11b6b98a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4289.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "85ad107693f03057bcaf420a98e3213798f3c40c6451b000d30be84c59b5bcb9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3370.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed4b84caf21e93e3ee9e053889ed5e7f7879c165c235b9eff6509fa050b5e129", + "format": 1 + }, + { + "name": "changelogs/fragments/20.8.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d56008d00b6fc4f20d6ab7742bfc94da88009a199fcd895769aea7e63277867", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4231.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82e13fecf55e6ca470f589609bc24b2101c02ab315a9b78474d48d88d1aba268", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3632.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2021da9fb057b6585109dd5b5c46371000782cc80b27f1f4320a1cd6ec17cc3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3262.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40e1953925ea029d66655fc474d5f1fcddac23bbd38db80800f36526f8ea3951", + "format": 1 + }, + { + "name": "changelogs/fragments/20.6.1.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3dfa7436cee382333f2f49b62a4933c1293a7ea8aab027f923ca252bd5839b21", + "format": 1 
+ }, + { + "name": "changelogs/fragments/DEVOPS-3649.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d34723cc5e07ed7f903e22b53406f585e7a6056f6ce9d8ec234594b37c4365b0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4026.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c3f0f75f344037cad9f07196845b98053b3d0861e869bc040fa48b545c99411", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3167.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6903dfcba6ebcdde9dbeda9634d3992ecb9bc9681558b3b88f97ad4312366a1", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5725.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f5bc23d346a3efe2522c31e3f33cd1e1968f02e4a5a2f2c10133039a1dc9f9d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5733.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed7ac676edf5f879727d589835b64fe486f8b01f58b11a359114ae5f2e7a9358", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4122.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c407f5bc8d059b4947f54d50c1f52f501fd5eb9f8cd71274b1056f9c3820a6bc", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4319.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c5a8ee7135f75841dd37eb705b215b57bb8568daf7a05aa4e4a1c3c5629841f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4175.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e734e97e1b8c703139713cf06474a63a2729b2ebefc16a7c623b54ce4a7acda3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5271.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1443b879c1a8154d071ec389aae36bff43e8fea37dbf09020b9e92370155cef8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4460.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"1ce2413a36e90dc6db7cb0943b2e18cb11dba86a561169ddeb619ffe1f593db8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5270.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2b3faf98069b953794a64b082f20d3befd86b75f79384cc2273a42e998768a0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4898.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c22318a4b441dcf2d6777cbba3917a69b56793273eff2ebad7c88aab8bb9dc07", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4031.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2c8ac73e796058a5abb756c45b95805cd4a398f7786dc7143ded2cbfb7ddf51", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4573.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8fc36d0cd5c2dcae3cc159b197f569ae772eebbf47efeab0c18f358ac918d6c0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4123.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a8aadeff8900595e01575c5f2cffee0a61dbdd87c422d62f4444ff4168cf8a8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5677.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c466f51690219f9cdafcb9b7292a1f71693f11980c6583347a30f72ce7ca8fd", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5948.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a655555927155d4d3e69d27a48053d6e956b087c08e1c3435d151054558c8588", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4565.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "78795517c3a72e4bcdd177564df3d8697f7f687af57fadea79f2c8eb8cbe2bfd", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5548.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ed75f919e1155163f928f7eee44a54d264c8738f9066fad2a50241092824164", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-3536.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5533f88072485ff0eeee6a48e5224ad8e0e6e2b0e5dc106cfda8b36d92664c11", + "format": 1 + }, + { + "name": "changelogs/fragments/github-110.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d02db7b00f929109805287d023b8d3fccfce9ecc8ccd086e45684fa30dbfab6", + "format": 1 + }, + { + "name": "changelogs/fragments/20.6.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3876566d792ae492f13ddc9e2b51be33ee82d319554c5fd8a9a627fabe14f6c", + "format": 1 + }, + { + "name": "changelogs/fragments/no-story-1.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "296057d50bf6cb9d6511bf0f923339f7907d0f3842dcc33e81129c2e5bfcc94e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3633.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f166bb1b6b7b1edb56f90547034091284e04310e4e3e9bf33ab2e978e13a03a0", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4119.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71b944d6a5f9c48adac5ab06d66c49fb005662aa2c3dcaad74a9bccd416d4206", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5972.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "00563ab6fb55d2abaea794e2754ee0206810ddd6687618eed42be752d08e050a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3371.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b475d2aa51c1e5305b48770104d355fb59b13ec39758e4432156d7c44fab9dcc", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5026.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3de4acaf4e9ed21a0011520231088bb3924125866b408563632f014acd371d2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4288.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"3d0de665977335b466e08d0989af943578a1441f9532e3070751828e22eccdb8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4508.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d0e2b51cbd9a990c83b4fd0ce683269cbc4d9c304a2f5be83dfe9251715a88d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3367.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f8d7742342151743a7b4a27bac65d6aa361d041a7fd2f68b99b980df5e61f268", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5899.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59c77dbaf711c570bc21569abeb11a3443fcc25bc20c4de5268f33aafde2b329", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4621.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7218bc1eca3f4571efbbf4ff754261185e05f17fb62ffdd9ba65c97fe361b0c7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4334.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc6b6edab20b571234e9c9037530c4d8cc5814c6fbff5c5336da7a37528817b4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4764.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70f33a5b1a94d063395a73abbe4786d8c0fd7c867f43b3cc93c1fa0b793ffc28", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3625.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "094cf6f8622bec443fdbeedb516dcef0fa6ec3d15c2c4a6f5d7d587b32c12628", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4676.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fef4631d7f5d04f580f6c2b599840985721aa3dd321ec625cdb1c702afa040cb", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4719.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "281dcd4fd1f536d8e3bbd5fc7eca49de97980d0b4ec1809ac093aabf199432c2", + "format": 1 + }, + { + "name": "changelogs/fragments/20.4.1.yaml", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4242572b057b756ccc9f2aba9906d429af16f874474edc95a7165062934d0c2d", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5505.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f648d34a6f36e8be349e8815554590486848a4fdb6a99833eee72976344592a", + "format": 1 + }, + { + "name": "changelogs/fragments/19.11.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "588905e810b54e945acb17d2fe859cb954a71f5573bb58c2794416949695b88a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5913.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec5cec7464001c1cf4ab1782ef32bede1536dd9723fb9067fa1c6c92bc8a902e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4197.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "517054faba13c0f73a534c18233d56d7bae4da4f8c40663c25fd0a53f1c3ffd2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4343.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ee8f6cdb35936d87526628913636fb0da7805de622ae0b8287e507335c122e8", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4206.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71b3c1f7af3f37de211e24e984bcaf406f5f40548ae14e648838cb94df41e1a9", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5047.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "186ffca050c86042a8a8897fcc085ae7202f0115e577c0bfbccc3b241a6feb86", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3490.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc1946e88e1f68ed450cb26dafb86a27b514f47e30fd32fc8864fc723ee0ef84", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5952.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b8771c93210e39643e428c8eeba51ccf0484d47c2257729906e9107df85c44a", + 
"format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5015.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9c69095e5e93ab9b30a65df7fe3a8761268b5e4a35cdcc95604accd4b0b2336", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3994.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b78ca8d8e988a8ce8d656e86d2bac2ef3ef4e179d1dda7356463e2dd4bc235a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5285.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "89b85fec239f79895d751676636f762fb12640552d6115120c949e06b6aa0eac", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5790.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d870700ca638bb45c733e72778dfed471941d68a2da6320a16d6e223ffef102e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4882.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43bc98b957a7c68973f0fa78b09a34881ab8e591b54147e268857b1b910f3dee", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3900.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a582f01b4672739f8f659637d4efc158a6fce9939ff58ddf1825ec25a7379a31", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5138.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae6882ca7203f6624ea52e13b47f7e837671f401babc655b3a9087e1c87ad0b2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5092.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e1884727d3e3d300f89e21091cfac85c10f51641eab01b6ef6e8d81799bcdb2", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5354.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fa1fe49c3057ea2f0886e72e3f783e694d1ae0fcba0bec755eacde3e1986a435", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-2491.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"41f9f7c55cbfa0dc9c41498e2f29f604008a476881249bebe990455187d91123", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3668.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66133e03e511cf821e0d14cc5e8c713e85b7308db4436059c71b5993e9dbf88b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3392.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "581117e6e90f32ce72d72e5e5b34c1bdc481976cbd64c50ce4a2669bfcdb633b", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4457.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7663026b270ec923cfc63e84270b700ef6579fac1140cf705cba40c6efbb56e3", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5246.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f037991d24c8e989c8831fc2b560cf95faa3810a6fd4bf7693714d1fb99ae94", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5809.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14bbf72704e4249482a14b382ead1875a8659ac4df15c3b77a0211def6f1a26f", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5179.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4548072df86afd52619193f3234f7e7e82e04c75008c09772002d46b422046e", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4857.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "373c0d238c9bc09f081de34b1fe78647c90750f8fc611b7bb69b08a70fa2f7c4", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5591.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8102825d6d9dc3eede2741320b29caa689855a700ca86adbd87ffa2086d36cf7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-5084.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a12c27f45055331101864ca012f588ae805263a014cc235a8ded539c082c8376", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-4800.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3878312cfae86a76574d308973feb6f45ebbd52e9dcbbf784bca58d2c6aa91e", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7bdfc1bd5a49a86880d38b82b1c6a050df3d9659c3b74236293f81f536fc5ad6", + "format": 1 + }, + { + "name": "changelogs/.plugin-cache.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7cbe9c8b5709e3f329667fa54b5c8ffa1333cc49805e51b788d3e52fca5c8b5d", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36885965bfb781446612c16dc008aa38596609693958a42b92e4fb6b760f1a95", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8bc903a9f9e960e83709868d4e500ec990596a01b84de2ac5a2541df22497f8f", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c53a65c2fd561c87eaabf1072ef5dcab8653042bc15308465f52413585eb6271", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "97cb7648d0f84e5c0f9d2fc408018eec5e7adb07ff090f7dcbd97fd8ca419d0b", + "format": 1 + }, + { + "name": ".github/workflows/codeql-analysis.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e89d2b00db9f14c907f1e3a9151c5746eb89e68a3f3a796964de347371b8c7a5", + "format": 1 + }, + { + "name": ".github/workflows/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a63778d527b7f382ebb71cde6c15af7fd2c08200b52166eb965e2e41ad107128", + "format": 1 + }, + { + "name": 
".github/ISSUE_TEMPLATE", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/feature_request.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5c7a13cc7671292c5db2d70aca7f127baf02c39759909c911ad32c9f4e39bb0", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/bug_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a52c539892ce8413167fb8a94b7c299efe4727087fa0478b76c0c92c1f18cce", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50916c09a55828383e593285e59044d240349554c2a54eed9ccbe367d97b7d6a", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/MANIFEST.json b/ansible_collections/netapp/ontap/MANIFEST.json new file mode 100644 index 000000000..333653f31 --- /dev/null +++ b/ansible_collections/netapp/ontap/MANIFEST.json @@ -0,0 +1,32 @@ +{ + "collection_info": { + "namespace": "netapp", + "name": "ontap", + "version": "22.7.0", + "authors": [ + "NetApp Ansible Team " + ], + "readme": "README.md", + "tags": [ + "storage", + "ontap", + "netapp" + ], + "description": "NetApp ONTAP Collection", + "license": [], + "license_file": "COPYING", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/netapp.ontap", + "documentation": null, + "homepage": "https://netapp.io/configuration-management-and-automation/", + "issues": "https://github.com/ansible-collections/netapp.ontap/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5fcf72da57259f9f93e9dd46099622ff04e193cef08c9aacedb13253ce404b31", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/README.md b/ansible_collections/netapp/ontap/README.md new file mode 100644 index 000000000..f086b09e9 --- /dev/null +++ 
b/ansible_collections/netapp/ontap/README.md @@ -0,0 +1,1811 @@ +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/netapp/ontap/index.html) +![example workflow](https://github.com/ansible-collections/netapp.ontap/actions/workflows/main.yml/badge.svg) +[![codecov](https://codecov.io/gh/ansible-collections/netapp.ontap/branch/main/graph/badge.svg?token=weBYkksxSi)](https://codecov.io/gh/ansible-collections/netapp.ontap) +[![Discord](https://img.shields.io/discord/855068651522490400)](https://discord.gg/NetApp) + +============================================================= + + netapp.ontap + + NetApp ONTAP Collection + + Copyright (c) 2022 NetApp, Inc. All rights reserved. + Specifications subject to change without notice. + +============================================================= + +# Installation +```bash +ansible-galaxy collection install netapp.ontap +``` +To use this collection, add the following to the top of your playbook, without this you will be using Ansible 2.9 version of the module +``` +collections: + - netapp.ontap +``` +# Requirements +- ansible version >= 2.9 +- requests >= 2.20 +- netapp-lib version >= 2018.11.13 + +# Module documentation +https://docs.ansible.com/ansible/devel/collections/netapp/ontap/ + +# Need help +Join our [Discord](https://discord.gg/NetApp) and look for our #ansible channel. + +# Deprecation warning +The ONTAP 9.12.1 release will be the last ONTAP version to support ONTAPI (ZAPI). Future versions of ONTAP will only support REST. +This change will effect the modules listed below. + +### Replaced Modules +These are modules user will need to migrate from their playbook to use the REST version of the module. Do note because REST +return values differently than ZAPI you will need to update your playbooks to work with the new module. 
+ - na_ontap_broadcast_domain_ports -> na_ontap_ports + - na_ontap_command -> na_ontap_rest_cli + - na_ontap_firewall_policy -> na_ontap_service_policy + - na_ontap_info -> na_ontap_rest_info + - na_ontap_ldap -> na_ontap_ldap_client + - na_ontap_motd -> na_ontap_login_messages + - na_ontap_ntfs_dacl -> na_ontap_file_security_permissions + - na_ontap_ntfs_sd -> na_ontap_file_security_permissions + - na_ontap_qos_adaptive_policy_group -> na_ontap_qos_policy_group + - na_ontap_volume_snaplock -> na_ontap_volume + - na_ontap_vserver_cifs_security -> na_ontap_cifs_server + - na_ontap_zapit -> na_ontap_restit + +### Deprecated Modules +The following modules do not have REST equivalent APIs. They will stop working on any ONTAP release after CY22-Q4 release. + - na_ontap_cg_snapshot + - na_ontap_file_directory_policy + - na_ontap_svm_options + - na_ontap_quota_policy + +# Release Notes + +## 22.7.0 + +### New Options + - na_ontap_s3_buckets - new option `nas_path` added, requires ONTAP 9.12.1 or later. + +### Minor Changes + - na_ontap_name_mappings - added choices `s3_win` and `s3_unix` to `direction`, requires ONTAP 9.12.1 or later. + +### Bug Fixes + - na_ontap_login_messages - fix `banner` and `motd_message` not idempotent when trailing '\n' is present. + - na_ontap_login_messages - fix idempotent issue on `show_cluster_motd` option when try to set banner or motd_message for the first time in REST. + +### New Modules + - na_ontap_active_directory_domain_controllers - Added REST support for ONTAP 9.12.0 or later and cli support for lower versions. + +## 22.6.0 + +### New Options + - na_ontap_aggregate - new REST only option `tags` added, requires ONTAP 9.13.1 or later version. + - na_ontap_qos_policy_group - new REST only option `adaptive_qos_options.block_size` added, requires ONTAP 9.10.1 or later version. + - na_ontap_s3_buckets - new option `type` added, requires ONTAP 9.12.1 or later. 
+ - na_ontap_volume - new REST only option `tags` added, requires ONTAP 9.13.1 or later version. + +### Minor Changes + - retry create or modify when getting temporarily locked from changes error in REST. + - na_ontap_export_policy - added `name` to modify in module output if export policy is renamed. + - na_ontap_broadcast_domain - skip checking modify when `state` is absent. + - na_ontap_qos_policy_group - skip checking modify when `state` is absent. + +### Bug Fixes + - na_ontap_export_policy - fix cannot delete export policy if `from_name` option is set. + - na_ontap_file_security_permissions_acl - fix idempotent issue on `propagation_mode` option. + - na_ontap_qos_policy_group - one occurrence of msg missing in call to fail_json. + - na_ontap_s3_groups - fix error when current s3 groups has no users configured. + - na_ontap_s3_groups - fix cannot modify `policies` if not configured in create. + - na_ontap_security_certificates - fix duplicate entry error when `vserver` option is set with admin vserver. + - na_ontap_snapmirror_policy - fix cannot disable `is_network_compression_enabled` in REST. + - na_ontap_svm - skip modify validation when trying to delete svm. + - na_ontap_qos_adaptive_policy_group - rename group when from_name is present and state is present. + +### New Modules + - na_ontap_kerberos_interface - Enable or disable Kerberos interface config, requires ONTAP 9.7 or later version. + +## 22.5.0 + +### New Options + - na_ontap_cifs - new options `browsable` and `show_previous_versions` added in REST. + +### Minor Changes + - na_ontap_cifs - removed default value for `unix_symlink` as its not supported with ZAPI. + - na_ontap_cifs - updated documentation and examples for REST. + - na_ontap_file_security_permissions - updated module examples. + - na_ontap_ipspace - improved module fail error message in REST. + - na_ontap_rest_info - improved documentation for `parameters` option. 
+ - na_ontap_security_config - updated documentation for `supported_cipher_suites`. + - na_ontap_user: option ``vserver`` is not required with REST, ignore this option to create cluster scoped user. + +### Bug Fixes + - na_ontap_cifs - throw error if set `unix_symlink` in ZAPI. + - na_ontap_cifs - throw error if used options that require recent ONTAP version. + - na_ontap_file_security_permissions - error if more than one desired ACLs has same user, access, access_control and apply_to. + - na_ontap_file_security_permissions - fix idempotency issue on `acls.propagation_mode` option. + - na_ontap_file_security_permissions - fix TypeError when current acls is None. + - na_ontap_ipspace - fix cannot delete ipspace if `from_ipspace` is present. + - na_ontap_iscsi_security - error module if use_rest never is set. + - na_ontap_iscsi_security - fix KeyError on `outbound_username` option. + - na_ontap_qtree - ignore job entry doesn't exist error when creating qtree with REST to bypass ONTAP issue with FSx. + - na_ontap_quotas - ignore job entry doesn't exist error when creating quota with REST to bypass ONTAP issue with FSx. + - na_ontap_security_config - fix error on specifying protocol version `TLSv1.1` when fips is enabled. + - na_ontap_snapmirror - error if identity_preservation set in ZAPI. + - na_ontap_snapmirror - Added option `identity_preservation` support from ONTAP 9.11.1 in REST. + +## 22.4.1 + +### Bug Fixes + - na_ontap_snapmirror - fix invalid value error for return_timeout, modified the value to 120 seconds. + +## 22.4.0 + +### New Options + - na_ontap_security_config - new option `supported_cipher_suites` added in REST. + - na_ontap_snapmirror - new option `identity_preservation` added in REST. + +### Minor Changes + - na_ontap_user_role - add support for rest-role `privileges.access` choices `read_create`, `read_modify` and `read_create_modify`, supported only with REST and requires ONTAP 9.11.1 or later versions. 
+ - na_ontap_user_role - `command_directory_name` requires 9.11.1 or later with REST. + - na_ontap_rest_cli - returns changed only for verbs POST, PATCH and DELETE. + - na_ontap_snapmirror - wait 600 seconds for snapmirror creation to complete in REST. + - na_ontap_security_config - Replaced private cli with REST API for GET and PATCH. + - na_ontap_security_config - Added support for protocol version `TLSV1.3`. + +### Bug Fixes + - na_ontap_interface - fix incorrect warning raised when try to rename interface. + - na_ontap_ldap_client - fix duplicate entry error when used cluster vserver in REST. + - na_ontap_ldap_client - fix KeyError on `name` in ZAPI. + - na_ontap_san_create - Role documentation correct to from nas to san. + - na_ontap_user - fix KeyError vserver in ZAPI. + - na_ontap_user_role - report error when command/command directory path set in REST for ONTAP earlier versions. + - na_ontap_volume - fix error when try to unmount volume and modify snaplock attribute. + - na_ontap_volume - fix idempotent issue when try to offline and modify other volume options. + - na_ontap_vserver_audit - fix invalid field value error of log retention count and duration. + - na_ontap_vserver_audit - Added `log_path` option in modify. + +### New Modules + - na_ontap_ems_filter - Create, delete, or modify EMS filters. + +## 22.3.0 + +### New Options + - na_ontap_aggregate - new option `allow_flexgroups` added. + - na_ontap_cifs - new options `access_based_enumeration`, `change_notify`, `encryption`,`home_directory`, `oplocks`, `show_snapshot`, `allow_unencrypted_access`, `namespace_caching` and `continuously_available` added in REST. + - na_ontap_nfs - new options `root`, `windows` and `security` added in REST. + - na_ontap_volume_efficiency - new option `volume_name` added. + +### Minor Changes + - na_ontap_dns - `skip_validation` option requires 9.9.1 or later with REST and ignored for cluster DNS operations. + - na_ontap_dns - support cluster scope for modify and delete. 
+ - na_ontap_interface - do not attempt to migrate FC interface if desired `home_port`, `home_node` and `current_port`, `current_node` are same. + - na_ontap_license - support for NLF v2 license files. + - na_ontap_user_role - `path` is required if `privileges` set in REST. + - na_ontap_user_role - `command_directory_name` is required if `privileges` not set in REST. + - na_ontap_volume_efficiency - updated private cli with REST API. + - na_ontap_volume_efficiency - REST support for `policy` requires 9.7 or later, `path` requires 9.9.1 or later and `volume_efficiency` and `start_ve_scan_old_data` requires 9.11.1 or later. + - na_ontap_volume_efficiency - `schedule`, `start_ve_scan_all`, `start_ve_build_metadata`, `start_ve_delete_checkpoint`, `start_ve_queue_operation`, `start_ve_qos_policy` and `stop_ve_all_operations` options are not supported with REST. + +### Bug Fixes + - na_ontap_aggregate - try to offline aggregate when disk add operation is in progress in ZAPI. + - na_ontap_interface - fix idempotency issue when `home_port` not set in creating FC interface. + - na_ontap_rest_info - fix field issue with private/cli and support/autosupport/check APIs. + - na_ontap_snapshot - fix cannot modify `snapmirror_label`, `expiry_time` and `comment` if not configured in create. + - na_ontap_user_role - fix AttributeError 'NetAppOntapUserRole' object has no attribute 'name'. + - na_ontap_user_role - fix duplicate entry error in ZAPI. + - na_ontap_user_role - fix entry does not exist error when trying to delete privilege in REST. + - na_ontap_user_role - fix KeyError on `vserver`, `command_directory_name` in ZAPI and `path`, `query` in REST. + - na_ontap_volume_efficiency - fix idempotent issue when state is absent and efficiency options are set in ZAPI. + +### New Modules + - na_ontap_vserver_audit - added REST only support for create, modify and delete vserver audit configuration. 
+ - na_ontap_vserver_peer_permissions - added REST only support for create, modify and delete vserver peer permissions for an SVM. + +## 22.2.0 + +### New Options + - na_ontap_interface - new option `fail_if_subnet_conflicts` - requires REST and ONTAP 9.11.1 or later. + - na_ontap_interface - option `subnet_name` is now supported with REST with ONTAP 9.11.1 or later. + - na_ontap_snapmirror - support `schedule` with REST and ONTAP 9.11.1, add alias `transfer_schedule`. + - na_ontap_snapmirror_policy - new option `copy_latest_source_snapshot`, `create_snapshot_on_source` and `sync_type` added in REST. + - na_ontap_snapmirror_policy - new option `transfer_schedule` for async policy types. + - na_ontap_snapmirror_policy - Added new choices sync and async for policy type in REST. + - na_ontap_iscsi - new option `target_alias` added in REST. + +### Minor Changes + - na_ontap_active_directory - add `fqdn` as aliases for `domain`. + - na_ontap_snapmirror_policy - warn when replacing policy type `async_mirror`, `mirror_vault` and `vault` with policy type `async` and `strict_sync_mirror`, `sync_mirror` with `sync` in REST. + - na_ontap_snapmirror_policy - add unsupported options in ZAPI. + - na_ontap_snapmirror_policy - add support for cluster scoped policy with REST. + - na_ontap_svm - warn in case of mismatch in language option spelling. + +### Bug Fixes + - na_ontap_quotas - fix duplicate entry error when trying to add quota rule in REST. + - na_ontap_quotas - fix entry does not exist error when trying to modify quota status in REST. + - na_ontap_security_ipsec_policy - fix cannot get current security IPsec policy with ipspace. + - na_ontap_security_ipsec_policy - fix KeyError on `authentication_method`. + - na_ontap_security_key_manager - requires 9.7+ to work with REST. + - na_ontap_snapmirror_policy - fixed idempotency issue on `identity_preservation` option when using REST. + - na_ontap_snapmirror_policy - fix desired policy type not configured in cli with REST. 
+ - na_ontap_snapmirror_policy - deleting all retention rules would trigger an error when the existing policy requires at least one rule. + - na_ontap_snapmirror_policy - index error on rules with ONTAP 9.12.1 as not all fields are present. + - na_ontap_volume - fixed bug preventing unmount and taking a volume off line at the same time + +### Added REST support to existing modules + - na_ontap_active_directory - REST requires ONTAP 9.12.1 or later. + +### New Modules + - na_ontap_cifs_local_user - ability to create/modify/delete a cifs local user + + +## 22.1.0 + +### New Options + - na_ontap_interface - new option `probe_port` for Azure load balancer. + - na_ontap_snapmirror_policy - new option `copy_all_source_snapshots` added in REST. + +### Minor Changes + - na_ontap_aggregate - add support for `service_state` option from ONTAP 9.11.1 or later in REST. + - na_ontap_aggregate - add `name` to modify in module output if aggregate is renamed. + - na_ontap_cifs_local_group_member - Added REST API support to retrieve, add and remove CIFS group member. + - na_ontap_cifs_local_group_member - REST support is from ONTAP 9.10.1 or later. + - na_ontap_cifs_server - skip `service_state` option in create if not set. + - na_ontap_quotas - for qtree type, allow quota_target in path format /vol/vol_name/qtree_name in REST. + - na_ontap_volume - report error if vserver does not exist or is not a data vserver on create. + +### Bug Fixes + - na_ontap_active_directory - updated doc as only ZAPI is supported at present, force an error with use_rest always. + - na_ontap_aggregate - fix examples in documentation. + - na_ontap_aggregate - allow adding disks before trying to offline aggregate. + - na_ontap_aggregate - error if `unmount_volumes` set in REST, by default REST unmount volumes when trying to offline aggregate. + - na_ontap_aggregate - fix `service_state` option skipped if its set to offline in create. 
+ - na_ontap_cg_snapshot - updated doc with deprecation warning as it is a ZAPI only module. + - na_ontap_cifs_server - fix `service_state` is stopped when trying to modify cifs server in REST. + - na_ontap_file_directory_policy - updated doc with deprecation warning as it is a ZAPI only module. + - na_ontap_file_security_permissions - updated notes to indicate ONTAP 9.9.1 or later is required. + - na_ontap_file_security_permissions_acl - updated notes to indicate ONTAP 9.9.1 or later is required. + - na_ontap_interface - fix cannot set `location.home_node.name` and `location.node.name` error when trying to create or modify fc interface. + - na_ontap_interface - fix unexpected argument error with `ipspace` when trying to get fc interface. + - na_ontap_interface - fix error when trying to migrate fc interface in REST. + - na_ontap_qtree - fix cannot get current qtree if enclosed in curly braces. + - na_ontap_quota_policy - updated doc with deprecation warning as it is a ZAPI only module. + - na_ontap_quotas - fix default tree quota rule gets modified when `quota_target` is set in REST. + - na_ontap_quotas - fix user/group quota rule without qtree gets modified when `qtree` is set. + - na_ontap_svm_options - updated doc with deprecation warning as it is a ZAPI only module. + +### New Modules + - na_ontap_cifs_local_group - added REST only support for create, modify, rename and delete CIFS local group of an SVM. + - na_ontap_security_ipsec_ca_certificate - add or delete IPsec CA certificate. + - na_ontap_security_ipsec_config - Manage IPsec configuration. + - na_ontap_security_ipsec_policy - Create, modify and delete security IPsec policy. + +## 22.0.1 + +### Bug Fixes + - na_ontap_interface - fix `netmask` not idempotent in REST. + - na_ontap_mcc_mediator - Fix error that would prevent mediator deletion. + +### Minor Changes + - na_ontap_interface - allow setting `netmask` with netmask length in ZAPI. 
+ +## 22.0.0 + +### Major Changes + - With this release all modules except for the modules called up above now support REST. + - ZAPI calls will continue to work, but will return a warning that they are deprecated and users should migrate to REST. + +### New Rest Info + - na_ontap_rest_info - support added for protocols/active-directory. + - na_ontap_rest_info - support added for protocols/cifs/group-policies. + - na_ontap_rest_info - support added for protocols/nfs/connected-client-settings. + - na_ontap_rest_info - support added for security/aws-kms. + +### Minor Changes + - na_ontap_debug - report python executable version and path. + - na_ontap_net_routes - `metric` option is supported from ONTAP 9.11.0 or later in REST. + - na_ontap_service_policy - update services for 9.11.1 - make it easier to add new services. + - na_ontap_snapmirror - `schedule` is handled through `policy` for REST. + - na_ontap_snapmirror_policy - improve error reporting and report errors in check_mode. + - na_ontap_volume - `wait_for_completion` and `check_interval` is now supported for volume move and encryption in REST. + +### New Options + - na_ontap_export_policy_rule - `allow_suid`, `allow_device_creation` and `chown_mode` is now supported from ONTAP 9.9.1 or later in REST. + - na_ontap_export_policy_rule - `allow_device_creation` and `chown_mode` is now supported in ZAPI. + - na_ontap_ldap_client - new option `skip_config_validation`. + - na_ontap_service_policy - new options `known_services` and `additional_services`. + - na_ontap_snapmirror_policy - new option `identity_preservation` added. + - na_ontap_snapmirror_policy - `name` added as an alias for `policy_name`. + - na_ontap_volume - new option `max_wait_time` added. + - na_ontap_volume - new REST option `analytics` added. + - tracing - allow to selectively trace headers and authentication. + +### Bug Fixes + - iso8601 filters - fix documentation generation issue. 
+ - na_ontap_info - Added vserver in key_fields of net_interface_info. + - na_ontap_interface - fix error where an `address` with an IPV6 ip would try to modify each time playbook was run. + - na_ontap_ldap_client - `servers` not accepted when using ZAPI and `ldap_servers` not handling a single server properly. + - na_ontap_rest_info - fixed error where module would fail silently when using `owning_resource` and a non-existent vserver. + - na_ontap_user_role - fixed Invalid JSON input. Expecting "privileges" to be an array. + - na_ontap_volume - fix error when trying to move encrypted volume and `encrypt` is True in REST. + - na_ontap_volume - fix error when trying to unencrypt volume in REST. + - na_ontap_volume - fix KeyError on `aggregate_name` when trying to unencrypt volume in ZAPI. + - na_ontap_volume - `snapdir_access` is not supported by REST and will now inform you if you try to use it with REST. + - na_ontap_volume - when deleting a volume, don't report a warning when unmount is successful (error is None). + - ZAPI only modules -- no longer have `use_rest` as an option. + - tracing - redact headers and authentication secrets by default. + +### New Modules + - na_ontap_local_hosts - added REST only support for create, update and delete IP to hostname mappings for SVM of the cluster. + - na_ontap_bgp_peer_group - Create, modify and delete BGP peer groups. + - na_ontap_file_security_permissions - Update SD and ACLs. + - na_ontap_file_security_permissions_acl - Add, update, or delete a single ACL. + - na_ontap_name_mappings - added REST only support for create, update and delete name mappings configuration. + +## 21.24.1 + +### Bug Fixes + - new meta/execution-environment.yml is failing ansible-builder sanitize step. + +## 21.24.0 + +### New Options + - na_ontap_cluster - `timezone.name` to modify cluster timezone. REST only. + - na_ontap_restit - `files` and `accept_header` to support multipart/form-data for write and read. 
+ - na_ontap_snmp_traphosts - Added `host` option in REST. + - na_ontap_svm - Added `ndmp` option to services in REST. + +### Minor Changes + - na_ontap_ems_destination - improve error messages - augment UT coverage (thanks to bielawb). + - na_ontap_interface - `dns_domain_name` is now supported from ONTAP 9.9.0 or later in REST. + - na_ontap_interface - `is_dns_update_enabled` is now supported from ONTAP 9.9.1 or later in REST. + - na_ontap_interface - attempt to set interface_type to `ip` when `protocols` is set to "none". + - na_ontap_vserver_create - `protocol` is now optional. `role` is not set when protocol is absent. + - na_ontap_vserver_create - `firewall_policy` is not set when `service_policy` is present, as `service_policy` is preferred. + - na_ontap_vserver_create - added `interface_type`. Only a value of `ip` is currently supported. + - na_ontap_vserver_create - added support for vserver management interface when using REST. + - na_ontap_rest_info - Allowed the support of multiple subsets and warn when using `**` in fields. + +### New Rest Info + - All REST GETs up to and including 9.11.1 that do not require a UUID/KEY to be passed in are now supported. + - na_ontap_rest_info - support added for cluster. + - na_ontap_rest_info - support added for cluster/counter/tables. + - na_ontap_rest_info - support added for cluster/licensing/capacity-pools. + - na_ontap_rest_info - support added for cluster/licensing/license-managers. + - na_ontap_rest_info - support added for cluster/metrocluster/svms. + - na_ontap_rest_info - support added for cluster/sensors. + - na_ontap_rest_info - support added for name-services/cache/group-membership/settings. + - na_ontap_rest_info - support added for name-services/cache/host/settings. + - na_ontap_rest_info - support added for name-services/cache/netgroup/settings. + - na_ontap_rest_info - support added for name-services/cache/setting. + - na_ontap_rest_info - support added for name-services/cache/unix-group/settings. 
+ - na_ontap_rest_info - support added for name-services/ldap-schemas. + - na_ontap_rest_info - support added for network/fc/fabrics. + - na_ontap_rest_info - support added for network/fc/interfaces. + - na_ontap_rest_info - support added for network/fc/interfaces. + - na_ontap_rest_info - support added for network/ip/subnets. + - na_ontap_rest_info - support added for protocols/cifs/connections. + - na_ontap_rest_info - support added for protocols/cifs/netbios. + - na_ontap_rest_info - support added for protocols/cifs/session/files. + - na_ontap_rest_info - support added for protocols/cifs/shadow-copies. + - na_ontap_rest_info - support added for protocols/cifs/shadowcopy-sets. + - na_ontap_rest_info - support added for protocols/nfs/connected-client-maps. + - na_ontap_rest_info - support added for security. + - na_ontap_rest_info - support added for security/multi-admin-verify. + - na_ontap_rest_info - support added for security/multi-admin-verify/approval-groups. + - na_ontap_rest_info - support added for security/multi-admin-verify/requests. + - na_ontap_rest_info - support added for security/multi-admin-verify/rules. + - na_ontap_rest_info - support added for storage/file/moves. + - na_ontap_rest_info - support added for storage/pools. + +### Bug Fixes + - na_ontap_cifs - fix KeyError on `unix_symlink` field when using REST. + - na_ontap_cifs_acl - use `type` when deleting unix-user or unix-group from ACL in ZAPI. + - na_ontap_command - do not run command in check_mode (thanks to darksoul42). + - na_ontap_ems_destination - fix idempotency issue when `type` value is rest_api. + - na_ontap_interface - improve error message when interface type is required with REST. + - na_ontap_qtree - fix KeyError on unix_permissions. + - na_ontap_rest_cli - do not run command in check_mode (thanks to darksoul42). + - na_ontap_s3_groups - if `policies` is None module should no longer fail + - na_ontap_user - fix idempotency issue with 9.11 because of new is_ldap_fastbind field. 
+ - na_ontap_volume_efficiency -- Missing fields in REST get should return None and not crash module. + +### Added REST support to existing modules + - na_ontap_quotas - added REST support. + - na_ontap_net_subnet - added REST support. + +### New Module + - na_ontap_security_ssh - Updates the SSH server configuration for cluster and SVM scopes - REST only. + +### New Filters + - iso8601_duration_to_seconds - to convert a duration in ISO 8601 format to seconds. + - iso8601_duration_from_seconds - to convert seconds to a duration in ISO 8601 format. + +## 21.23.0 + +### New Options + - all REST modules - new option `force_ontap_version` to bypass permission issues with custom vsadmin roles. + - na_ontap_export_policy_rule - new option `force_delete_on_first_match` to support duplicate entries on delete. + - na_ontap_rest_info - new option `ignore_api_errors` to report error in subset rather than breaking execution. + - na_ontap_security_key_manager - new REST options `external` and `vserver` for external key manager. + - na_ontap_security_key_manager - new REST option `onboard` for onboard key manager. + +### New Rest Info + - na_ontap_rest_info - support added for protocols/vscan/on-access-policies. + - na_ontap_rest_info - support added for protocols/vscan/on-demand-policies. + - na_ontap_rest_info - support added for protocols/vscan/scanner-pools. + +### Bug Fixes + - na_ontap_cifs_acl - use `type` if present when fetching existing ACL with ZAPI. + - na_ontap_cifs_local_user_set_password - when using ZAPI, do not require cluster admin privileges. + - na_ontap_cluster_config Role - incorrect license was shown - updated to GNU General Public License v3.0 + - na_ontap_flexcache - properly use `origin_cluster` in GET but not in POST when using REST. + - na_ontap_kerberos_realm - fix cannot modify `comment` option in ZAPI. + - na_ontap_lun_copy - fix key error on `source_vserver` option. 
+ - na_ontap_s3_buckets - fix options that cannot be modified if not set in creating s3 buckets. + - na_ontap_s3_buckets - fix TypeError if `conditions` not present in policy statements. + - na_ontap_s3_buckets - updated correct choices in options `audit_event_selector.access` and `audit_event_selector.permission`. + - na_ontap_ntp - fixed typeError on `key_id` field with ZAPI. + +### Minor Changes + - na_ontap_export_policy_rule - `rule_index` is now optional for create and delete. + - na_ontap_interface - improved validations for unsupported options with FC interfaces. + - na_ontap_kerberos_realm - change `kdc_port` option type to int. + - na_ontap_ntp - for ONTAP version 9.6 or below fall back to ZAPI when `use_rest` is set to `auto` or fail when REST is desired. + - na_ontap_ntp_key - fail for ONTAP version 9.6 or below when `use_rest` is set to `auto` or when REST is desired. + - na_ontap_volume - attempt to delete volume even when unmounting or offlining failed. + +### Added REST support to existing modules + - na_ontap_cifs_local_user_set_password -- added REST support. + - na_ontap_cluster_ha - added REST support. + - na_ontap_lun_copy - added REST support. + - na_ontap_lun_map_reporting_nodes - added REST support. + - na_ontap_kerberos_realm - added REST support. + - na_ontap_security_key_manager - added REST support. + - na_ontap_ucadapter - added REST support. + - na_ontap_user_role -- added REST support. + +### New Module + - na_ontap_ems_destination - Manage EMS destination - Contribution by Bartosz Bielawski (@bielawb). + +## 21.22.0 + +### New Options + - na_ontap_job_schedule - new option `cluster` added. + - na_ontap_ldap_client - Added `ldaps_enabled` option in ZAPI. + +### Bug Fixes + - na_ontap_cluster_peer - report an error if there is an attempt to use the already peered clusters. + - na_ontap_interface - fix error deleting fc interface if it is enabled in REST. + - na_ontap_license - fix intermittent KeyError when adding licenses with REST. 
+ - na_ontap_lun - Added `lun_modify` after `app_modify` to fix idempotency issue. + - na_ontap_name_service_switch - fix AttributeError 'NoneType' object has no attribute 'get_children' if `sources` is '-' in current. + - na_ontap_name_service_switch - fix idempotency issue on `sources` option. + - na_ontap_security_key_manager - fix KeyError on `node`. + - na_ontap_ntp - fixed typeError on `key_id` field with ZAPI. + - na_ontap_service_processor_network - fix idempotency issue on `dhcp` option in ZAPI. + - na_ontap_service_processor_network - fail module when trying to disable `dhcp` and not setting one of `ip_address`, `netmask`, `gateway_ip_address` different than current. + - na_ontap_service_processor_network - allow manually configuring network if all of `ip_address`, `netmask`, `gateway_ip_address` set and `dhcp` not present in REST. + - na_ontap_service_processor_network - fix setting `dhcp` v4 takes more than `wait_for_completion` retries. + - na_ontap_service_processor_network - fix `wait_for_completion` ignored when trying to enable service processor network interface in ZAPI. + - na_ontap_software_update - improve error handling if image file is already present. + - na_ontap_software_update - improve error handling when node is rebooting with REST. + - na_ontap_software_update - when using REST with ONTAP 9.9 or later, timeout value is properly set. + - na_ontap_user - enforce that all methods are under a single application. + - na_ontap_user - is_locked was not properly read with ZAPI, making the module not idempotent. + +### Minor Changes + - na_ontap_license - return list of updated package names. + - na_ontap_nvme_subsystem - report subsystem as absent if vserver cannot be found when attempting a delete. + - na_ontap_rest_info - Will now warn you if a `gather_subset` is not supported by your version of ONTAP. + - na_ontap_rest_info - Will now include a message in return output about `gather_subset` not supported by your version of ONTAP. 
+ - na_ontap_security_key_manager - indicate that `node` is not used and is deprecated. + - na_ontap_software_update - deleting a software package is now supported with ZAPI and REST. + - na_ontap_svm - added vserver as a convenient alias for name when using module_defaults. + - na_ontap_wait_for_condition - added `snapmirror_relationship` to wait on `state` or `transfer_state` (REST only). + - na_ontap_ldap - fall back to ZAPI when `use_rest` is set to `auto` or fail when REST is desired. + - all modules - do not fail on ZAPI EMS log when vserver does not exist. + +### Added REST support to existing modules + - na_ontap_ldap_client - added REST support. + - na_ontap_name_service_switch - added REST support. + - na_ontap_wait_for_condition - added REST support. + +## 21.21.0 + +### New Options + - na_ontap_cluster_config role - support `broadcast_domain` and `service_policy` with REST. + - na_ontap_interface - support `broadcast_domain` with REST. + - na_ontap_lun - support `qos_adaptive_policy_group` with REST. + - na_ontap_ntp - added `key_id` for both REST and ZAPI + - na_ontap_qtree - added `unix_user` and `unix_group` options in REST. + - na_ontap_snapmirror - new option `validate_source_path` to disable this validation. + - na_ontap_unix_user - added new option `primary_gid` aliased to `group_id`. + - na_ontap_vserver_create role - support `broadcast_domain`, `ipspace`, and `service_policy` with REST. + +### Bug Fixes + - na_ontap_interface - enforce requirement for address/netmask for interfaces other than FC. + - na_ontap_interface - fix idempotency issue for cluster scoped interfaces when using REST. + - na_ontap_interface - fix potential node and uuid issues with LIF migration. + - na_ontap_interface - FC interfaces - scope is not supported. + - na_ontap_interface - FC interfaces - home_port is not supported for ONTAP 9.7 or earlier. + - na_ontap_interface - FC interfaces - home_node should not be sent as location.home_node. 
+ - na_ontap_interface - FC interfaces - service_policy is not supported. + - na_ontap_interface - ignore 'none' when using REST rather than reporting unexpected protocol. + - na_ontap_lun - catch ZAPI error on get LUN. + - na_ontap_lun - ignore resize error if no change was required. + - na_ontap_lun - report error if flexvol_name is missing when using ZAPI. + - na_ontap_net_subnet - fix `ipspace` option ignored in getting net subnet. + - na_ontap_qtree - fix idempotency issue on `unix_permissions` option. + - na_ontap_s3_buckets - accept `sid` as a number or a string. + - na_ontap_s3_buckets - Module will set `enabled` during create. + - na_ontap_s3_buckets - Module will not fail on create if no `policy` is given. + - na_ontap_svm - KeyError on CIFS when using REST with ONTAP 9.8 or lower. + - na_ontap_volume - fix idempotency issue on `unix_permissions` option. + - na_ontap_volume - `volume_security_style` was not modified if other security options were present with ZAPI. + - na_ontap_vserver_create role - add rule index as it is now required. + - na_ontap_snapmirror - relax check for source when using REST. + - na_ontap_snapmirror - fix potential issue when destination is using REST but source is using ZAPI. + +### New Module + - na_ontap_ntp_key - Manage NTP keys. + - na_ontap_s3_groups - Manage s3 groups. + - na_ontap_s3_policies - Manage S3 policies. + +### Minor Changes + - na_ontap_info - add quota-policy-info. + - na_ontap_info - add computed serial_hex and naa_id for lun_info. + - na_ontap_login_messages - support cluster scope when using REST. + - na_ontap_motd - deprecated in favor of `na_ontap_login_messages`. Fail when use_rest is set to `always` as REST is not supported. + - na_ontap_rest_info - add computed serial_hex and naa_id for storage/luns when serial_number is present. + - na_ontap_s3_users - `secret_key` and `access_token` are now returned when creating a user. 
+ - na_ontap_snapmirror - validate source endpoint for ZAPI and REST, accounting for vserver local name. + - na_ontap_snapmirror - improve error messages to be more specific and consistent. + - na_ontap_snapmirror - wait for the relationship to come back to idle after a resync. + - na_ontap_user - accept `service_processor` as an alias for `service-processor` with ZAPI, to be consistent with REST. + +### Known Issues: + - na_ontap_snapshot - added documentation to use UTC format for `expiry_time`. + +### Added REST support to existing modules + - na_ontap_service_processor_network - Added REST support. + - na_ontap_unix_group - added REST support. + - na_ontap_unix_user - added REST support. + - na_ontap_volume - now defaults to REST with `use_rest: auto`, like every other module. ZAPI can be forced with `use_rest: never`. + + +## 21.20.0 + +### Bug Fixes + - na_ontap_autosupport - fix idempotency issue on `state` field with ONTAP 9.11. + - na_ontap_autosupport - TypeError on `support` and `ondemand_enabled` field with ONTAP 9.11. + - na_ontap_net_subnet - delete fails if ipspace is different than Default. + - na_ontap_portset - fixed idempotency issue when `ports` has identical values. + - na_ontap_portset - fixed error when trying to remove partial ports from portset if igroups are bound to it. + - na_ontap_quotas - fix another quota operation is currently in progress issue. + - na_ontap_quotas - fix idempotency issue on `threshold` option. + - na_ontap_snapmirror - support for SSL certificate authentication for both sides when using ONTAP. + - na_ontap_snapmirror - fix issue where there was no wait on quiesce before aborting. + - na_ontap_snapmirror - fix issue where there was no wait on the relationship to end transferring. + - na_ontap_snapmirror - fix error in snapmirror restore by changing option `clean_up_failure` as optional when using ZAPI. + - na_ontap_software_update - now reports changed=False when the package is already present. 
+ - na_ontap_user - fix idempotency issue with SSH with second_authentication_method. + - na_ontap_vscan_on_access_policy - fixed options `filters`, `file_ext_to_exclude` and `paths_to_exclude` cannot be reset to empty values in ZAPI. + - na_ontap_zapit - fix failure in precluster mode. + +### New Options + - na_ontap_cifs_server - added `security` options in REST. + - na_ontap_export_policy_rule - added `from_index` for both REST and ZAPI. Change `rule_index` to required. + - na_ontap_snapmirror - new option `peer_options` to define source connection parameters. + - na_ontap_snapmirror - new option `transferring_time_out` to define how long to wait for transfer to complete on create or initialize. + - na_ontap_vscan_on_access_policy - new REST options `scan_readonly_volumes` and `only_execute_access` added. + - na_ontap_vserver_cifs_security - added option `encryption_required_for_dc_connections` and `use_ldaps_for_ad_ldap` in ZAPI. + +### New Modules + - na_ontap_s3_service - Manage S3 services. + - na_ontap_s3_users - Manage S3 users. + +### Minor Changes + - na_ontap_aggregate - updated `disk_types` in documentation. + - na_ontap_snapmirror - when deleting, attempt to delete even when the relationship cannot be broken. + - na_ontap_snapmirror - rewrite update for REST using POST to initiate transfer. + - na_ontap_svm - added documentation for `allowed_protocol`, ndmp is default in REST. + - na_ontap_user - add support for SAML authentication_method. + - na_ontap_vserver_cifs_security - added `use_ldaps_for_ad_ldap` and `use_start_tls_for_ad_ldap` as mutually exclusive in ZAPI. + - na_ontap_vserver_cifs_security - fall back to ZAPI when `use_rest` is set to `auto` or fail when REST is desired. + +### Added REST support to existing modules + - na_ontap_nvme_namespace - added REST support. + - na_ontap_nvme_subsystem - added REST support. + - na_ontap_portset - added REST support. + - na_ontap_software_update - added REST support. 
+ - na_ontap_vscan_on_access_policy - added REST support. + - na_ontap_vscan_on_demand_task - added REST support. + +## 21.19.1 + +### Bug Fixes + - na_ontap_cluster_config - fix the role to be able to create intercluster LIFs with REST (ipspace is required). + - na_ontap_interface - ignore `vserver` when using REST if role is one of 'cluster', 'node-mgmt', 'intercluster', 'cluster-mgmt'. + - na_ontap_nvme - fixed invalid boolean value error for `status_admin` when creating nvme service in ZAPI. + - na_ontap_nvme - fixed `status_admin` option is ignored if set to False when creating nvme service in REST. + - na_ontap_service_policy - fixed error in modify by changing resulting json of an existing record in REST. + - na_ontap_snapmirror - when using ZAPI, wait for the relationship to be quiesced before breaking. + - na_ontap_snapmirror - when using REST with a policy, fix AttributeError - 'str' object has no attribute 'get'. + +## 21.19.0 + +### Minor Changes + - na_ontap_interface - use REST when `use_rest` is set to `auto`. + - na_ontap_qos_adaptive_policy_group - warn about deprecation, fall back to ZAPI or fail when REST is desired. + - na_ontap_quotas - support TB as a unit, update doc with size format description. + +### New Options + - na_ontap_cifs_server - Added new option `force` for create, delete and `from_name`, `force` for rename when using REST. + - na_ontap_qos_policy_group - Added REST only supported option `adaptive_qos_options` for configuring adaptive policy. + - na_ontap_qos_policy_group - Added REST only supported option `fixed_qos_options` for configuring max/min throughput policy. + - na_ontap_rest_info - new option `owning_resource` for REST info that requires an owning resource. For instance volume for a snapshot + - na_ontap_cifs - Added `unix_symlink` option in REST. + +### New Module + - na_ontap_s3_bucket - Manage S3 Buckets. 
+ +### Bug Fixes + - na_ontap_cifs - fixed `symlink_properties` option silently ignored for cifs share creation when using REST. + - na_ontap_cifs - fixed error in modifying comment if it is not set while creating CIFS share in REST. + - na_ontap_command - fix typo in example. + - na_ontap_interface - rename fails with 'inconsistency in rename action' for cluster interface with REST. + - na_ontap_login_messages - fix typo in examples for username. + - na_ontap_nfs - fix TypeError on NoneType as `tcp_max_xfer_size` is not supported in earlier ONTAP versions. + - na_ontap_nfs - fix `Extra input` error with ZAPI for `is-nfsv4-enabled`. + - na_ontap_quotas - Fix idempotency issue on `disk_limit` and `soft_disk_limit`. + - na_ontap_rest_info: REST APIs with hyphens in the name will now be converted to underscores when `use_python_keys` is set to `True` so that YAML parsing works correctly. + - na_ontap_service_policy - fix examples in documentation. + - na_ontap_volume - use `time_out` value when creating/modifying/deleting volumes with REST rather than hardcoded value. + - na_ontap_volume - QOS policy was not set when using NAS application. + - na_ontap_volume - correctly warn when attempting to modify NAS application. + - na_ontap_volume - do not set encrypt on modify, as it is already handled with specialized ZAPI calls. + - na_ontap_volume_autosize - improve error reporting. 
+ +### New Rest Info + - na_ontap_rest_info - support added for application/consistency-groups + - na_ontap_rest_info - support added for cluster/fireware/history + - na_ontap_rest_info - support added for cluster/mediators + - na_ontap_rest_info - support added for cluster/metrocluster/dr-groups + - na_ontap_rest_info - support added for cluster/metrocluster/interconnects + - na_ontap_rest_info - support added for cluster/metrocluster/operations + - na_ontap_rest_info - support added for cluster/ntp/keys + - na_ontap_rest_info - support added for cluster/web + - na_ontap_rest_info - support added for name-services/local-hosts + - na_ontap_rest_info - support added for name-services/unix-groups + - na_ontap_rest_info - support added for name-services/unix-users + - na_ontap_rest_info - support added for network/ethernet/switch/ports + - na_ontap_rest_info - support added for network/fc/ports + - na_ontap_rest_info - support added for network/http-proxy + - na_ontap_rest_info - support added for network/ip/bgp/peer-groups + - na_ontap_rest_info - support added for protocols/audit + - na_ontap_rest_info - support added for protocols/cifs/domains + - na_ontap_rest_info - support added for protocols/cifs/local-groups + - na_ontap_rest_info - support added for protocols/cifs/local-users + - na_ontap_rest_info - support added for protocols/cifs/sessions + - na_ontap_rest_info - support added for protocols/cifs/users-and-groups/privilege + - na_ontap_rest_info - support added for protocols/cifs/unix-symlink-mapping + - na_ontap_rest_info - support added for protocols/file-access-tracing/events + - na_ontap_rest_info - support added for protocols/file-access-tracing/filters + - na_ontap_rest_info - support added for protocols/fpolicy + - na_ontap_rest_info - support added for protocols/locks + - na_ontap_rest_info - support added for protocols/ndmp + - na_ontap_rest_info - support added for protocols/ndmp/nodes + - na_ontap_rest_info - support added for 
protocols/ndmp/sessions + - na_ontap_rest_info - support added for protocols/ndmp/svms + - na_ontap_rest_info - support added for protocols/nfs/connected-clients + - na_ontap_rest_info - support added for protocols/nfs/export-policies/rules (Requires owning_resource to be set) + - na_ontap_rest_info - support added for protocols/nfs/kerberos/interfaces + - na_ontap_rest_info - support added for protocols/nvme/subsystem-controllers + - na_ontap_rest_info - support added for protocols/nvme/subsystem-maps + - na_ontap_rest_info - support added for protocols/s3/buckets + - na_ontap_rest_info - support added for protocols/s3/services + - na_ontap_rest_info - support added for protocols/san/iscsi/sessions + - na_ontap_rest_info - support added for protocols/san/portsets + - na_ontap_rest_info - support added for protocols/san/vvol-bindings + - na_ontap_rest_info - support added for security/anti-ransomware/suspects + - na_ontap_rest_info - support added for security/audit + - na_ontap_rest_info - support added for security/audit/messages + - na_ontap_rest_info - support added for security/authentication/cluster/ad-proxy + - na_ontap_rest_info - support added for security/authentication/cluster/ldap + - na_ontap_rest_info - support added for security/authentication/cluster/nis + - na_ontap_rest_info - support added for security/authentication/cluster/saml-sp + - na_ontap_rest_info - support added for security/authentication/publickeys + - na_ontap_rest_info - support added for security/azure-key-vaults + - na_ontap_rest_info - support added for security/certificates + - na_ontap_rest_info - support added for security/gcp-kms + - na_ontap_rest_info - support added for security/ipsec + - na_ontap_rest_info - support added for security/ipsec/ca-certificates + - na_ontap_rest_info - support added for security/ipsec/policies + - na_ontap_rest_info - support added for security/ipsec/security-associations + - na_ontap_rest_info - support added for security/key-manager-configs + 
- na_ontap_rest_info - support added for security/key-managers + - na_ontap_rest_info - support added for security/key-stores + - na_ontap_rest_info - support added for security/login/messages + - na_ontap_rest_info - support added for security/ssh + - na_ontap_rest_info - support added for security/ssh/svms + - na_ontap_rest_info - support added for storage/cluster + - na_ontap_rest_info - support added for storage/file/clone/split-loads + - na_ontap_rest_info - support added for storage/file/clone/split-status + - na_ontap_rest_info - support added for storage/file/clone/tokens + - na_ontap_rest_info - support added for storage/monitored-files + - na_ontap_rest_info - support added for storage/qos/workloads + - na_ontap_rest_info - support added for storage/snaplock/audit-logs + - na_ontap_rest_info - support added for storage/snaplock/compliance-clocks + - na_ontap_rest_info - support added for storage/snaplock/event-retention/operations + - na_ontap_rest_info - support added for storage/snaplock/event-retention/policies + - na_ontap_rest_info - support added for storage/snaplock/file-fingerprints + - na_ontap_rest_info - support added for storage/snaplock/litigations + - na_ontap_rest_info - support added for storage/switches + - na_ontap_rest_info - support added for storage/tape-devices + - na_ontap_rest_info - support added for storage/volumes/snapshots (Requires owning_resource to be set) + - na_ontap_rest_info - support added for support/auto-update + - na_ontap_rest_info - support added for support/auto-update/configurations + - na_ontap_rest_info - support added for support/auto-update/updates + - na_ontap_rest_info - support added for support/configuration-backup + - na_ontap_rest_info - support added for support/configuration-backup/backups + - na_ontap_rest_info - support added for support/coredump/coredumps + - na_ontap_rest_info - support added for support/ems/messages + - na_ontap_rest_info - support added for support/snmp + - na_ontap_rest_info - 
support added for support/snmp/users + - na_ontap_rest_info - support added for svm/migrations + +### Added REST support to existing modules + - na_ontap_igroup_initiator - Added REST support. + - na_ontap_iscsi - Added REST support. + - na_ontap_nvme - Added REST support. + - na_ontap_qos_policy_group - Added REST support. + + +## 21.18.1 + +### Bug Fixes + - na_ontap_iscsi - fixed error starting iscsi service on vserver where Service, adapter, or operation already started. + - na_ontap_lun - Fixed KeyError on options `force_resize`, `force_remove` and `force_remove_fenced` in ZAPI. + - na_ontap_lun - Fixed `force_remove` option silently ignored in REST. + - na_ontap_snapshot_policy - Don't validate parameter when state is `absent` and fix KeyError on `comment`. + +## 21.18.0 + +### New Options + - na_ontap_export_policy_rule - new option `ntfs_unix_security` for NTFS export UNIX security options added. + - na_ontap_volume - add support for SnapLock - only for REST. + - na_ontap_volume - new option `max_files` to increase the inode count value. + +### Minor Changes + - na_ontap_cluster_config role - use na_ontap_login_messages as na_ontap_motd is deprecated. + - na_ontap_debug - report ansible version and ONTAP collection version. + - na_ontap_snapmirror -- Added more descriptive error messages for REST + - na_ontap_svm - add support for web services (ssl modify) - REST only with 9.8 or later. + - na_ontap_volume - allow to modify volume after rename. + - na_ontap_vserver_create role - support max_volumes option. + +### Bug Fixes + - na_ontap_aggregate - Fixed error in delete aggregate if the `disk_count` is less than current disk count. + - na_ontap_autosupport - Fixed `partner_address` not working in REST. + - na_ontap_command - document that a READONLY user is not supported, even for show commands. + - na_ontap_disk_options - ONTAP 9.10.1 returns on/off rather than True/False. 
+ - na_ontap_info - [#54] Fixes issue with na_ontap_info failing in 9.1 because of `job-schedule-cluster`. + - na_ontap_iscsi - Fixed issue with `start_state` always being set to stopped when creating an ISCSI. + - na_ontap_qtree - Fixed issue with `oplocks` not being changed during a modify in ZAPI. + - na_ontap_qtree - Fixed issue with `oplocks` not warning user about not being supported in REST. + - na_ontap_snapshot - fix key error on volume when using REST. + - na_ontap_snapshot - add error message if volume is not found with REST. + - na_ontap_svm - fixed KeyError issue on protocols when vserver is stopped. + - na_ontap_volume - fix idempotency issue with compression settings when using REST. + - na_ontap_volume - do not attempt to mount volume if current state is offline. + - na_ontap_vserver_peer - Fixed AttributeError if `dest_hostname` or `peer_options` not present. + - na_ontap_vserver_peer - Fixed `local_name_for_peer` and `local_name_for_source` options silently ignored in REST. + - na_ontap_vserver_peer - Added cluster peer accept code in REST. + - na_ontap_vserver_peer - Get peer cluster name if remote peer exist else use local cluster name. + - na_ontap_vserver_peer - ignore job entry doesn't exist error with REST to bypass ONTAP issue with FSx. + - na_ontap_vserver_peer - report error if SVM peer does not see a peering relationship after create. + - Fixed ONTAP minor version ignored in checking minimum ONTAP version. + +### Added REST support to existing modules + - na_ontap_efficiency_policy - Added REST support. + - na_ontap_lun - Added Rest Support. + - na_ontap_snapshot_policy - Added Rest Support. + +## 21.17.3 + +### Bug Fixes + - na_ontap_lun_map - TypeError - '>' not supported between instances of 'int' and 'str '. + - na_ontap_snapmirror - Fixed bug by adding use_rest condition for the REST support to work when `use_rest: always`. 
+ +## 21.17.2 + +### Bug Fixes + - na_ontap_rest_info - Fixed an issue with adding field to specific info that didn't have a direct REST equivalent. + - na_ontap_lun_map - Fixed bug when deleting lun map using REST. + +## 21.17.1 + +### Bug Fixes + - na_ontap_lun_map - fixed bugs resulting in REST support to not work. + +## 21.17.0 + +### New Options + - na_ontap_cifs_acl - new option `type` for user-group-type. + +### Minor changes + - all modules that only support ZAPI - warn when `use_rest: always` is ignored. + +### Bug Fixes + - na_ontap_aggregate - Fixed UUID issue when attempting to attach object store as part of creating the aggregate with REST. + - na_ontap_cifs_server - error out if ZAPI only options `force` or `workgroup` are used with REST. + - na_ontap_cluster_peer - Fixed KeyError if both `source_intercluster_lifs` and `dest_intercluster_lifs` are not present in creating cluster. + - na_ontap_rest_info - Fixed example with wrong indentation for `use_python_keys`. + +### Added REST support to existing modules + - na_ontap_cifs - Added REST support to the CIFS share module. + - na_ontap_cifs_acl - Added REST support to the cifs share access control module. + - na_ontap_cluster_peer - Added REST support. + - na_ontap_lun_map - Added REST support. + - na_ontap_nfs - Added Rest Support. + - na_ontap_volume_clone - Added REST support. + +## 21.16.0 + +### New Options + - na_ontap_aggregate - Added `disk_class` option for REST and ZAPI. + - na_ontap_aggregate - Extended accepted `disk_type` values for ZAPI. + - na_ontap_volume - `logical_space_enforcement` to specify whether to perform logical space accounting on the volume. + - na_ontap_volume - `logical_space_reporting` to specify whether to report space logically on the volume. + - na_ontap_volume - `tiering_minimum_cooling_days` to specify how many days must pass before inactive data in a volume using the Auto or Snapshot-Only policy is considered cold and eligible for tiering. 
+ +### Bug Fixes + - na_ontap_active_directory - Fixed idempotency and traceback issues. + - na_ontap_aggregate - Fixed KeyError on unmount_volumes when offlining a volume if option is not set. + - na_ontap_aggregate - Report an error when attempting to change snaplock_type. + - na_ontap_igroup - `force_remove_initiator` option was ignored when removing initiators from existing igroup. + - na_ontap_info - Add active_directory_account_info. + - na_ontap_security_certificates - `intermediate_certificates` option was ignored. + - na_ontap_user - Fixed lock state is not set if password is not changed. + - na_ontap_user - Fixed TypeError 'tuple' object does not support item assignment. + - na_ontap_user - Fixed issue when attempting to change password for absent user when set_password is set. + - na_ontap_volume - Report error when attempting to change the nas_application tiering control from disallowed to required, or reciprocally. + - na_ontap_volume - Fixed error with unmounting junction_path in rest. + - na_ontap_volume - Fixed error when creating a flexGroup when `aggregate_name` and `aggr_list_multiplier` are not set in rest. + - four modules (mediator, metrocluster, security_certificates, wwpn_alias) would report a None error when REST is not available. + - module_utils - fixed KeyError on Allow when using OPTIONS method and the API failed. + +### Added REST support to existing modules + - na_ontap_aggregate - Added REST support. + - na_ontap_cifs_server - Added REST support to the cifs server module. + - na_ontap_ports - Added REST support to the ports module. + - na_ontap_volume_clone -- Added REST support. + +## 21.15.1 + +### Bug Fixes + - na_ontap_export_policy_rule - Fixed bug that prevented ZAPI and REST calls from working correctly. + +## 21.15.0 + +### New Options + - na_ontap_broadcast_domain - new REST only option `from_ipspace` added. + - na_ontap_svm - new REST options of svm admin_state `stopped` and `running` added. 
+ +### Bug Fixes + - na_ontap_info - Fixed KeyError on node for aggr_efficiency_info option against a metrocluster system. + - na_ontap_volume - Fixed issue that would fail the module in REST when changing `is_online` if two vserver volume had the same name. + - na_ontap_volume_efficiency - Removed restriction on policy name. + - na_ontap_broadcast_domain - fix idempotency issue when `ports` has identical values. + +### Minor Changes + - na_ontap_broadcast_domain_ports - warn about deprecation, fall back to ZAPI or fail when REST is desired. + - na_ontap_rest_info - update documentation for `fields` to clarify the list of fields that are returned by default. + - na_ontap_volume - If using REST and ONTAP 9.6 and `efficiency_policy` module will fail as `efficiency_policy` is not supported in ONTAP 9.6. + +### Added REST support to existing modules + - na_ontap_broadcast_domain - Added REST support to the broadcast domain module. + - na_ontap_export_policy_rule -- Added Rest support for Export Policy Rules. + - na_ontap_firmware_upgrade - REST support to download firmware and reboot SP. + - na_ontap_license - Added REST support to the license module. + - na_ontap_snapmirror - Added REST support to the na_ontap_snapmirror module. + +## 21.14.1 + +### Bug Fixes + - na_ontap_net_ifgrp - fix error in modify ports with zapi. + +## 21.14.0 + +### New Options + - na_ontap_aggregate - new option `encryption` to enable encryption with ZAPI. + - na_ontap_net_ifgrp - new REST only options `from_lag_ports`, `broadcast_domain` and `ipspace` added. + - na_ontap_restit - new option `wait_for_completion` to support asynchronous operations and wait for job completion. + - na_ontap_volume_efficiency - new option `storage_efficiency_mode` for AFF only with 9.10.1 or later. + +### Bug Fixes + - na_ontap_cifs_local_user_modify - unexpected argument `name` error with REST. + - na_ontap_cifs_local_user_modify - KeyError on `description` or `full_name` with REST. 
+ - na_ontap_export_policy - fix error if more than 1 vserver matched search name, the wrong uuid could be given. + - na_ontap_interface - fix error where module will fail for ONTAP 9.6 if use_rest: was set to auto. + - na_ontap_net_routes - metric was not always modified with ZAPI. + - na_ontap_net_routes - support cluster-scoped routes with REST. + - na_ontap_vserver_delete role - report error if ONTAP version is 9.6 or older. + +### Minor Changes + - na_ontap_vserver_delete role - added set_fact to accept `netapp_{hostname|username|password}` or `hostname`, `username` and `password` variables. + - na_ontap_vserver_delete role - do not report an error if the vserver does not exist. + +### Added REST support to existing modules + - na_ontap_fcp -- Added REST support for FCP. + - na_ontap_net_ifgrp - Added REST support to the net ifgrp module. + - na_ontap_net_port - Added REST support to the net port module. + - na_ontap_volume - Added REST support to the volume module. + - na_ontap_vserver_peer - Added REST support to the vserver_peer module. + +## 21.13.1 + +### Bug Fixes + - cluster scoped modules are failing on FSx with 'Vserver API missing vserver parameter' error. + +## 21.13.0 + +### Minor Changes + - na_ontap_object_store: support modifying an object store config with REST. + - PR15 - allow usage of Ansible module group defaults - for Ansible 2.12+. + +### New Options + - na_ontap_cluster - add `force` option when deleting a node. + - na_ontap_object_store: new REST options `owner` and `change_password`. + - na_ontap_net_vlan - new options `broadcast_domain`, `ipspace` and `enabled` when using REST. + +### Bug Fixes + - na_ontap_cluster - `single_node_cluster` was silently ignored with REST. + - na_ontap_cluster - switch to ZAPI when DELETE is required with ONTAP 9.6. + - na_ontap_snapshot - `expiry_time` required REST api, will return error if set when using ZAPI. 
+ - na_ontap_snapshot - `snapmirror_label` is supported with REST on ONTAP 9.7 or higher, report error if used on ONTAP 9.6. + - na_ontap_snapmirror - `source_path` and `source_hostname` parameters are not mandatory to delete snapmirror relationship when source cluster is unknown, if specified it will delete snapmirror at destination and release the same at source side. if not, it only deletes the snapmirror at destination and will not look for source to perform snapmirror release. + - na_ontap_snapmirror - modify policy, schedule and other parameter failure are fixed. + - na_ontap_svm - module will fail on init if a rest only and zapi only option are used at the same time. + - na_ontap_storage_failover - KeyError on 'ha' if the system is not configured as HA. + +### Added REST support to existing modules + - na_ontap_interface - Added REST support to the interface module (for IP and FC interfaces). + - na_ontap_net_vlan - Added REST support to the net vlan module. + +## 21.12.0 + +### Minor Changes + - na_ontap_firewall_policy - added `none` as a choice for `service` which is supported from 9.8 ONTAP onwards. + +### New Options + - na_ontap_svm - new option `max_volumes`. + +### Bug Fixes + - na_ontap_job_schedule - fix idempotency issue with ZAPI when job_minutes is set to -1. + - na_ontap_job_schedule - cannot modify options not present in create when using REST. + - na_ontap_job_schedule - modify error if month is present but not changed with 0 offset when using REST. + - na_ontap_job_schedule - modify error if month is changed from some values to all (-1) when using REST. + - na_ontap_svm - support `allowed protocols` with REST for ONTAP 9.6 and later. + - na_ontap_vserver_delete role - fix typos for cifs. + +### Added REST support to existing modules + - na_ontap_cluster - Added REST support to the cluster module. + +## 21.11.0 + +### New Options + - na_ontap_interface - new option `from_name` to rename an interface. 
+ - na_ontap_software_update - new option `validate_after_download` to run ONTAP software update validation checks. + - na_ontap_svm - new option `services` to allow and/or enable protocol services when using REST. + - na_ontap_svm - new option `ignore_rest_unsupported_options` to ignore older ZAPI options not available in REST. + +### Minor Changes + - na_ontap_software_update - remove `absent` as a choice for `state` as it has no use. + - na_ontap_svm - ignore `aggr_list: '*'` when using REST. + +### Bug Fixes + - na_ontap_job_schedule - fix idempotency issue with REST when job_minutes is set to -1. + - na_ontap_ldap_client - remove limitation on schema so that custom schemas can be used. + +### Added REST support to existing modules + - na_ontap_ntp - Added REST support to the ntp module. + +## 21.10.0 + +### Minor Changes + - na_ontap_cifs_server - `force` option is supported when state is absent to ignore communication errors. + +### Bug Fixes + - na_ontap_vserver_delete role - delete iSCSI igroups and CIFS server before deleting vserver. + - all modules - traceback on ONTAP 9.3 (and earlier) when trying to detect REST support. + + +## 21.9.0 + +### Minor Changes + - na_ontap_rest_info - The Default for `gather_subset` has been changed to demo which returns `cluster/software`, `svm/svms`, `cluster/nodes`. To return all Info must specifically list `all` in your playbook. Do note `all` is a very resource-intensive action and it is highly recommended to call just the info/APIs you need. + - na_ontap_rest_info - added file_directory_security to return the effective permissions of the directory. When using file_directory_security it must be called with gather_subsets and path and vserver must be specified in parameters. + +### New Options + - na_ontap_job_schedule - new option `month_offset` to explicitly select 0 or 1 for January. + - na_ontap_object_store - new options `port`, `certificate_validation_enabled`, `ssl_enabled` for target server. 
+ - na_ontap_rest_info - new option `use_python_keys` to replace `svm/svms` with `svm_svms` to simplify post processing. + +### Added REST support to existing modules + - na_ontap_snmp - Added REST support to the SNMP module + - na_ontap_rest_info - All Info that exist in `na_ontap_info` that has REST equivalents have been implemented. Note that the returned structure for REST and the variable names in the structure is different from the ZAPI based `na_ontap_info`. Some default variables in ZAPI are no longer returned by default in REST and will need to be specified using the `field` option + - na_ontap_rest_info - The following info's have been added `system_node_info`, `net_interface_info`, `net_port_info`, `security_login_account_info`, `vserver_peer_info`, `cluster_image_info`, `cluster_log_forwarding_info`, `metrocluster_info`, `metrocluster_node_info`, `net_dns_info`, `net_interface_service_policy_info`, `vserver_nfs_info`, `clock_info`, `igroup_info`, `vscan_status_info`, `vscan_connection_status_all_info`, `storage_bridge_info`, `nvme_info`, `nvme_interface_info`, `nvme_subsystem_info`, `cluster_switch_info`, `export_policy_info`, `kerberos_realm_info`,`sis_info`, `sis_policy_info`, `snapmirror_info`, `snapmirror_destination_info`, `snapmirror_policy_info`, `sys_cluster_alerts`, `cifs_vserver_security_info` + +### Bug Fixes + - na_ontap_job_schedule - fix documentation for REST ranges for months. + - na_ontap_quotas - attempt a retry on `13001:success` ZAPI error. Add debug data. + - na_ontap_object_store - when using REST, wait for job status to correctly report errors. + - na_ontap_rest_cli - removed incorrect statement indicating that console access is required. + +## 21.8.1 + +### Bug Fixes + - all REST modules: 9.4 and 9.5 were incorrectly detected as supporting REST. + - na_ontap_snapmirror: improve error message when option is not supported with ZAPI. 
+ +## 21.8.0 + +### New Modules + - na_ontap_cifs_local_user_set_password - set local user password - ZAPI only. + - na_ontap_fdsd - add or remove File Directory Security Descriptor - REST only. + - na_ontap_fdsp - create or delete a File Directory Security Policy - REST only. + - na_ontap_fdspt - add, remove or modify a File Directory Security Policy Task - REST only. + - na_ontap_fdss - apply security policy settings to files and directories in a vserver. + - na_ontap_partitions - assign/unassign disk partitions - REST only. + +### New role + - na_ontap_vserver_delete - delete vserver and all associated data and resources - REST only. + +### New Options + - na_ontap_cluster_peer - new option `peer_options` to use different credentials on peer. + - na_ontap_net_port - new option `up_admin` to set administrative state. + - na_ontap_snapshot - new option `expiry_time`. + - na_ontap_vserver_peer - new option `peer_options` to use different credentials on peer. + +### Added REST support to existing modules + - na_ontap_snapshot - added REST support for snapshot creation, modification & deletion. + +### Bug Fixes + - na_ontap_cluster_peer - KeyError on dest_cluster_name if destination is unreachable. + - na_ontap_cluster_peer - KeyError on username when using certificate. + - na_ontap_export_policy_rule - change `anonymous_user_id` type to str to accept user name and user id. (A warning is now triggered when a number is not quoted.) + - na_ontap_vserver_peer - KeyError on username when using certificate. + - na_ontap_volume_clone - `parent_vserver` can not be given with `junction_path`, `uid`, or `gid` + - all modules - fix traceback TypeError 'NoneType' object is not subscriptable when hostname points to a web server. + +### Minor Changes + - na_ontap_debug - additional checks when REST is available to help debug vserver connectivity issues. + - na_ontap_net_port - change option types to bool and int respectively for `autonegotiate_admin` and `mtu`. 
+ - na_ontap_rest_info - add examples for `parameters` option. + - na_ontap_volume - show warning when resize is ignored because threshold is not reached. + [WARNING]: resize request ignored: 2.5% is below the threshold: 10% + - na_ontap_vserver_create role - add `nfsv3`, `nfsv4`, `nfsv41` options. + - na_ontap_flexcache - corrected module name in documentation Examples + +## 21.7.0 + +### New Modules + - na_ontap_publickey - add/remove/modify public keys for SSH authentication - REST only. + - na_ontap_service_policy - add/remove/modify service policies for IP interfaces - REST only. + +### New Options + - na_ontap_cifs - new option `comment` to associate a description to a CIFS share. + - na_ontap_disks - new option `min_spares`. + - na_ontap_lun - new suboption `exclude_aggregates` for SAN application. + - na_ontap_volume - new suboption `exclude_aggregates` for NAS application. + +### Minor Changes + - na_ontap_disks - added REST support for the module. + - na_ontap_disks - added functionality to reassign spare disks from a partner node to the desired node. + - na_ontap_igroups - nested igroups are not supported on ONTAP 9.9.0 but are on 9.9.1. + - License displayed correctly in Github + +### Bug Fixes + - na_ontap_iscsi_security - cannot change authentication_type + - na_ontap_iscsi_security - IndexError list index out of range if vserver does not exist + +## 21.6.1 + +### Bug Fixes + - na_ontap_autosupport - KeyError: No element by given name validate-digital-certificate. + - na_ontap_flexcache - one occurrence of msg missing in call to fail_json. + - na_ontap_igroup - one occurrence of msg missing in call to fail_json. + - na_ontap_lun - three occurrences of msg missing in call to fail_json. + - na_ontap_lun_map_reporting_nodes - one occurrence of msg missing in call to fail_json. + - na_ontap_snapmirror - one occurrence of msg missing in call to fail_json. 
+ +## 21.6.0 + +### New Options + - na_ontap_users - new option `application_dicts` to associate multiple authentication methods to an application. + - na_ontap_users - new option `application_strs` to disambiguate `applications`. + - na_ontap_users - new option `replace_existing_apps_and_methods`. + - na_ontap_users - new suboption `second_authentication_method` with `application_dicts` option. + - na_ontap_vserver_peer - new options `local_name_for_source` and `local_name_for_peer` added. + +### Minor changes + - na_ontap_rest_info - Added "autosupport_check_info"/"support/autosupport/check" to the attributes that will be collected when gathering info using the module. + +### Bug Fixes + - na_ontap_autosupport - TypeError - '>' not supported between instances of 'str' and 'list'. + - na_ontap_quotas - fail to reinitialize on create if quota is already on. + +## 21.5.0 + +### New Options + - na_ontap_autosupport - new option 'nht_data_enabled' to specify whether the disk health data is collected as part of the AutoSupport data. + - na_ontap_autosupport - new option 'perf_data_enabled' to specify whether the performance data is collected as part of the AutoSupport data. + - na_ontap_autosupport - new option 'retry_count' to specify the maximum number of delivery attempts for an AutoSupport message. + - na_ontap_autosupport - new option 'reminder_enabled' to specify whether AutoSupport reminders are enabled or disabled. + - na_ontap_autosupport - new option 'max_http_size' to specify delivery size limit for the HTTP transport protocol (in bytes). + - na_ontap_autosupport - new option 'max_smtp_size' to specify delivery size limit for the SMTP transport protocol (in bytes). + - na_ontap_autosupport - new option 'private_data_removed' to specify the removal of customer-supplied data. + - na_ontap_autosupport - new option 'local_collection_enabled' to specify whether collection of AutoSupport data when the AutoSupport daemon is disabled. 
+ - na_ontap_autosupport - new option 'ondemand_enabled' to specify whether the AutoSupport OnDemand Download feature is enabled. + - na_ontap_autosupport - new option 'validate_digital_certificate' which when set to true each node will validate the digital certificates that it receives. + +### Added REST support to existing modules + - na_ontap_autosupport - added REST support for ONTAP autosupport modification. + +### Bug Fixes + - na_ontap_qtree - wait for completion when creating or modifying a qtree with REST. + - na_ontap_volume - ignore read error because of insufficient privileges for efficiency options so that the module can be run as vsadmin. + +### Minor changes + - na_ontap_info - Added "autosupport_check_info" to the attributes that will be collected when gathering info using the module. + +## 21.4.0 + +### New Modules + - na_ontap_cifs_local_user_modify: Modify a local CIFS user. + - na_ontap_disk_options: Modify storage disk options. + - na_ontap_fpolicy_event: Create, delete or modify an FPolicy policy event. + - na_ontap_fpolicy_ext_engine: Create, modify or delete an fPolicy External Engine. + - na_ontap_fpolicy_scope: Create, delete or modify an FPolicy policy scope. + - na_ontap_fpolicy_status: Enable or disable an existing fPolicy policy. + - na_ontap_snaplock_clock: Initialize snaplock compliance clock. + +### New Options + - na_ontap_igroups - new option `initiator_objects` to support initiator comments (requires ONTAP 9.9). + - na_ontap_igroups - new option `initiator_names` as a replacement for `initiators` (still supported as an alias). + +### Minor changes + - na_ontap_lun - allow new LUNs to use different igroup or os_type when using SAN application. + - na_ontap_lun - ignore small increase (lower than provisioned) and small decrease (< 10%) in `total_size`. + - na_ontap_volume_efficiency - updated to now allow for storage efficiency start and storage efficiency stop. 
+ +### Bug fixes + - na_ontap_autosupport - warn when password is present in `proxy_url` as it makes the operation not idempotent. + - na_ontap_cluster - ignore ZAPI EMS log error when in pre-cluster mode. + - na_ontap_lun - SAN application is not supported on 9.6 and only partially supported on 9.7 (no modify). + - na_ontap_svm - iscsi current status is not read correctly (misspelled issi). + - na_ontap_volume - warn when attempting to modify application only options. + +## 21.3.1 + +### Bug fixes + - na_ontap_snapmirror: check for consistency_group_volumes always fails on 9.7, and cluster or ipspace when using endpoints with ZAPI. + +## 21.3.0 + +### New Modules + - na_ontap_domain_tunnel: Create, delete or modify the domain tunnel. + - na_ontap_storage_failover: Enables and disables storage failover. + - na_ontap_security_config: Modify the security configuration for SSL. + - na_ontap_storage_auto_giveback: Enables and disables storage auto giveback. + - na_ontap_fpolicy_policy: Create, delete or modify an fpolicy policy + +### New Options + - na_ontap_flexcache - support for `prepopulate` option when using REST (requires ONTAP 9.8). + - na_ontap_igroups - new option `igroups` to support nested igroups (requires ONTAP 9.9). + - na_ontap_ldap_client - `tcp_port` replaces `port`. + - na_ontap_volume - new suboption `dr_cache` when creating flexcache using NAS application template (requires ONTAP 9.9). + +### Minor changes + - na_ontap_debug - improve error reporting for import errors on netapp_lib. + - na_ontap_flexcache - mount/unmount the FlexCache volume when using REST. + - na_ontap_info - improve error reporting for import errors on netapp_lib, json, xmltodict. + +### Added REST support to existing modules + - na_ontap_flexcache - added REST support for ONTAP FlexCache creation and deletion. + - na_ontap_node - added REST support for Node modify and rename. 
+ - na_ontap_snapmirror - SVM scoped policies were not found when using a destination path with REST application. + +### Bug fixes + - na_ontap_ldap_client - `port` was incorrectly used instead of `tcp_port`. + - na_ontap_motd - added warning for deprecated and to use na_ontap_login_messages module. + - na_ontap_node - KeyError fix for location and asset-tag. + - na_ontap_volume - changes in `encrypt` settings were ignored. + - na_ontap_volume - unmount volume before deleting it when using REST. + - na_ontap_volume_efficiency - `policy` updated to allow for supported '-' as a valid entry. + - na_ontap_volume_efficiency - to allow for FAS ONTAP systems to enable volume efficiency. + - na_ontap_volume_efficiency - to allow for FAS ONTAP systems to enable volume efficiency and apply parameters in one execution. + +## 21.2.0 + +### New Modules + - na_ontap_cifs_local_group_member: Add or remove CIFS local group member + - na_ontap_volume_efficiency: Enables, disables or modifies volume efficiency + - na_ontap_log_forward: Create, delete or modify the log forward configuration + - na_ontap_lun_map_reporting_nodes: Add and remove lun map reporting nodes + +### New Options + - na_ontap_lun - new option `comment`. + - na_ontap_lun - new option `qos_adaptive_policy_group`. + - na_ontap_lun - new option `scope` to explicitly force operations on the SAN application or a single LUN. + - na_ontap_node - added modify function for location and asset tag for node. + - na_ontap_snapmirror - new options `source_endpoint` and `destination_endpoint` to group endpoint suboptions. + - na_ontap_snapmirror - new suboptions `consistency_group_volumes` and `ipspace` to endpoint options. + +### Minor changes + - na_ontap_lun - convert existing LUNs and supporting volume to a smart container within a SAN application. + - na_ontap_snapmirror - improve error reporting or warn when REST option is not supported. 
+ - na_ontap_snapmirror - deprecate older options for source and destination paths, volumes, vservers, and clusters. + - na_ontap_snapmirror - report warning when relationship is present but not healthy. + +### Bug fixes + - na_ontap_igroup - report error when attempting to modify an option that cannot be changed. + - na_ontap_lun - `qos_policy_group` could not be modified if a value was not provided at creation. + - na_ontap_lun - `tiering` options were ignored in san_application_template. + - na_ontap_volume - returns an error now if deleting a volume with REST api fails. + - na_ontap_volume - report error from resize operation when using REST. + +### Added REST support to existing modules + - na_ontap_igroup - added REST support for ONTAP igroup creation, modification, and deletion. + +## 21.1.1 + +### Bug fixes + - All REST modules: ONTAP 9.4 and 9.5 are incorrectly detected as supporting REST with `use_rest: auto`. + +## 21.1.0 + +### New Modules + - na_ontap_debug: Diagnose netapp-lib import errors and provide useful information. + +### New Options + - na_ontap_cluster - `time_out` to wait for cluster creation, adding and removing a node. + - na_ontap_debug - connection diagnostics added for invalid ipaddress and DNS hostname errors. + - na_ontap_lun - `total_size` and `total_size_unit` when using SAN application template. + - na_ontap_snapmirror - `create_destination` to automatically create destination endpoint (ONTAP 9.7). + - na_ontap_snapmirror - `destination_cluster` to automatically create destination SVM for SVM DR (ONTAP 9.7). + - na_ontap_snapmirror - `source_cluster` to automatically set SVM peering (ONTAP 9.7). + +### Minor changes + - na_ontap_firmware_upgrade - Added a new 'storage' type as default firmware_type. + - na_ontap_info - deprecate `state` option. + - na_ontap_lun - support increasing lun_count and total_size when using SAN application template. + - na_ontap_quota - allow to turn quota on/off without providing quota_target or type. 
+ - na_ontap_rest_info - deprecate `state` option. + - na_ontap_snapmirror - use REST API for create action if target supports it. (ZAPIs are still used for all other actions). + - na_ontap_volume - use REST API for delete operation if targets supports it. + - general - improve error reporting when older version of netapp-lib is used. + +### Bug fixes + - na_ontap_lun - REST expects 'all' for tiering policy and not 'backup'. + - na_ontap_quotas - Handle blank string idempotency issue for `quota_target` in quotas module. + - na_ontap_rest_info - `changed` was set to "False" rather than boolean False. + - na_ontap_snapmirror - report error when attempting to change relationship_type. + - na_ontap_snapmirror - fix job update failures for load_sharing mirrors. + - na_ontap_snapmirror - wait up to 5 minutes for abort to complete before issuing a delete. + - na_ontap_snmp - SNMP module wrong access_control issue and error handling fix. + - na_ontap_volume - REST expects 'all' for tiering policy and not 'backup'. + - na_ontap_volume - detect and report error when attempting to change FlexVol into FlexGroup. + - na_ontap_volume - report error if `aggregate_name` option is used with a FlexGroup. + +## 20.12.0 + +### New Options + - na_ontap_igroup - new option `os_type` to replace `ostype` (but ostype is still accepted). + - na_ontap_info - new fact: cifs_options_info. + - na_ontap_info - new fact: cluster_log_forwarding_info. + - na_ontap_info - new fact: event_notification_destination_info. + - na_ontap_info - new fact: event_notification_info. + - na_ontap_info - new fact: security_login_role_config_info. + - na_ontap_info - new fact: security_login_role_info. + - na_ontap_lun - new option `from_name` to rename a LUN. + - na_ontap_lun - new option `os_type` to replace `ostype` (but ostype is still accepted), and removed default to `image`. + - na_ontap_lun - new option `qos_policy_group` to assign a qos_policy_group to a LUN. 
+ - na_ontap_lun - new option `san_application_template` to create LUNs without explicitly creating a volume and using REST APIs.
+ - na_ontap_qos_policy_group - new option `is_shared` for sharing QOS SLOs or not.
+ - na_ontap_quota_policy - new option `auto_assign` to assign quota policy to vserver.
+ - na_ontap_quotas - new option `activate_quota_on_change` to resize or reinitialize quotas.
+ - na_ontap_quotas - new option `perform_user_mapping` to perform user mapping for the user specified in quota-target.
+ - na_ontap_rest_info - Support for gather subsets: `cifs_home_directory_info, cluster_software_download, event_notification_info, event_notification_destination_info, security_login_info, security_login_rest_role_info`
+ - na_ontap_svm - warning for `aggr_list` wildcard value (`*`) in create/modify idempotency.
+ - na_ontap_volume - `compression` to enable compression on a FAS volume.
+ - na_ontap_volume - `inline-compression` to enable inline compression on a volume.
+ - na_ontap_volume - `nas_application_template` to create a volume using nas application REST API.
+ - na_ontap_volume - `size_change_threshold` to ignore small changes in volume size.
+ - na_ontap_volume - `sizing_method` to resize a FlexGroup using REST.
+
+### Bug fixes
+ - na_ontap_broadcast_domain_ports - properly report check_mode `changed`.
+ - na_ontap_cifs - fix for AttributeError - 'NoneType' object has no attribute 'get' on line 300
+ - na_ontap_user - application parameter expects only `service_processor` but module supports `service-processor`.
+ - na_ontap_volume - change in volume type was ignored and now reporting an error.
+ - na_ontap_volume - checking for success before failure led to 'NoneType' object has no attribute 'get_child_by_name' when modifying a Flexcache volume.
+ +## 20.11.0 + +### New Modules + - na_ontap_metrocluster_dr_group: Configure a Metrocluster DR group (Supports ONTAP 9.8+) + +### Minor changes + - na_ontap_cifs - output `modified` if a modify action is taken. + - na_ontap_cluster_peer: optional parameter 'ipspace' added for cluster peer. + - na_ontap_info - do not require write access privileges. This also enables other modules to work in check_mode without write access permissions. + - na_ontap_lun - support modify for space_allocation and space_reserve. + - na_ontap_mcc_mediator - improve error reporting when REST is not available. + - na_ontap_metrocluster - improve error reporting when REST is not available. + - na_ontap_wwpn_alias - improve error reporting when REST is not available. + - na_ontap_software_update - add `force_update` option to ignore current version. + - na_ontap_svm - output `modified` if a modify action is taken. + - all ZAPI modules - optimize Basic Authentication by adding Authorization header proactively. + - This can be disabled by setting the `classic_basic_authorization` feature_flag to True. + +### Bug fixes + - All REST modules, will not fail if a job fails + - na_ontap_cifs - fix idempotency issue when `show-previous-versions` is used. + - na_ontap_firmware_upgrade - fix ValueError issue when processing URL error. + - na_ontap_info - Use `node-id` as key rather than `current-version`. + - na_ontap_ipspace - invalid call in error reporting (double error). + - na_ontap_lun - `use_exact_size` to create a lun with the exact given size so that the lun is not rounded up. + - na_ontap_metrocluster: Fix issue where module would fail on waiting for rest api job + - na_ontap_software_update - module is not idempotent. 
+ +## 20.10.0 + +### New Options +- na_ontap_rest_info: Support for gather subsets - `application_info, application_template_info, autosupport_config_info , autosupport_messages_history, ontap_system_version, storage_flexcaches_info, storage_flexcaches_origin_info, storage_ports_info, storage_qos_policies, storage_qtrees_config, storage_quota_reports, storage_quota_policy_rules, storage_shelves_config, storage_snapshot_policies, support_ems_config, support_ems_events, support_ems_filters` + +### Bug fixes +- na_ontap_aggregate: support concurrent actions for rename/modify/add_object_store and create/add_object_store. +- na_ontap_cluster: `single_node_cluster` option was ignored. +- na_ontap_info: better reporting on KeyError traceback, option to ignore error. +- na_ontap_info: KeyError on `tree` for quota_report_info. +- na_ontap_snapmirror_policy: report error when attempting to change `policy_type` rather than taking no action. +- na_ontap_volume: `encrypt: false` is ignored when creating a volume. + +## 20.9.0 + +### New Modules +- na_ontap_active_directory: configure active directory. +- na_ontap_mcc_mediator: Configure a MCC Mediator (Supports ONTAP 9.8+). +- na_ontap_metrocluster: Configure a metrocluster (Supports ONTAP 9.8+). + +### New Options +- na_ontap_cluster: `node_name` to set the node name when adding a node, or as an alternative to `cluster_ip_address` to remove a node. +- na_ontap_cluster: `state` can be set to `absent` to remove a node identified with `cluster_ip_address` or `node_name`. +- na_ontap_qtree: `wait_for_completion` and `time_out` to wait for qtree deletion when using REST. +- na_ontap_quotas: `soft_disk_limit` and `soft_file_limit` for the quota target. +- na_ontap_rest_info: Support for gather subsets - `initiator_groups_info, san_fcp_services, san_iscsi_credentials, san_iscsi_services, san_lun_maps, storage_luns_info, storage_NVMe_namespaces.` + +### Bug fixes +- na_ontap_cluster: `check_mode` is now working properly. 
+- na_ontap_interface: `home_node` is not required in pre-cluster mode.
+- na_ontap_interface: `role` is not required if `service_policy` is present and ONTAP version is 9.8.
+- na_ontap_interface: traceback in get_interface if node is not reachable.
+- na_ontap_job_schedule: allow 'job_minutes' to set number to -1 for job creation with REST too.
+- na_ontap_qtree: fixed `None is not subscriptable` exception on rename operation.
+- na_ontap_volume: fixed `KeyError` exception on `size` when reporting creation error.
+- na_ontap_*: change version_added: '2.6' to version_added: 2.6.0 where applicable to satisfy sanity checker.
+- netapp.py: uncaught exception (traceback) on zapi.NaApiError.
+
+## 20.8.0
+
+### New Modules
+- na_ontap_file_directory_policy: create, modify, delete vserver security file directory policy/task.
+- na_ontap_ssh_command: send CLI command over SSH using paramiko for corner cases where ZAPI or REST are not yet ready.
+- na_ontap_wait_for_condition: wait for event to be present or absent (currently sp_upgrade/in_progress and sp_version).
+
+### New Options
+- na_ontap_aggregate: support `disk_size_with_unit` option.
+- na_ontap_ldap_client: support `ad_domain` and `preferred_ad_server` options.
+- na_ontap_rest_info: Support for gather subsets - `cloud_targets_info, cluster_chassis_info, cluster_jobs_info, cluster_metrics_info, cluster_schedules, broadcast_domains_info, cluster_software_history, cluster_software_packages, network_ports_info, ip_interfaces_info, ip_routes_info, ip_service_policies, network_ipspaces_info, san_fc_logins_info, san_fc_wppn-aliases, svm_dns_config_info, svm_ldap_config_info, svm_name_mapping_config_info, svm_nis_config_info, svm_peers_info, svm_peer-permissions_info`.
+- na_ontap_rest_info: Support for gather subsets for 9.8+ - `cluster_metrocluster_diagnostics`.
+- na_ontap_qtree: `force_delete` option with a DEFAULT of `true` so that ZAPI behavior is aligned with REST.
+- na_ontap_security_certificates: `ignore_name_if_not_supported` option to not fail if `name` is present since `name` is not supported in ONTAP 9.6 and 9.7.
+- na_ontap_software_update: added `timeout` option to give enough time for the update to complete.
+
+### Bug fixes
+- na_ontap_aggregate: `disk-info` error when using `disks` option.
+- na_ontap_autosupport_invoke: `message` has changed to `autosupport_message` as Redhat has reserved this word. `message` has been alias'd to `autosupport_message`.
+- na_ontap_cifs_vserver: fix documentation and add more examples.
+- na_ontap_cluster: module was not idempotent when changing location or contact information.
+- na_ontap_igroup: idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase).
+- na_ontap_igroup_initiator: idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase).
+- na_ontap_security_certificates: allows (`common_name`, `type`) as an alternate key since `name` is not supported in ONTAP 9.6 and 9.7.
+- na_ontap_info: Fixed error causing module to fail on `metrocluster_check_info`, `env_sensors_info` and `volume_move_target_aggr_info`.
+- na_ontap_snapmirror: fixed KeyError when accessing `relationship_type` parameter.
+- na_ontap_snapmirror_policy: fixed a race condition when creating a new policy.
+- na_ontap_snapmirror_policy: fixed idempotency issue with `is_network_compression_enabled` for REST.
+- na_ontap_software_update: ignore connection errors during update as nodes may not be reachable.
+- na_ontap_user: enable lock state and password to be set in the same task for existing user.
+- na_ontap_volume: issue when snapdir_access and atime_update not passed together.
+- na_ontap_vscan_on_access_policy: `bool` type was not properly set for `scan_files_with_no_ext`.
+- na_ontap_vscan_on_access_policy: `policy_status` enable/disable option was not supported.
+- na_ontap_vscan_on_demand_task: `file_ext_to_include` was not handled properly. +- na_ontap_vscan_scanner_pool_policy: scanner_pool apply policy support on modification. +- na_ontap_vserver_create(role): lif creation now defaults to system-defined unless iscsi lif type. +- use_rest supports case insensitive. + +### Module documentation changes +- use a three group format for `version_added`. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. +- add `type:` and `elements:` information where missing. +- update `required:` information. + +## 20.7.0 + +### New Modules +- na_ontap_security_certificates: Install, create, sign, delete security certificates. + +### New Options: +- na_ontap_info: support `continue_on_error` option to continue when a ZAPI is not supported on a vserver, or for cluster RPC errors. +- na_ontap_info: support `query` option to specify which objects to return. +- na_ontap_info: support `vserver` tunneling to limit output to one vserver. +- na_ontap_snapmirror_policy: support for SnapMirror policy rules. +- na_ontap_vscan_scanner_pool: support modification. +- na_ontap_rest_info: Support for gather subsets - `cluster_node_info, cluster_peer_info, disk_info, cifs_services_info, cifs_share_info`. +- module_utils/netapp: add retry on wait_on_job when job failed. Abort 3 consecutive errors. + +### Bug fixes: +- na_ontap_command: replace invalid backspace characters (0x08) with '.'. +- na_ontap_firmware_download: exception on PCDATA if ONTAP returns a BEL (0x07) character. +- na_ontap_info: lists were incorrectly processed in convert_keys, returning {}. +- na_ontap_info: qtree_info is missing most entries. Changed key from `vserver:id` to `vserver:volume:id` . +- na_ontap_iscsi_security: adding no_log for password parameters. +- na_ontap_portset: adding explicit error message as modify portset is not supported. +- na_ontap_snapmirror: fixed snapmirror delete for loadsharing to not go to quiesce state for the rest of the set. 
+- na_ontap_ucadapter: fixed KeyError if type is not provided and mode is 'cna'. +- na_ontap_user: checked `applications` does not contain snmp when using REST API call. +- na_ontap_user: fixed KeyError if locked key not set with REST API call. +- na_ontap_user: fixed KeyError if vserver: is empty with REST API call (useful to indicate cluster scope). +- na_ontap_volume: fixed KeyError when getting info on a MVD volume + +### Example playbook +- na_ontap_pb_get_online_volumes.yml: list of volumes that are online (or offline). +- na_ontap_pb_install_SSL_certificate_REST.yml: installing SSL certificate using REST APIs. + +## 20.6.1 + +### New Options: +- na_ontap_firmware_upgrade: `reboot_sp`: reboot service processor before downloading package. +- na_ontap_firmware_upgrade: `rename_package`: rename file when downloading service processor package. +- na_ontap_firmware_upgrade: `replace_package`: replace local file when downloading service processor package. + +### Bug Fixes +- na_ontap_firmware_upgrade: images are not downloaded, but the module reports success. +- na_ontap_user: fixed KeyError if password is not provided. +- na_ontap_password: do not error out if password is identical to previous password (idempotency). + +## 20.6.0 + +### Support for SSL certificate authentication in addition to password +The ONTAP Ansible modules currently require a username/password combination to authenticate with ONTAPI or REST APIs. +It is now possible to use SSL certificate authentication with ONTAPI or REST. +You will first need to install a SSL certificate in ONTAP, see for instance the first part of: +https://netapp.io/2016/11/08/certificate-based-authentication-netapp-manageability-sdk-ontap/ +The applications that need to be authorized for `cert` are `ontapi` and `http`. + +The new `cert_filepath`, `key_filepath` options enable SSL certificate authentication. +This is mutually exclusive with using `username` and `password`. 
+
+ONTAP does not support `cert` authentication for console, so this is not supported for `na_ontap_command`.
+
+SSL certificate authentication requires python2.7 or 3.x.
+
+### New Options
+- na_ontap_disks: `disk_type` option allows to assign specified type of disk.
+- na_ontap_firmware_upgrade: ignore timeout when downloading image unless `fail_on_502_error` is set to true.
+- na_ontap_info: `desired_attributes` advanced feature to select which fields to return.
+- na_ontap_info: `use_native_zapi_tags` to disable the conversion of '_' to '-' for attribute keys.
+- na_ontap_rest_info: `fields` options to request specific fields from subset.
+- na_ontap_software_update: `stabilize_minutes` option specifies number of minutes needed to stabilize node before update.
+- na_ontap_snapmirror: now performs restore with optional field `source_snapshot` for specific snapshot or uses latest.
+- na_ontap_ucadapter: `pair_adapters` option allows specifying the list of adapters which also need to be offline.
+- na_ontap_user: `authentication_password` option specifies password for the authentication protocol of SNMPv3 user.
+- na_ontap_user: `authentication_protocol` option specifies authentication protocol for SNMPv3 user.
+- na_ontap_user: `engine_id` option specifies authoritative entity's EngineID for the SNMPv3 user.
+- na_ontap_user: `privacy_password` option specifies password for the privacy protocol of SNMPv3 user.
+- na_ontap_user: `privacy_protocol` option specifies privacy protocol of SNMPv3 user.
+- na_ontap_user: `remote_switch_ipaddress` option specifies the IP Address of the remote switch of SNMPv3 user.
+- na_ontap_volume: `check_interval` option checks if a volume move has been completed and then waits this number of seconds before checking again.
+- na_ontap_volume: `auto_remap_luns` option controls automatic mapping of LUNs during volume rehost.
+- na_ontap_volume: `force_restore` option forces volume to restore even if the volume has one or more newer Snapshot copies.
+- na_ontap_volume: `force_unmap_luns` option controls automatic unmapping of LUNs during volume rehost.
+- na_ontap_volume: `from_vserver` option allows volume rehost from one vserver to another.
+- na_ontap_volume: `preserve_lun_ids` option controls LUNs in the volume being restored will remain mapped and their identities preserved.
+- na_ontap_volume: `snapshot_restore` option specifies name of snapshot to restore from.
+- all modules: `cert_filepath`, `key_filepath` to enable SSL certificate authentication (python 2.7 or 3.x).
+
+### Bug Fixes
+- na_ontap_firmware_upgrade: ignore timeout when downloading firmware images by default.
+- na_ontap_info: conversion from '-' to '_' was not done for lists of dictionaries.
+- na_ontap_ntfs_dacl: example fix in documentation string.
+- na_ontap_snapmirror: could not delete all rules (bug in netapp_module).
+- na_ontap_volume: modify was invoked multiple times when once is enough.
+- na_ontap_volume: fix KeyError on 'style' when volume is of type: data-protection.
+- na_ontap_volume: `wait_on_completion` is supported with volume moves.
+- module_utils/netapp_module: cater for empty lists in get_modified_attributes().
+- module_utils/netapp_module: cater for lists with duplicate elements in compare_lists().
+
+### Example playbook
+- na_ontap_pb_install_SSL_certificate.yml: installing a self-signed SSL certificate, and enabling SSL certificate authentication.
+
+### Added REST support to existing modules
+- na_ontap_user: added REST support for ONTAP user creation, modification & deletion.
+
+
+## 20.5.0
+
+### New Options:
+- na_ontap_aggregate: `raid_type` option supports 'raid_0' for ONTAP Select.
+- na_ontap_cluster_peer: `encryption_protocol_proposed` option allows specifying encryption protocol to be used for inter-cluster communication.
+- na_ontap_info: new fact: aggr_efficiency_info.
+- na_ontap_info: new fact: cluster_switch_info. +- na_ontap_info: new fact: disk_info. +- na_ontap_info: new fact: env_sensors_info. +- na_ontap_info: new fact: net_dev_discovery_info. +- na_ontap_info: new fact: service_processor_info. +- na_ontap_info: new fact: shelf_info. +- na_ontap_info: new fact: sis_info. +- na_ontap_info: new fact: subsys_health_info. +- na_ontap_info: new fact: sysconfig_info. +- na_ontap_info: new fact: sys_cluster_alerts. +- na_ontap_info: new fact: volume_move_target_aggr_info. +- na_ontap_info: new fact: volume_space_info. +- na_ontap_nvme_namespace: `block_size` option allows specifying size in bytes of a logical block. +- na_ontap_snapmirror: snapmirror now allows resume feature. +- na_ontap_volume: `cutover_action` option allows specifying the action to be taken for cutover. + +### Bug Fixes +- REST API call now honors the `http_port` parameter. +- REST API detection now works with vserver (use_rest: Auto). +- na_ontap_autosupport_invoke: when using ZAPI and name is not given, send autosupport message to all nodes in the cluster. +- na_ontap_cg_snapshot: properly states it does not support check_mode. +- na_ontap_cluster: ONTAP 9.3 or earlier does not support ZAPI element single-node-cluster. +- na_ontap_cluster_ha: support check_mode. +- na_ontap_cluster_peer: support check_mode. +- na_ontap_cluster_peer: EMS log wrongly uses destination credentials with source hostname. +- na_ontap_disks: support check_mode. +- na_ontap_dns: support check_mode. +- na_ontap_efficiency_policy: change `duration` type from int to str to support '-' input. +- na_ontap_fcp: support check_mode. +- na_ontap_flexcache: support check_mode. +- na_ontap_info: `metrocluster_check_info` does not trigger a traceback but adds an "error" info element if the target system is not set up for metrocluster. +- na_ontap_license: support check_mode. +- na_ontap_login_messages: fix documentation link. +- na_ontap_node: support check mode. 
+- na_ontap_ntfs_sd: documentation string update for examples and made sure owner or group not mandatory.
+- na_ontap_ports: now supports check mode.
+- na_ontap_restit: error can be a string in addition to a dict. This fix removes a traceback with AttributeError.
+- na_ontap_routes: support Check Mode correctly.
+- na_ontap_snapmirror: support check_mode.
+- na_ontap_software_update: Incorrectly stated that it supports check mode; it does not.
+- na_ontap_svm_options: support check_mode.
+- na_ontap_volume: improve error reporting if required parameter is present but not set.
+- na_ontap_volume: suppress traceback in wait_for_completion as volume may not be completely ready.
+- na_ontap_volume: fix KeyError on 'style' when volume is offline.
+- na_ontap_volume_autosize: Support check_mode when `reset` option is given.
+- na_ontap_volume_snaplock: fix documentation link.
+- na_ontap_vserver_peer: support check_mode.
+- na_ontap_vserver_peer: EMS log wrongly uses destination credentials with source hostname.
+
+### New Modules
+- na_ontap_rest_info: Gather ONTAP subset information using REST APIs (9.6 and Above).
+
+### Role Change
+- na_ontap_cluster_config: Port Flowcontrol and autonegotiate can be set in role
+
+## 20.4.1
+
+### New Options
+- na_ontap_firmware_upgrade: `force_disruptive_update` and `package_url` options allows to make choices for download and upgrading packages.
+
+### Added REST support to existing modules
+- na_ontap_autosupport_invoke: added REST support for sending autosupport message.
+
+### Bug Fixes
+- na_ontap_volume: `volume_security_style` option now allows modify.
+- na_ontap_info: `metrocluster_check_info` has been removed as it was breaking the info module for everyone who didn't have a metrocluster set up. We are working on adding this back in a future update
+
+### Role Changes
+- na_ontap_vserver_create has a new default variable `netapp_version` set to 140.
If you are running 9.2 or below please add the variable to your playbook and set to 120 + +## 20.4.0 + +### New Options +- na_ontap_aggregate: `disk_count` option allows adding additional disk to aggregate. +- na_ontap_info: `max_records` option specifies maximum number of records returned in a single ZAPI call. +- na_ontap_info: `summary` option specifies a boolean flag to control return all or none of the info attributes. +- na_ontap_info: new fact: iscsi_service_info. +- na_ontap_info: new fact: license_info. +- na_ontap_info: new fact: metrocluster_info. +- na_ontap_info: new fact: metrocluster_check_info. +- na_ontap_info: new fact: metrocluster_node_info. +- na_ontap_info: new fact: net_interface_service_policy_info. +- na_ontap_info: new fact: ontap_system_version. +- na_ontap_info: new fact: ontapi_version (and deprecate ontap_version, both fields are reported for now). +- na_ontap_info: new fact: qtree_info. +- na_ontap_info: new fact: quota_report_info. +- na_ontap_info: new fact: snapmirror_destination_info. +- na_ontap_interface: `service_policy` option to identify a single service or a list of services that will use a LIF. +- na_ontap_kerberos_realm: `ad_server_ip` option specifies IP Address of the Active Directory Domain Controller (DC). +- na_ontap_kerberos_realm: `ad_server_name` option specifies Host name of the Active Directory Domain Controller (DC). +- na_ontap_snapmirror_policy: REST is included and all defaults are removed from options. +- na_ontap_snapmirror: `relationship-info-only` option allows to manage relationship information. +- na_ontap_software_update: `download_only` options allows to download cluster image without software update. +- na_ontap_volume: `snapshot_auto_delete` option allows to manage auto delete settings of a specified volume. + +### Bug Fixes +- na_ontap_cifs_server: delete AD account if username and password are provided when state=absent +- na_ontap_info: return all records of each gathered subset. 
+- na_ontap_info: cifs_server_info: fix KeyError exception on `domain` if only `domain-workgroup` is present. +- na_ontap_iscsi_security: Fixed modify functionality for CHAP and typo correction +- na_ontap_kerberos_realm: fix `kdc_vendor` case sensitivity issue. +- na_ontap_snapmirror: calling quiesce before snapmirror break. + +### New Modules +- na_ontap_autosupport_invoke: send autosupport message. +- na_ontap_ntfs_dacl: create/modify/delete ntfs dacl (discretionary access control list). +- na_ontap_ntfs_sd: create/modify/delete ntfs security descriptor. +- na_ontap_restit: send any REST API request to ONTAP (9.6 and above). +- na_ontap_snmp_traphosts: Create and delete snmp traphosts (9.7 and Above) +- na_ontap_wwpn_alias: create/modify/delete vserver fcp wwpn-alias. +- na_ontap_zapit: send any ZAPI request to ONTAP. + +## 20.3.0 + +### New Options +- na_ontap_info: New info's added `cluster_identity_info` +- na_ontap_info: New info's added `storage_bridge_info` +- na_ontap_snapmirror: performs resync when the `relationship_state` is active and the current state is broken-off. + +### Bug Fixes +- na_ontap_vscan_scanner_pool: has been updated to match the standard format used for all other ontap modules +- na_ontap_volume_snaplock: Fixed KeyError exception on 'is-volume-append-mode-enabled' + +### New Modules +- na_ontap_snapmirror_policy: create/modify/delete snapmirror policy. + +## 20.2.0 + +### New Modules +- na_ontap_volume_snaplock: modify volume snaplock retention. + +### New Options +- na_ontap_info: New info's added `snapshot_info` +- na_ontap_info: `max_records` option to set maximum number of records to return per subset. +- na_ontap_snapmirror: `relationship_state` option for breaking the snapmirror relationship. +- na_ontap_snapmirror: `update_snapmirror` option for updating the snapmirror relationship. +- na_ontap_volume_clone: `split` option to split clone volume from parent volume. 
+ +### Bug Fixes +- na_ontap_cifs_server: Fixed KeyError exception on 'cifs_server_name' +- na_ontap_command: fixed traceback when using return_dict if u'1' is present in result value. +- na_ontap_login_messages: Fixed example documentation and spelling mistake issue +- na_ontap_nvme_subsystem: fixed bug when creating subsystem, vserver was not filtered. +- na_ontap_svm: if snapshot policy is changed, modify fails with "Extra input: snapshot_policy" +- na_ontap_svm: if language: C.UTF-8 is specified, the module is not idempotent +- na_ontap_volume_clone: fixed 'Extra input: parent-vserver' error when running as cluster admin. +- na_ontap_qtree: Fixed issue with Get function for REST + +### Role Changes +- na_ontap_nas_create role: fix typo in README file, add CIFS example. + +## 20.1.0 + +### New Modules +- na_ontap_login_messages: create/modify/delete security login messages including banner and mtod. + +### New Options +- na_ontap_aggregate: add `snaplock_type`. +- na_ontap_info: New info's added `cifs_server_info`, `cifs_share_info`, `cifs_vserver_security_info`, `cluster_peer_info`, `clock_info`, `export_policy_info`, `export_rule_info`, `fcp_adapter_info`, `fcp_alias_info`, `fcp_service_info`, `job_schedule_cron_info`, `kerberos_realm_info`, `ldap_client`, `ldap_config`, `net_failover_group_info`, `net_firewall_info`, `net_ipspaces_info`, `net_port_broadcast_domain_info`, `net_routes_info`, `net_vlan_info`, `nfs_info`, `ntfs_dacl_info`, `ntfs_sd_info`, `ntp_server_info`, `role_info`, `service_processor_network_info`, `sis_policy_info`, `snapmirror_policy_info`, `snapshot_policy_info`, `vscan_info`, `vserver_peer_info` +- na_ontap_igroup_initiator: `force_remove` to forcibly remove initiators from an igroup that is currently mapped to a LUN. +- na_ontap_interface: `failover_group` to specify the failover group for the LIF. `is_ipv4_link_local` to specify the LIF's are to acquire a ipv4 link local address. 
+- na_ontap_rest_cli: add OPTIONS as a supported verb and return list of allowed verbs.
+- na_ontap_volume: add `group_id` and `user_id`.
+
+### Bug Fixes
+- na_ontap_aggregate: Fixed traceback when running as vsadmin and cleanly error out.
+- na_ontap_command: stdout_lines_filter contains data only if include/exclude_lines parameter is used. (zeten30)
+- na_ontap_command: stripped_line len is checked only once, filters are inside if block. (zeten30)
+- na_ontap_interface: allow module to run on node before joining the cluster.
+- na_ontap_net_ifgrp: Fixed error for na_ontap_net_ifgrp if no port is given.
+- na_ontap_snapmirror: Fixed traceback when running as vsadmin. Do not attempt to break a relationship that is 'Uninitialized'.
+- na_ontap_snapshot_policy: Fixed KeyError: 'prefix' bug when prefix parameter isn't supplied.
+- na_ontap_volume: Fixed error reporting if efficiency policy cannot be read. Do not attempt to read efficiency policy if not needed.
+- na_ontap_volume: Fixed error when modifying volume efficiency policy.
+- na_ontap_volume_clone: Fixed KeyError exception on 'volume'
+
+### Added REST support to existing modules
+- na_ontap_dns: added REST support for dns creation and modification on cluster vserver.
+
+### Role Changes
+
+## 19.11.0
+
+### New Modules
+- na_ontap_quota_policy: create/rename/delete quota policy.
+
+### New Options
+- na_ontap_cluster: added single node cluster option, also now supports modifying cluster contact and location options.
+- na_ontap_info: Now allows you to use VSadmin to get info (must use the `vserver` option).
+- na_ontap_info: Added `vscan_status_info`, `vscan_scanner_pool_info`, `vscan_connection_status_all_info`, `vscan_connection_extended_stats_info`
+- na_ontap_efficiency_policy: `changelog_threshold_percent` to set the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour.
+ +### Bug Fixes +- na_ontap_cluster: autosupport log pushed after cluster create is performed, removed license add or remove option. +- na_ontap_dns: report error if modify or delete operations are attempted on cserver when using REST. Make create operation idempotent for cserver when using REST. Support for modify/delete on cserver when using REST will be added later. +- na_ontap_firewall_policy: portmap added as a valid service +- na_ontap_net_routes: REST does not support the 'metric' attribute +- na_ontap_snapmirror: added initialize boolean option which specifies whether to initialize SnapMirror relation. +- na_ontap_volume: fixed error when deleting flexGroup volume with ONTAP 9.7. +- na_ontap_volume: tiering option requires 9.4 or later (error on volume-comp-aggr-attributes) +- na_ontap_vscan_scanner_pool: fix module only gets one scanner pool. + +### Added REST support to existing modules + +### Role Changes + +## 19.10.0 +Changes in 19.10.0 and September collection releases compared to Ansible 2.9 + +### New Modules +- na_ontap_name_service_switch: create/modify/delete name service switch configuration. +- na_ontap_iscsi_security: create/modify/delete iscsi security. + +### New Options +- na_ontap_command: `vserver`: to allow command to run as either cluster admin or vserver admin. To run as vserver admin you must use the vserver option. +- na_ontap_motd: rename `message` to `motd_message` to avoid conflict with Ansible internal variable name. +- na_ontap_nvme_namespace: `size_unit` to specify size in different units. +- na_ontap_snapshot_policy: `prefix`: option to use for creating snapshot policy. + +### Bug Fixes +- na_ontap_ndmp: minor documentation changes for restore_vm_cache_size and data_port_range. +- na_ontap_qtree: REST API takes "unix_permissions" as parameter instead of "mode". +- na_ontap_qtree: unix permission is not available when security style is ntfs +- na_ontap_user: minor documentation update for application parameter. 
+- na_ontap_volume: `efficiency_policy` was ignored
+- na_ontap_volume: enforce that space_slo and space_guarantee are mutually exclusive
+- na_ontap_svm: "allowed_protocols" added to param in proper way in case of using REST API
+- na_ontap_firewall_policy: documentation changed for supported service parameter.
+- na_ontap_net_subnet: fix ip_ranges option fails on existing subnet.
+- na_ontap_snapshot_policy: fix vsadmin approach for managing snapshot policy.
+- na_ontap_nvme_subsystem: fix fetching unique nvme subsystem based on vserver filter.
+- na_ontap_net_routes: change metric type from string to int.
+- na_ontap_cifs_server: minor documentation changes correction of create example with "name" parameter and adding type to parameters.
+- na_ontap_vserver_cifs_security: fix int and boolean options when modifying vserver cifs security.
+- na_ontap_net_subnet: fix rename idempotency issue and updated rename check.
+
+### Added REST support to existing modules
+By default, the module will use REST if the target system supports it, and the options are supported. Otherwise, it will switch back to ZAPI. This behavior can be controlled with the `use_rest` option:
+1. Always: to force REST. The module fails and reports an error if REST cannot be used.
+1. Never: to force ZAPI. This could be useful if you find some incompatibility with REST, or want to confirm the behavior is identical between REST and ZAPI.
+1. Auto: the default, as described above.
+
+- na_ontap_ipspace
+- na_ontap_export_policy
+- na_ontap_ndmp
+-- Note: only `enable` and `authtype` are supported with REST
+- na_ontap_net_routes
+- na_ontap_qtree
+-- Note: `oplocks` is not supported with REST, defaults to enable.
+- na_ontap_svm
+-- Note: `root_volume`, `root_volume_aggregate`, `root_volume_security_style` are not supported with REST. 
+- na_ontap_job_schedule
+
+### Role Changes
+- na_ontap_cluster_config updated to allow cleaner playbook
+- na_ontap_vserver_create updated to allow cleaner playbook
+- na_ontap_nas_create updated to allow cleaner playbook
+- na_ontap_san_create updated to allow cleaner playbook
diff --git a/ansible_collections/netapp/ontap/changelogs/.DS_Store b/ansible_collections/netapp/ontap/changelogs/.DS_Store
new file mode 100644
index 000000000..316ace4f1
Binary files /dev/null and b/ansible_collections/netapp/ontap/changelogs/.DS_Store differ
diff --git a/ansible_collections/netapp/ontap/changelogs/.plugin-cache.yaml b/ansible_collections/netapp/ontap/changelogs/.plugin-cache.yaml
new file mode 100644
index 000000000..61e172720
--- /dev/null
+++ b/ansible_collections/netapp/ontap/changelogs/.plugin-cache.yaml
@@ -0,0 +1,683 @@
+plugins:
+  become: {}
+  cache: {}
+  callback: {}
+  cliconf: {}
+  connection: {}
+  httpapi: {}
+  inventory: {}
+  lookup: {}
+  module:
+    na_ontap_active_directory:
+      description: NetApp ONTAP configure active directory
+      name: na_ontap_active_directory
+      namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules
+      version_added: 20.9.0
+    na_ontap_aggregate:
+      description: NetApp ONTAP manage aggregates.
+ name: na_ontap_aggregate + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_autosupport: + description: NetApp ONTAP autosupport + name: na_ontap_autosupport + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_autosupport_invoke: + description: NetApp ONTAP send AutoSupport message + name: na_ontap_autosupport_invoke + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.4.0 + na_ontap_broadcast_domain: + description: NetApp ONTAP manage broadcast domains. + name: na_ontap_broadcast_domain + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_broadcast_domain_ports: + description: NetApp ONTAP manage broadcast domain ports + name: na_ontap_broadcast_domain_ports + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_cg_snapshot: + description: NetApp ONTAP manage consistency group snapshot + name: na_ontap_cg_snapshot + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_cifs: + description: NetApp ONTAP Manage cifs-share + name: na_ontap_cifs + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_cifs_acl: + description: 
NetApp ONTAP manage cifs-share-access-control + name: na_ontap_cifs_acl + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: null + na_ontap_cifs_local_group_member: + description: NetApp Ontap - Add or remove CIFS local group member + name: na_ontap_cifs_local_group_member + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.2.0 + na_ontap_cifs_local_user_modify: + description: NetApp ONTAP modify local CIFS user. + name: na_ontap_cifs_local_user_modify + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.4.0 + na_ontap_cifs_local_user_set_password: + description: NetApp ONTAP set local CIFS user password + name: na_ontap_cifs_local_user_set_password + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.8.0 + na_ontap_cifs_server: + description: NetApp ONTAP CIFS server configuration + name: na_ontap_cifs_server + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_cluster: + description: NetApp ONTAP cluster - create a cluster and add/remove nodes. 
+ name: na_ontap_cluster + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_cluster_ha: + description: NetApp ONTAP Manage HA status for cluster + name: na_ontap_cluster_ha + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_cluster_peer: + description: NetApp ONTAP Manage Cluster peering + name: na_ontap_cluster_peer + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_command: + description: NetApp ONTAP Run any cli command, the username provided needs to + have console login permission. + name: na_ontap_command + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_debug: + description: NetApp ONTAP Debug netapp-lib import and connection. 
+ name: na_ontap_debug + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.1.0 + na_ontap_disk_options: + description: NetApp ONTAP modify storage disk options + name: na_ontap_disk_options + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.4.0 + na_ontap_disks: + description: NetApp ONTAP Assign disks to nodes + name: na_ontap_disks + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_dns: + description: NetApp ONTAP Create, delete, modify DNS servers. + name: na_ontap_dns + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_domain_tunnel: + description: NetApp ONTAP domain tunnel + name: na_ontap_domain_tunnel + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.3.0 + na_ontap_efficiency_policy: + description: NetApp ONTAP manage efficiency policies (sis policies) + name: na_ontap_efficiency_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_export_policy: + description: NetApp ONTAP manage export-policy + name: na_ontap_export_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_export_policy_rule: + description: NetApp ONTAP 
manage export policy rules + name: na_ontap_export_policy_rule + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_fcp: + description: NetApp ONTAP Start, Stop and Enable FCP services. + name: na_ontap_fcp + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_fdsd: + description: NetApp ONTAP create or remove a File Directory security descriptor. + name: na_ontap_fdsd + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.8.0 + na_ontap_fdsp: + description: NetApp ONTAP create or delete a file directory security policy + name: na_ontap_fdsp + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.8.0 + na_ontap_fdspt: + description: NetApp ONTAP create, delete or modify File Directory security policy + tasks + name: na_ontap_fdspt + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.8.0 + na_ontap_fdss: + description: NetApp ONTAP File Directory Security Set. 
+ name: na_ontap_fdss + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.8.0 + na_ontap_file_directory_policy: + description: NetApp ONTAP create, delete, or modify vserver security file-directory + policy + name: na_ontap_file_directory_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.8.0 + na_ontap_firewall_policy: + description: NetApp ONTAP Manage a firewall policy + name: na_ontap_firewall_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_firmware_upgrade: + description: NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk. + name: na_ontap_firmware_upgrade + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_flexcache: + description: NetApp ONTAP FlexCache - create/delete relationship + name: na_ontap_flexcache + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_fpolicy_event: + description: NetApp ONTAP FPolicy policy event configuration + name: na_ontap_fpolicy_event + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.4.0 + na_ontap_fpolicy_ext_engine: + description: NetApp ONTAP fPolicy external engine configuration. 
+ name: na_ontap_fpolicy_ext_engine + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.4.0 + na_ontap_fpolicy_policy: + description: NetApp ONTAP - Create, delete or modify an FPolicy policy. + name: na_ontap_fpolicy_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.3.0 + na_ontap_fpolicy_scope: + description: NetApp ONTAP - Create, delete or modify an FPolicy policy scope + configuration. + name: na_ontap_fpolicy_scope + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.4.0 + na_ontap_fpolicy_status: + description: NetApp ONTAP - Enables or disables the specified fPolicy policy + name: na_ontap_fpolicy_status + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.4.0 + na_ontap_igroup: + description: NetApp ONTAP iSCSI or FC igroup configuration + name: na_ontap_igroup + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_igroup_initiator: + description: NetApp ONTAP igroup initiator configuration + name: na_ontap_igroup_initiator + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_info: + description: NetApp information gatherer + name: na_ontap_info + namespace: 
private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_interface: + description: NetApp ONTAP LIF configuration + name: na_ontap_interface + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_ipspace: + description: NetApp ONTAP Manage an ipspace + name: na_ontap_ipspace + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_iscsi: + description: NetApp ONTAP manage iSCSI service + name: na_ontap_iscsi + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_iscsi_security: + description: NetApp ONTAP Manage iscsi security. 
+ name: na_ontap_iscsi_security + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 19.10.1 + na_ontap_job_schedule: + description: NetApp ONTAP Job Schedule + name: na_ontap_job_schedule + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_kerberos_realm: + description: NetApp ONTAP vserver nfs kerberos realm + name: na_ontap_kerberos_realm + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_ldap: + description: NetApp ONTAP LDAP + name: na_ontap_ldap + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_ldap_client: + description: NetApp ONTAP LDAP client + name: na_ontap_ldap_client + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_license: + description: NetApp ONTAP protocol and feature license packages + name: na_ontap_license + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_log_forward: + description: NetApp ONTAP Log Forward Configuration + name: na_ontap_log_forward + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.2.0 + na_ontap_login_messages: + description: Setup login banner and message of the day + name: 
na_ontap_login_messages + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.1.0 + na_ontap_lun: + description: NetApp ONTAP manage LUNs + name: na_ontap_lun + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_lun_copy: + description: NetApp ONTAP copy LUNs + name: na_ontap_lun_copy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_lun_map: + description: NetApp ONTAP LUN maps + name: na_ontap_lun_map + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_lun_map_reporting_nodes: + description: NetApp ONTAP LUN maps reporting nodes + name: na_ontap_lun_map_reporting_nodes + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.2.0 + na_ontap_mcc_mediator: + description: NetApp ONTAP Add and Remove MetroCluster Mediator + name: na_ontap_mcc_mediator + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.9.0 + na_ontap_metrocluster: + description: NetApp ONTAP set up a MetroCluster + name: na_ontap_metrocluster + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.9.0 + na_ontap_metrocluster_dr_group: + description: NetApp ONTAP manage MetroCluster DR Group + name: 
na_ontap_metrocluster_dr_group + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.11.0 + na_ontap_motd: + description: Setup motd + name: na_ontap_motd + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_name_service_switch: + description: NetApp ONTAP Manage name service switch + name: na_ontap_name_service_switch + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: null + na_ontap_ndmp: + description: NetApp ONTAP NDMP services configuration + name: na_ontap_ndmp + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_net_ifgrp: + description: NetApp Ontap modify network interface group + name: na_ontap_net_ifgrp + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_net_port: + description: NetApp ONTAP network ports. + name: na_ontap_net_port + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_net_routes: + description: NetApp ONTAP network routes + name: na_ontap_net_routes + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_net_subnet: + description: NetApp ONTAP Create, delete, modify network subnets. 
+ name: na_ontap_net_subnet + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_net_vlan: + description: NetApp ONTAP network VLAN + name: na_ontap_net_vlan + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_nfs: + description: NetApp ONTAP NFS status + name: na_ontap_nfs + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_node: + description: NetApp ONTAP Modify or Rename a node. + name: na_ontap_node + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_ntfs_dacl: + description: NetApp Ontap create, delate or modify NTFS DACL (discretionary + access control list) + name: na_ontap_ntfs_dacl + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.4.0 + na_ontap_ntfs_sd: + description: NetApp ONTAP create, delete or modify NTFS security descriptor + name: na_ontap_ntfs_sd + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.4.0 + na_ontap_ntp: + description: NetApp ONTAP NTP server + name: na_ontap_ntp + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_ntp_key: + description: NetApp ONTAP NTP key + name: na_ontap_ntp_key + namespace: 
private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.21.0 + na_ontap_nvme: + description: NetApp ONTAP Manage NVMe Service + name: na_ontap_nvme + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_nvme_namespace: + description: NetApp ONTAP Manage NVME Namespace + name: na_ontap_nvme_namespace + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_nvme_subsystem: + description: NetApp ONTAP Manage NVME Subsystem + name: na_ontap_nvme_subsystem + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_object_store: + description: NetApp ONTAP manage object store config. + name: na_ontap_object_store + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_partitions: + description: NetApp ONTAP Assign partitions and disks to nodes. 
+ name: na_ontap_partitions + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.8.0 + na_ontap_ports: + description: NetApp ONTAP add/remove ports + name: na_ontap_ports + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_portset: + description: NetApp ONTAP Create/Delete portset + name: na_ontap_portset + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_publickey: + description: NetApp ONTAP publickey configuration + name: na_ontap_publickey + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.7.0 + na_ontap_qos_adaptive_policy_group: + description: NetApp ONTAP Adaptive Quality of Service policy group. + name: na_ontap_qos_adaptive_policy_group + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_qos_policy_group: + description: NetApp ONTAP manage policy group in Quality of Service. 
+ name: na_ontap_qos_policy_group + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_qtree: + description: NetApp ONTAP manage qtrees + name: na_ontap_qtree + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_quota_policy: + description: NetApp Ontap create, assign, rename or delete quota policy + name: na_ontap_quota_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 19.11.0 + na_ontap_quotas: + description: NetApp ONTAP Quotas + name: na_ontap_quotas + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_rest_cli: + description: NetApp ONTAP run any CLI command using REST api/private/cli/ + name: na_ontap_rest_cli + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_rest_info: + description: NetApp ONTAP information gatherer using REST APIs + name: na_ontap_rest_info + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.5.0 + na_ontap_restit: + description: NetApp ONTAP Run any REST API on ONTAP + name: na_ontap_restit + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.4.0 + na_ontap_s3_buckets: + description: NetApp ONTAP S3 Buckets + name: 
na_ontap_s3_buckets + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.19.0 + na_ontap_s3_groups: + description: NetApp ONTAP S3 groups + name: na_ontap_s3_groups + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.21.0 + na_ontap_s3_policies: + description: NetApp ONTAP S3 Policies + name: na_ontap_s3_policies + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.21.0 + na_ontap_s3_services: + description: NetApp ONTAP S3 services + name: na_ontap_s3_services + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.20.0 + na_ontap_s3_users: + description: NetApp ONTAP S3 users + name: na_ontap_s3_users + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.20.0 + na_ontap_security_certificates: + description: NetApp ONTAP manage security certificates. + name: na_ontap_security_certificates + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.7.0 + na_ontap_security_config: + description: NetApp ONTAP modify security config for SSL. + name: na_ontap_security_config + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.3.0 + na_ontap_security_key_manager: + description: NetApp ONTAP security key manager. 
+ name: na_ontap_security_key_manager + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_service_policy: + description: NetApp ONTAP service policy configuration + name: na_ontap_service_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.7.0 + na_ontap_service_processor_network: + description: NetApp ONTAP service processor network + name: na_ontap_service_processor_network + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_snaplock_clock: + description: NetApp ONTAP Sets the snaplock compliance clock. + name: na_ontap_snaplock_clock + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.4.0 + na_ontap_snapmirror: + description: NetApp ONTAP or ElementSW Manage SnapMirror + name: na_ontap_snapmirror + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_snapmirror_policy: + description: NetApp ONTAP create, delete or modify SnapMirror policies + name: na_ontap_snapmirror_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.3.0 + na_ontap_snapshot: + description: NetApp ONTAP manage Snapshots + name: na_ontap_snapshot + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules 
+ version_added: 2.6.0 + na_ontap_snapshot_policy: + description: NetApp ONTAP manage Snapshot Policy + name: na_ontap_snapshot_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_snmp: + description: NetApp ONTAP SNMP community + name: na_ontap_snmp + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_snmp_traphosts: + description: NetApp ONTAP SNMP traphosts. + name: na_ontap_snmp_traphosts + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.3.0 + na_ontap_software_update: + description: NetApp ONTAP Update Software + name: na_ontap_software_update + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_ssh_command: + description: NetApp ONTAP Run any cli command over plain SSH using paramiko. 
+ name: na_ontap_ssh_command + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.8.0 + na_ontap_storage_auto_giveback: + description: Enables or disables NetApp ONTAP storage auto giveback for a specified + node + name: na_ontap_storage_auto_giveback + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.3.0 + na_ontap_storage_failover: + description: Enables or disables NetApp Ontap storage failover for a specified + node + name: na_ontap_storage_failover + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.3.0 + na_ontap_svm: + description: NetApp ONTAP SVM + name: na_ontap_svm + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_svm_options: + description: NetApp ONTAP Modify SVM Options + name: na_ontap_svm_options + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_ucadapter: + description: NetApp ONTAP UC adapter configuration + name: na_ontap_ucadapter + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_unix_group: + description: NetApp ONTAP UNIX Group + name: na_ontap_unix_group + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_unix_user: 
+ description: NetApp ONTAP UNIX users + name: na_ontap_unix_user + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_user: + description: NetApp ONTAP user configuration and management + name: na_ontap_user + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_user_role: + description: NetApp ONTAP user role configuration and management + name: na_ontap_user_role + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_volume: + description: NetApp ONTAP manage volumes. + name: na_ontap_volume + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_volume_autosize: + description: NetApp ONTAP manage volume autosize + name: na_ontap_volume_autosize + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_volume_clone: + description: NetApp ONTAP manage volume clones. 
+ name: na_ontap_volume_clone + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.6.0 + na_ontap_volume_efficiency: + description: NetApp ONTAP enables, disables or modifies volume efficiency + name: na_ontap_volume_efficiency + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 21.2.0 + na_ontap_volume_snaplock: + description: NetApp ONTAP manage volume snaplock retention. + name: na_ontap_volume_snaplock + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.2.0 + na_ontap_vscan: + description: NetApp ONTAP Vscan enable/disable. + name: na_ontap_vscan + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_vscan_on_access_policy: + description: NetApp ONTAP Vscan on access policy configuration. + name: na_ontap_vscan_on_access_policy + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_vscan_on_demand_task: + description: NetApp ONTAP Vscan on demand task configuration. + name: na_ontap_vscan_on_demand_task + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_vscan_scanner_pool: + description: NetApp ONTAP Vscan Scanner Pools Configuration. 
+ name: na_ontap_vscan_scanner_pool + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.8.0 + na_ontap_vserver_cifs_security: + description: NetApp ONTAP vserver CIFS security modification + name: na_ontap_vserver_cifs_security + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.9.0 + na_ontap_vserver_peer: + description: NetApp ONTAP Vserver peering + name: na_ontap_vserver_peer + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 2.7.0 + na_ontap_wait_for_condition: + description: NetApp ONTAP wait_for_condition. Loop over a get status request + until a condition is met. + name: na_ontap_wait_for_condition + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.8.0 + na_ontap_wwpn_alias: + description: NetApp ONTAP set FCP WWPN Alias + name: na_ontap_wwpn_alias + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.4.0 + na_ontap_zapit: + description: NetApp ONTAP Run any ZAPI on ONTAP + name: na_ontap_zapit + namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelog3bt20e04.collections.ansible_collections.netapp.ontap.plugins.modules + version_added: 20.4.0 + netconf: {} + shell: {} + strategy: {} + vars: {} +version: 21.22.0 diff --git a/ansible_collections/netapp/ontap/changelogs/changelog.yaml b/ansible_collections/netapp/ontap/changelogs/changelog.yaml new file mode 100644 index 000000000..d850c7337 --- /dev/null +++ 
b/ansible_collections/netapp/ontap/changelogs/changelog.yaml @@ -0,0 +1,3035 @@ +ancestor: null +releases: + 19.10.0: + changes: + bugfixes: + - na ontap_net_routes - change metric type from string to int. + - na_ontap_cifs_server - minor documentation changes correction of create example + with "name" parameter and adding type to parameters. + - na_ontap_firewall_policy - documentation changed for supported service parameter. + - na_ontap_ndmp - minor documentation changes for restore_vm_cache_size and + data_port_range. + - na_ontap_net_subnet - fix ip_ranges option fails on existing subnet. + - na_ontap_net_subnet - fix rename idempotency issue and updated rename check. + - na_ontap_nvme_subsystem - fix fetching unique nvme subsytem based on vserver + filter. + - na_ontap_qtree - REST API takes "unix_permissions" as parameter instead of + "mode". + - na_ontap_qtree - unix permission is not available when security style is ntfs + - na_ontap_snapshot_policy - fix vsadmin approach for managing snapshot policy. + - na_ontap_svm - ``allowed_protocols`` added to param in proper way in case + of using REST API + - na_ontap_user - minor documentation update for application parameter. + - na_ontap_volume - ``efficiency_policy`` was ignored + - na_ontap_volume - enforce that space_slo and space_guarantee are mutually + exclusive + - na_ontap_vserver_cifs_security - fix int and boolean options when modifying + vserver cifs security. + minor_changes: + - "Added REST support to existing modules.\n By default, the module will use + REST if the target system supports it, and the options are supported. Otherwise, + it will switch back to ZAPI.\n This behavior can be controlled with the ``use_rest`` + option.\n Always - to force REST. The module fails and reports an error + if REST cannot be used.\n Never - to force ZAPI. 
This could be useful if + you find some incompatibility with REST, or want to confirm the behavior is + identical between REST and ZAPI.\n Auto - the default, as described above.\n" + - na_ontap_cluster_config - role updated to support a cleaner playbook + - na_ontap_command - ``vserver`` - to allow command to run as either cluster + admin or vserver admin. To run as vserver admin you must use the vserver + option. + - na_ontap_export_policy - REST support + - na_ontap_ipspace - REST support + - na_ontap_job_schedule - REST support + - na_ontap_motd - rename ``message`` to ``motd_message`` to avoid conflict with + Ansible internal variable name. + - na_ontap_nas_create - role updated to support a cleaner playbook + - na_ontap_ndmp - REST support - only ``enable`` and ``authtype`` are supported + with REST + - na_ontap_net_routes - REST support + - na_ontap_nvme_namespace - ``size_unit`` to specify size in different units. + - na_ontap_qtree - REST support - ``oplocks`` is not supported with REST, defaults + to enable. + - na_ontap_san_create - role updated to support a cleaner playbook + - na_ontap_snapshot_policy - ``prefix`` - option to use for creating snapshot + policy. + - na_ontap_svm - REST support - ``root_volume``, ``root_volume_aggregate``, + ``root_volume_security_style`` are not supported with REST. + - na_ontap_vserver_create - role updated to support a cleaner playbook + fragments: + - 19.10.0.yaml + release_date: '2019-10-31' + 19.10.1: + modules: + - description: NetApp ONTAP Manage iscsi security. + name: na_ontap_iscsi_security + namespace: '' + release_date: '2019-11-01' + 19.11.0: + changes: + bugfixes: + - na_ontap_cluster - autosupport log pushed after cluster create is performed, + removed license add or remove option. + - na_ontap_dns - report error if modify or delete operations are attempted on + cserver when using REST. Make create operation idempotent for cserver when + using REST. 
Support for modify/delete on cserver when using REST will be + added later. + - na_ontap_firewall_policy - portmap added as a valid service + - na_ontap_net_routes - REST does not support the ``metric`` attribute + - na_ontap_snapmirror - added initialize boolean option which specifies whether + to initialize SnapMirror relation. + - na_ontap_volume - fixed error when deleting flexGroup volume with ONTAP 9.7. + - na_ontap_volume - tiering option requires 9.4 or later (error on volume-comp-aggr-attributes) + - na_ontap_vscan_scanner_pool - fix module only gets one scanner pool. + minor_changes: + - na_ontap_cluster - added single node cluster option, also now supports for + modify cluster contact and location option. + - na_ontap_efficiency_policy - ``changelog_threshold_percent`` to set the percentage + at which the changelog will be processed for a threshold type of policy, tested + once each hour. + - na_ontap_info - Added ``vscan_status_info``, ``vscan_scanner_pool_info``, + ``vscan_connection_status_all_info``, ``vscan_connection_extended_stats_info`` + - na_ontap_info - Now allow you use to vsadmin to get info (Must user ``vserver`` + option). + fragments: + - 19.11.0.yaml + modules: + - description: NetApp Ontap create, rename or delete quota policy + name: na_ontap_quota_policy + namespace: '' + release_date: '2019-11-14' + 2.6.0: + modules: + - description: NetApp ONTAP manage aggregates. + name: na_ontap_aggregate + namespace: '' + - description: NetApp ONTAP manage broadcast domains. 
+ name: na_ontap_broadcast_domain + namespace: '' + - description: NetApp ONTAP manage broadcast domain ports + name: na_ontap_broadcast_domain_ports + namespace: '' + - description: NetApp ONTAP Manage cifs-share + name: na_ontap_cifs + namespace: '' + - description: NetApp ONTAP manage cifs-share-access-control + name: na_ontap_cifs_acl + namespace: '' + - description: NetApp ONTAP CIFS server configuration + name: na_ontap_cifs_server + namespace: '' + - description: NetApp ONTAP cluster - create a cluster and add/remove nodes. + name: na_ontap_cluster + namespace: '' + - description: NetApp ONTAP Manage HA status for cluster + name: na_ontap_cluster_ha + namespace: '' + - description: NetApp ONTAP manage export-policy + name: na_ontap_export_policy + namespace: '' + - description: NetApp ONTAP manage export policy rules + name: na_ontap_export_policy_rule + namespace: '' + - description: NetApp ONTAP iSCSI or FC igroup configuration + name: na_ontap_igroup + namespace: '' + - description: NetApp ONTAP LIF configuration + name: na_ontap_interface + namespace: '' + - description: NetApp ONTAP manage iSCSI service + name: na_ontap_iscsi + namespace: '' + - description: NetApp ONTAP Job Schedule + name: na_ontap_job_schedule + namespace: '' + - description: NetApp ONTAP protocol and feature licenses + name: na_ontap_license + namespace: '' + - description: NetApp ONTAP manage LUNs + name: na_ontap_lun + namespace: '' + - description: NetApp ONTAP LUN maps + name: na_ontap_lun_map + namespace: '' + - description: NetApp Ontap modify network interface group + name: na_ontap_net_ifgrp + namespace: '' + - description: NetApp ONTAP network ports. 
+ name: na_ontap_net_port + namespace: '' + - description: NetApp ONTAP network routes + name: na_ontap_net_routes + namespace: '' + - description: NetApp ONTAP network VLAN + name: na_ontap_net_vlan + namespace: '' + - description: NetApp ONTAP NFS status + name: na_ontap_nfs + namespace: '' + - description: NetApp ONTAP NTP server + name: na_ontap_ntp + namespace: '' + - description: NetApp ONTAP manage qtrees + name: na_ontap_qtree + namespace: '' + - description: NetApp ONTAP service processor network + name: na_ontap_service_processor_network + namespace: '' + - description: NetApp ONTAP manage Snapshots + name: na_ontap_snapshot + namespace: '' + - description: NetApp ONTAP SNMP community + name: na_ontap_snmp + namespace: '' + - description: NetApp ONTAP SVM + name: na_ontap_svm + namespace: '' + - description: NetApp ONTAP UC adapter configuration + name: na_ontap_ucadapter + namespace: '' + - description: NetApp ONTAP user configuration and management + name: na_ontap_user + namespace: '' + - description: NetApp ONTAP user role configuration and management + name: na_ontap_user_role + namespace: '' + - description: NetApp ONTAP manage volumes. + name: na_ontap_volume + namespace: '' + - description: NetApp ONTAP manage volume clones. + name: na_ontap_volume_clone + namespace: '' + release_date: '2018-05-24' + 2.7.0: + modules: + - description: NetApp ONTAP Autosupport + name: na_ontap_autosupport + namespace: '' + - description: NetApp ONTAP manage consistency group snapshot + name: na_ontap_cg_snapshot + namespace: '' + - description: NetApp ONTAP Manage Cluster peering + name: na_ontap_cluster_peer + namespace: '' + - description: NetApp ONTAP Run any cli command, the username provided needs to + have console login permission. + name: na_ontap_command + namespace: '' + - description: NetApp ONTAP Assign disks to nodes + name: na_ontap_disks + namespace: '' + - description: NetApp ONTAP Create, delete, modify DNS servers. 
+ name: na_ontap_dns + namespace: '' + - description: NetApp ONTAP Start, Stop and Enable FCP services. + name: na_ontap_fcp + namespace: '' + - description: NetApp ONTAP Manage a firewall policy + name: na_ontap_firewall_policy + namespace: '' + - description: Setup motd + name: na_ontap_motd + namespace: '' + - description: NetApp ONTAP Rename a node. + name: na_ontap_node + namespace: '' + - description: NetApp ONTAP or ElementSW Manage SnapMirror + name: na_ontap_snapmirror + namespace: '' + - description: NetApp ONTAP Update Software + name: na_ontap_software_update + namespace: '' + - description: NetApp ONTAP Modify SVM Options + name: na_ontap_svm_options + namespace: '' + - description: NetApp ONTAP Vserver peering + name: na_ontap_vserver_peer + namespace: '' + release_date: '2018-09-21' + 2.8.0: + modules: + - description: NetApp ONTAP FlexCache - create/delete relationship + name: na_ontap_flexcache + namespace: '' + - description: NetApp ONTAP igroup initiator configuration + name: na_ontap_igroup_initiator + namespace: '' + - description: NetApp ONTAP copy LUNs + name: na_ontap_lun_copy + namespace: '' + - description: NetApp ONTAP Create, delete, modify network subnets. + name: na_ontap_net_subnet + namespace: '' + - description: NetApp ONTAP Manage NVMe Service + name: na_ontap_nvme + namespace: '' + - description: NetApp ONTAP Manage NVME Namespace + name: na_ontap_nvme_namespace + namespace: '' + - description: NetApp ONTAP Manage NVME Subsystem + name: na_ontap_nvme_subsystem + namespace: '' + - description: NetApp ONTAP Create/Delete portset + name: na_ontap_portset + namespace: '' + - description: NetApp ONTAP manage policy group in Quality of Service. + name: na_ontap_qos_policy_group + namespace: '' + - description: NetApp ONTAP Quotas + name: na_ontap_quotas + namespace: '' + - description: NetApp ONTAP security key manager. 
+ name: na_ontap_security_key_manager + namespace: '' + - description: NetApp ONTAP manage Snapshot Policy + name: na_ontap_snapshot_policy + namespace: '' + - description: NetApp ONTAP UNIX Group + name: na_ontap_unix_group + namespace: '' + - description: NetApp ONTAP UNIX users + name: na_ontap_unix_user + namespace: '' + - description: NetApp ONTAP Vscan on access policy configuration. + name: na_ontap_vscan_on_access_policy + namespace: '' + - description: NetApp ONTAP Vscan on demand task configuration. + name: na_ontap_vscan_on_demand_task + namespace: '' + - description: NetApp ONTAP Vscan Scanner Pools Configuration. + name: na_ontap_vscan_scanner_pool + namespace: '' + release_date: '2019-04-11' + 2.9.0: + modules: + - description: NetApp ONTAP manage efficiency policies (sis policies) + name: na_ontap_efficiency_policy + namespace: '' + - description: NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk. + name: na_ontap_firmware_upgrade + namespace: '' + - description: NetApp information gatherer + name: na_ontap_info + namespace: '' + - description: NetApp ONTAP Manage an ipspace + name: na_ontap_ipspace + namespace: '' + - description: NetApp ONTAP vserver nfs kerberos realm + name: na_ontap_kerberos_realm + namespace: '' + - description: NetApp ONTAP LDAP + name: na_ontap_ldap + namespace: '' + - description: NetApp ONTAP LDAP client + name: na_ontap_ldap_client + namespace: '' + - description: NetApp ONTAP NDMP services configuration + name: na_ontap_ndmp + namespace: '' + - description: NetApp ONTAP manage object store config. + name: na_ontap_object_store + namespace: '' + - description: NetApp ONTAP add/remove ports + name: na_ontap_ports + namespace: '' + - description: NetApp ONTAP Adaptive Quality of Service policy group. + name: na_ontap_qos_adaptive_policy_group + namespace: '' + - description: NetApp ONTAP Run any cli command, the username provided needs to + have console login permission. 
+ name: na_ontap_rest_cli + namespace: '' + - description: NetApp ONTAP manage volume autosize + name: na_ontap_volume_autosize + namespace: '' + - description: NetApp ONTAP Vscan enable/disable. + name: na_ontap_vscan + namespace: '' + - description: NetApp ONTAP vserver CIFS security modification + name: na_ontap_vserver_cifs_security + namespace: '' + release_date: '2019-09-16' + 20.1.0: + changes: + bugfixes: + - na_ontap_aggregate - Fixed traceback when running as vsadmin and cleanly error + out. + - na_ontap_command - stdout_lines_filter contains data only if include/exlude_lines + parameter is used. (zeten30) + - na_ontap_command - stripped_line len is checked only once, filters are inside + if block. (zeten30) + - na_ontap_interface - allow module to run on node before joining the cluster. + - na_ontap_net_ifgrp - Fixed error for na_ontap_net_ifgrp if no port is given. + - na_ontap_snapmirror - Fixed traceback when running as vsadmin. Do not attempt + to break a relationship that is 'Uninitialized'. + - na_ontap_snapshot_policy - Fixed KeyError on ``prefix`` issue when prefix + parameter isn't supplied. + - na_ontap_volume - Fixed error reporting if efficiency policy cannot be read. Do + not attempt to read efficiency policy if not needed. + - na_ontap_volume - Fixed error when modifying volume efficiency policy. + - na_ontap_volume_clone - Fixed KeyError exception on ``volume`` + minor_changes: + - na_ontap_aggregate - add ``snaplock_type``. + - na_ontap_dns - added REST support for dns creation and modification on cluster + vserver. + - na_ontap_igroup_initiator - ``force_remove`` to forcibly remove initiators + from an igroup that is currently mapped to a LUN. 
+ - na_ontap_info - New info's added ``cifs_server_info``, ``cifs_share_info``, + ``cifs_vserver_security_info``, ``cluster_peer_info``, ``clock_info``, ``export_policy_info``, + ``export_rule_info``, ``fcp_adapter_info``, ``fcp_alias_info``, ``fcp_service_info``, + ``job_schedule_cron_info``, ``kerberos_realm_info``, ``ldap_client``, ``ldap_config``, + ``net_failover_group_info``, ``net_firewall_info``, ``net_ipspaces_info``, + ``net_port_broadcast_domain_info``, ``net_routes_info``, ``net_vlan_info``, + ``nfs_info``, ``ntfs_dacl_info``, ``ntfs_sd_info``, ``ntp_server_info``, ``role_info``, + ``service_processor_network_info``, ``sis_policy_info``, ``snapmirror_policy_info``, + ``snapshot_policy_info``, ``vscan_info``, ``vserver_peer_info`` + - na_ontap_interface - ``failover_group`` to specify the failover group for + the LIF. ``is_ipv4_link_local`` to specify the LIF's are to acquire a ipv4 + link local address. + - na_ontap_rest_cli - add OPTIONS as a supported verb and return list of allowed + verbs. + - na_ontap_volume - add ``group_id`` and ``user_id``. + fragments: + - 20.1.0.yaml + modules: + - description: Setup login banner and message of the day + name: na_ontap_login_messages + namespace: '' + release_date: '2020-01-08' + 20.10.0: + changes: + bugfixes: + - na_ontap_aggregate - support concurrent actions for rename/modify/add_object_store + and create/add_object_store. + - na_ontap_cluster - ``single_node_cluster`` option was ignored. + - na_ontap_info - KeyError on ``tree`` for quota_report_info. + - na_ontap_info - better reporting on KeyError traceback, option to ignore error. + - na_ontap_snapmirror_policy - report error when attempting to change ``policy_type`` + rather than taking no action. + - na_ontap_volume - ``encrypt`` with a value of ``false`` is ignored when creating + a volume. 
+ minor_changes: + - na_ontap_rest_info - Support for gather subsets - ``application_info, application_template_info, + autosupport_config_info , autosupport_messages_history, ontap_system_version, + storage_flexcaches_info, storage_flexcaches_origin_info, storage_ports_info, + storage_qos_policies, storage_qtrees_config, storage_quota_reports, storage_quota_policy_rules, + storage_shelves_config, storage_snapshot_policies, support_ems_config, support_ems_events, + support_ems_filters`` + fragments: + - DEVOPS-2426.yaml + - DEVOPS-3113.yaml + - DEVOPS-3139.yaml + - DEVOPS-3167.yaml + - DEVOPS-3178.yaml + - DEVOPS-3194.yaml + - DEVOPS-3251.yaml + release_date: '2020-10-08' + 20.11.0: + changes: + bugfixes: + - All REST modules, will not fail if a job fails + - na_ontap_cifs - fix idempotency issue when ``show-previous-versions`` is used. + - na_ontap_firmware_upgrade - fix ValueError issue when processing URL error. + - na_ontap_info - Use ``node-id`` as key rather than ``current-version``. + - na_ontap_ipspace - invalid call in error reporting (double error). + - na_ontap_software_update - module is not idempotent. + minor_changes: + - na_ontap_cifs - output ``modified`` if a modify action is taken. + - na_ontap_cluster_peer - optional parameter ``ipspace`` added for cluster peer. + - na_ontap_export_policy_rule - minor doc updates. + - na_ontap_info - do not require write access privileges. This also enables + other modules to work in check_mode without write access permissions. + - na_ontap_interface - minor example update. + - na_ontap_lun - ``use_exact_size`` to create a lun with the exact given size + so that the lun is not rounded up. + - na_ontap_lun - support modify for space_allocation and space_reserve. + - na_ontap_mcc_mediator - improve error reporting when REST is not available. + - na_ontap_metrocluster - improve error reporting when REST is not available. + - na_ontap_software_update - add `force_update` option to ignore current version. 
+ - na_ontap_svm - output ``modified`` if a modify action is taken. + - na_ontap_wwpn_alias - improve error reporting when REST is not available. + fragments: + - DEVOPS-2965.yaml + - DEVOPS-3149.yaml + - DEVOPS-3262.yaml + - DEVOPS-3304.yaml + - DEVOPS-3310.yml + - DEVOPS-3312.yaml + - DEVOPS-3354.yaml + - DEVOPS-3358.yaml + - DEVOPS-3366.yaml + - github-56.yaml + modules: + - description: NetApp ONTAP manage MetroCluster DR Group + name: na_ontap_metrocluster_dr_group + namespace: '' + release_date: '2020-11-05' + 20.12.0: + changes: + bugfixes: + - na_ontap_broadcast_domain_ports - handle ``changed`` for check_mode and report + correctly. + - na_ontap_cifs - fix for AttributeError - 'NoneType' object has no attribute + 'get' on line 300 + - na_ontap_svm - warning for ``aggr_list`` wildcard value(``*``) in create idempotency. + - na_ontap_user - application expects only ``service_processor`` but module + supports ``service-processor``. + - na_ontap_volume - checking for success before failure lead to 'NoneType' object + has no attribute 'get_child_by_name' when modifying a Flexcache volume. + - na_ontap_volume - fix volume type modify issue by reporting error. + minor_changes: + - all ZAPI modules - new ``classic_basic_authorization`` feature_flag to disable + adding Authorization header proactively. + - all ZAPI modules - optimize Basic Authentication by adding Authorization header + proactively. + - na_ontap_igroup - new option ``os_type`` to replace ``ostype`` (but ostype + is still accepted). + - na_ontap_info - New options ``cifs_options_info``, ``cluster_log_forwarding_info``, + ``event_notification_destination_info``, ``event_notification_info``, ``security_login_role_config_info``, + ``security_login_role_info`` have been added. + - na_ontap_lun - new option ``from_name`` to rename a LUN. + - na_ontap_lun - new option ``os_type`` to replace ``ostype`` (but ostype is + still accepted), and removed default to ``image``. 
+ - na_ontap_lun - new option ``qos_policy_group`` to assign a qos_policy_group + to a LUN. + - na_ontap_lun - new option ``san_application_template`` to create LUNs without + explicitly creating a volume and using REST APIs. + - na_ontap_qos_policy_group - new option ``is_shared`` for sharing QOS SLOs + or not. + - na_ontap_quota_policy - new option ``auto_assign`` to assign quota policy + to vserver. + - na_ontap_quotas - New option ``activate_quota_on_change`` to resize or reinitialize + quotas. + - na_ontap_quotas - New option ``perform_user_mapping`` to perform user mapping + for the user specified in quota-target. + - na_ontap_rest_info - Support for gather subsets - ``cifs_home_directory_info, + cluster_software_download, event_notification_info, event_notification_destination_info, + security_login_info, security_login_rest_role_info`` + - na_ontap_volume - ``compression`` to enable compression on a FAS volume. + - na_ontap_volume - ``inline-compression`` to enable inline compression on a + volume. + - na_ontap_volume - ``nas_application_template`` to create a volume using nas + application REST API. + - na_ontap_volume - ``size_change_threshold`` to ignore small changes in volume + size. + - na_ontap_volume - ``sizing_method`` to resize a FlexGroup using REST. + fragments: + - DEVOPS-2668.yaml + - DEVOPS-2964.yaml + - DEVOPS-3181.yaml + - DEVOPS-3329.yaml + - DEVOPS-3346.yaml + - DEVOPS-3367.yaml + - DEVOPS-3368.yaml + - DEVOPS-3369.yaml + - DEVOPS-3371.yaml + - DEVOPS-3385.yaml + - DEVOPS-3386.yaml + - DEVOPS-3390.yaml + - DEVOPS-3392.yaml + - DEVOPS-3399.yaml + - DEVOPS-3400.yaml + - DEVOPS-3401.yaml + - DEVOPS-3442.yaml + - DEVOPS-3443.yaml + - DEVOPS-3454.yaml + release_date: '2020-12-02' + 20.2.0: + changes: + bugfixes: + - na_ontap_cifs_server - Fixed KeyError exception on 'cifs_server_name' + - na_ontap_command - fixed traceback when using return_dict if u'1' is present + in result value. 
+ - na_ontap_login_messages - Fixed example documentation and spelling mistake + issue + - na_ontap_nvme_subsystem - fixed bug when creating subsystem, vserver was not + filtered. + - na_ontap_qtree - Fixed issue with Get function for REST + - na_ontap_svm - if language C.UTF-8 is specified, the module is not idempotent + - na_ontap_svm - if snapshot policy is changed, modify fails with "Extra input + - snapshot_policy" + - na_ontap_volume_clone - fixed 'Extra input - parent-vserver' error when running + as cluster admin. + minor_changes: + - na_ontap_info - New info's added ``snapshot_info`` + - na_ontap_info - ``max_records`` option to set maximum number of records to + return per subset. + - na_ontap_nas_create - role - fix typo in README file, add CIFS example. - + - na_ontap_snapmirror - ``relationship_state`` option for breaking the snapmirror + relationship. + - na_ontap_snapmirror - ``update_snapmirror`` option for updating the snapmirror + relationship. + - na_ontap_volume_clone - ``split`` option to split clone volume from parent + volume. + fragments: + - 20.2.0.yaml + modules: + - description: NetApp ONTAP manage volume snaplock retention. + name: na_ontap_volume_snaplock + namespace: '' + release_date: '2020-02-05' + 20.3.0: + changes: + bugfixes: + - na_ontap_volume_snaplock - Fixed KeyError exception on 'is-volume-append-mode-enabled' + - na_ontap_vscan_scanner_pool - has been updated to match the standard format + used for all other ontap modules + minor_changes: + - na_ontap_info - New info's added ``storage_bridge_info`` + - na_ontap_info - New info's added `cluster_identity_info`` + - na_ontap_snapmirror - performs resync when the ``relationship_state`` is active + and the current state is broken-off. + fragments: + - 20.3.0.yaml + modules: + - description: NetApp ONTAP create, delete or modify SnapMirror policies + name: na_ontap_snapmirror_policy + namespace: '' + - description: NetApp ONTAP SNMP traphosts. 
+ name: na_ontap_snmp_traphosts + namespace: '' + release_date: '2020-03-04' + 20.4.0: + changes: + bugfixes: + - na_ontap_cifs_server - delete AD account if username and password are provided + when state=absent + - na_ontap_info - cifs_server_info - fix KeyError exception on ``domain`` if + only ``domain-workgroup`` is present. + - na_ontap_info - return all records of each gathered subset. + - na_ontap_iscsi_security - Fixed modify functionality for CHAP and typo correction + - na_ontap_kerberos_realm - fix ``kdc_vendor`` case sensitivity issue. + - na_ontap_snapmirror - calling quiesce before snapmirror break. + minor_changes: + - na_ontap_aggregate - ``disk_count`` option allows adding additional disk to + aggregate. + - na_ontap_info - ``max_records`` option specifies maximum number of records + returned in a single ZAPI call. + - na_ontap_info - ``summary`` option specifies a boolean flag to control return + all or none of the info attributes. + - na_ontap_info - new fact - iscsi_service_info. + - na_ontap_info - new fact - license_info. + - na_ontap_info - new fact - metrocluster_check_info. + - na_ontap_info - new fact - metrocluster_info. + - na_ontap_info - new fact - metrocluster_node_info. + - na_ontap_info - new fact - net_interface_service_policy_info. + - na_ontap_info - new fact - ontap_system_version. + - na_ontap_info - new fact - ontapi_version (and deprecate ontap_version, both + fields are reported for now). + - na_ontap_info - new fact - qtree_info. + - na_ontap_info - new fact - quota_report_info. + - na_ontap_info - new fact - snapmirror_destination_info. + - na_ontap_interface - ``service_policy`` option to identify a single service + or a list of services that will use a LIF. + - na_ontap_kerberos_realm - ``ad_server_ip`` option specifies IP Address of + the Active Directory Domain Controller (DC). + - na_ontap_kerberos_realm - ``ad_server_name`` option specifies Host name of + the Active Directory Domain Controller (DC). 
+ - na_ontap_snapmirror - ``relationship-info-only`` option allows to manage relationship + information. + - na_ontap_snapmirror_policy - REST is included and all defaults are removed + from options. + - na_ontap_software_update - ``download_only`` options allows to download cluster + image without software update. + - na_ontap_volume - ``snapshot_auto_delete`` option allows to manage auto delete + settings of a specified volume. + fragments: + - 20.4.0.yaml + modules: + - description: NetApp ONTAP send AutoSupport message + name: na_ontap_autosupport_invoke + namespace: '' + - description: NetApp Ontap create, delate or modify NTFS DACL (discretionary + access control list) + name: na_ontap_ntfs_dacl + namespace: '' + - description: NetApp ONTAP create, delete or modify NTFS security descriptor + name: na_ontap_ntfs_sd + namespace: '' + - description: NetApp ONTAP Run any REST API on ONTAP + name: na_ontap_restit + namespace: '' + - description: NetApp ONTAP set FCP WWPN Alias + name: na_ontap_wwpn_alias + namespace: '' + - description: NetApp ONTAP Run any ZAPI on ONTAP + name: na_ontap_zapit + namespace: '' + release_date: '2020-04-01' + 20.4.1: + changes: + bugfixes: + - na_ontap_info - ``metrocluster_check_info`` has been removed as it was breaking + the info module for everyone who didn't have a metrocluster set up. We are + working on adding this back in a future update. + - na_ontap_volume - ``volume_security_style`` option now allows modify. + minor_changes: + - na_ontap_autosupport_invoke - added REST support for sending autosupport message. + - na_ontap_firmware_upgrade - ``force_disruptive_update`` and ``package_url`` + options allows to make choices for download and upgrading packages. + - na_ontap_vserver_create has a new default variable ``netapp_version`` set + to 140. 
If you are running 9.2 or below please add the variable to your playbook + and set to 120 + fragments: + - 20.4.1.yaml + release_date: '2020-04-13' + 20.5.0: + changes: + bugfixes: + - REST API call now honors the ``http_port`` parameter. + - REST API detection now works with vserver (use_rest - Auto). + - na_ontap_autosupport_invoke - when using ZAPI and name is not given, send + autosupport message to all nodes in the cluster. + - na_ontap_cg_snapshot - properly states it does not support check_mode. + - na_ontap_cluster - ONTAP 9.3 or earlier does not support ZAPI element single-node-cluster. + - na_ontap_cluster_ha - support check_mode. + - na_ontap_cluster_peer - EMS log wrongly uses destination credentials with + source hostname. + - na_ontap_cluster_peer - support check_mode. + - na_ontap_disks - support check_mode. + - na_ontap_dns - support check_mode. + - na_ontap_efficiency_policy - change ``duration`` type from int to str to support + '-' input. + - na_ontap_fcp - support check_mode. + - na_ontap_flexcache - support check_mode. + - na_ontap_info - `metrocluster_check_info` does not trigger a traceback but + adds an "error" info element if the target system is not set up for metrocluster. + - na_ontap_license - support check_mode. + - na_ontap_login_messages - fix documentation link. + - na_ontap_node - support check mode. + - na_ontap_ntfs_sd - documentation string update for examples and made sure + owner or group not mandatory. + - na_ontap_ports - now support check mode. + - na_ontap_restit - error can be a string in addition to a dict. This fix removes + a traceback with AttributeError. + - na_ontap_routes - support Check Mode correctly. + - na_ontap_snapmirror - support check_mode. + - na_ontap_software_update - Incorrectly stated that it support check mode, + it does not. + - na_ontap_svm_options - support check_mode. + - na_ontap_volume - fix KeyError on 'style' when volume is offline. 
+ - na_ontap_volume - improve error reporting if required parameter is present + but not set. + - na_ontap_volume - suppress traceback in wait_for_completion as volume may + not be completely ready. + - na_ontap_volume_autosize - Support check_mode when `reset` option is given. + - na_ontap_volume_snaplock - fix documentation link. + - na_ontap_vserver_peer - EMS log wrongly uses destination credentials with + source hostname. + - na_ontap_vserver_peer - support check_mode. + minor_changes: + - na_ontap_aggregate - ``raid_type`` options supports 'raid_0' for ONTAP Select. + - na_ontap_cluster_config - role - Port Flowcontrol and autonegotiate can be + set in role + - na_ontap_cluster_peer - ``encryption_protocol_proposed`` option allows specifying + encryption protocol to be used for inter-cluster communication. + - na_ontap_info - new fact - aggr_efficiency_info. + - na_ontap_info - new fact - cluster_switch_info. + - na_ontap_info - new fact - disk_info. + - na_ontap_info - new fact - env_sensors_info. + - na_ontap_info - new fact - net_dev_discovery_info. + - na_ontap_info - new fact - service_processor_info. + - na_ontap_info - new fact - shelf_info. + - na_ontap_info - new fact - sis_info. + - na_ontap_info - new fact - subsys_health_info. + - na_ontap_info - new fact - sys_cluster_alerts. + - na_ontap_info - new fact - sysconfig_info. + - na_ontap_info - new fact - volume_move_target_aggr_info. + - na_ontap_info - new fact - volume_space_info. + - na_ontap_nvme_namespace - ``block_size`` option allows specifying size in + bytes of a logical block. + - na_ontap_snapmirror - snapmirror now allows resume feature. + - na_ontap_volume - ``cutover_action`` option allows specifying the action to + be taken for cutover. 
+ fragments: + - 20.5.0.yaml + modules: + - description: NetApp ONTAP information gatherer using REST APIs + name: na_ontap_rest_info + namespace: '' + release_date: '2020-05-07' + 20.6.0: + changes: + bugfixes: + - module_utils/netapp_module - cater for empty lists in get_modified_attributes(). + - module_utils/netapp_module - cater for lists with duplicate elements in compare_lists(). + - na_ontap_firmware_upgrade - ignore timeout when downloading firmware images + by default. + - na_ontap_info - conversion from '-' to '_' was not done for lists of dictionaries. + - na_ontap_ntfs_dacl - example fix in documentation string. + - na_ontap_snapmirror - could not delete all rules (bug in netapp_module). + - na_ontap_volume - `wait_on_completion` is supported with volume moves. + - na_ontap_volume - fix KeyError on 'style' when volume is of type - data-protection. + - na_ontap_volume - modify was invoked multiple times when once is enough. + minor_changes: + - all modules - SSL certificate authentication in addition to username/password + (python 2.7 or 3.x). + - all modules - ``cert_filepath``, ``key_filepath`` to enable SSL certificate + authentication (python 2.7 or 3.x). + - na_ontap_disks - ``disk_type`` option allows to assign specified type of disk. + - na_ontap_firmware_upgrade - ignore timeout when downloading image unless ``fail_on_502_error`` + is set to true. + - na_ontap_info - ``desired_attributes`` advanced feature to select which fields + to return. + - na_ontap_info - ``use_native_zapi_tags`` to disable the conversion of '_' + to '-' for attribute keys. + - na_ontap_pb_install_SSL_certificate.yml - playbook example - installing a + self-signed SSL certificate, and enabling SSL certificate authentication. + - na_ontap_rest_info - ``fields`` options to request specific fields from subset. + - na_ontap_snapmirror - now performs restore with optional field ``source_snapshot`` + for specific snapshot or uses latest. 
+ - na_ontap_software_update - ``stabilize_minutes`` option specifies number of + minutes needed to stabilize node before update. + - na_ontap_ucadapter - ``pair_adapters`` option allows specifying the list of + adapters which also need to be offline. + - na_ontap_user - ``authentication_password`` option specifies password for + the authentication protocol of SNMPv3 user. + - na_ontap_user - ``authentication_protocol`` option specifies authentication + protocol for SNMPv3 user. + - na_ontap_user - ``engine_id`` option specifies authoritative entity's EngineID + for the SNMPv3 user. + - na_ontap_user - ``privacy_password`` option specifies password for the privacy + protocol of SNMPv3 user. + - na_ontap_user - ``privacy_protocol`` option specifies privacy protocol of + SNMPv3 user. + - na_ontap_user - ``remote_switch_ipaddress`` option specifies the IP Address + of the remote switch of SNMPv3 user. + - na_ontap_user - added REST support for ONTAP user creation, modification & + deletion. + - na_ontap_volume - ``auto_remap_luns`` option controls automatic mapping of + LUNs during volume rehost. + - na_ontap_volume - ``check_interval`` option checks if a volume move has been + completed and then waits this number of seconds before checking again. + - na_ontap_volume - ``force_restore`` option forces volume to restore even if + the volume has one or more newer Snapshot copies. + - na_ontap_volume - ``preserve_lun_ids`` option controls LUNs in the volume + being restored will remain mapped and their identities preserved. + - na_ontap_volume - ``snapshot_restore`` option specifies name of snapshot to + restore from. 
+ fragments: + - 20.6.0.yaml + release_date: '2020-06-03' + 20.6.1: + changes: + bugfixes: + - na_ontap_firmware_upgrade - images are not downloaded, but the module reports + success. + - na_ontap_password - do not error out if password is identical to previous + password (idempotency). + - na_ontap_user - fixed KeyError if password is not provided. + minor_changes: + - na_ontap_firmware_upgrade - ``reboot_sp`` - reboot service processor before + downloading package. + - na_ontap_firmware_upgrade - ``rename_package`` - rename file when downloading + service processor package. + - na_ontap_firmware_upgrade - ``replace_package`` - replace local file when + downloading service processor package. + fragments: + - 20.6.1.yaml + release_date: '2020-06-08' + 20.7.0: + changes: + bugfixes: + - na_ontap_command - replace invalid backspace characters (0x08) with '.'. + - na_ontap_firmware_download - exception on PCDATA if ONTAP returns a BEL (0x07) + character. + - na_ontap_info - lists were incorrectly processed in convert_keys, returning + {}. + - na_ontap_info - qtree_info is missing most entries. Changed key from `vserver:id` + to `vserver:volume:id` . + - na_ontap_iscsi_security - adding no_log for password parameters. + - na_ontap_portset - adding explicit error message as modify portset is not + supported. + - na_ontap_snapmirror - fixed snapmirror delete for loadsharing to not go to + quiesce state for the rest of the set. + - na_ontap_ucadapter - fixed KeyError if type is not provided and mode is 'cna'. + - na_ontap_user - checked `applications` does not contain snmp when using REST + API call. + - na_ontap_user - fixed KeyError if locked key not set with REST API call. + - na_ontap_user - fixed KeyError if vserver - is empty with REST API call (useful + to indicate cluster scope). + - na_ontap_volume - fixed KeyError when getting info on a MVD volume + minor_changes: + - module_utils/netapp - add retry on wait_on_job when job failed. Abort 3 consecutive + errors. 
+ - na_ontap_info - support ``continue_on_error`` option to continue when a ZAPI + is not supported on a vserver, or for cluster RPC errors. + - na_ontap_info - support ``query`` option to specify which objects to return. + - na_ontap_info - support ``vserver`` tunneling to limit output to one vserver. + - na_ontap_pb_get_online_volumes.yml - example playbook to list volumes that + are online (or offline). + - na_ontap_pb_install_SSL_certificate_REST.yml - example playbook to install + SSL certificates using REST APIs. + - na_ontap_rest_info - Support for gather subsets - ``cluster_node_info, cluster_peer_info, + disk_info, cifs_services_info, cifs_share_info``. + - na_ontap_snapmirror_policy - support for SnapMirror policy rules. + - na_ontap_vscan_scanner_pool - support modification. + fragments: + - 20.7.0.yaml + modules: + - description: NetApp ONTAP manage security certificates. + name: na_ontap_security_certificates + namespace: '' + release_date: '2020-06-24' + 20.8.0: + changes: + bugfixes: + - na_ontap_aggregate - ``disk-info`` error when using ``disks`` option. + - na_ontap_autosupport_invoke - ``message`` has changed to ``autosupport_message`` + as Redhat has reserved this word. ``message`` has been alias'd to ``autosupport_message``. + - na_ontap_cifs_vserver - fix documentation and add more examples. + - na_ontap_cluster - module was not idempotent when changing location or contact + information. + - na_ontap_igroup - idempotency issue when using uppercase hex digits (A, B, + C, D, E, F) in WWN (ONTAP uses lowercase). + - na_ontap_igroup_initiator - idempotency issue when using uppercase hex digits + (A, B, C, D, E, F) in WWN (ONTAP uses lowercase). + - na_ontap_info - Fixed error causing module to fail on ``metrocluster_check_info``, + ``env_sensors_info`` and ``volume_move_target_aggr_info``. + - na_ontap_security_certificates - allows (``common_name``, ``type``) as an + alternate key since ``name`` is not supported in ONTAP 9.6 and 9.7. 
+ - na_ontap_snapmirror - fixed KeyError when accessing ``relationship_type`` parameter. + - na_ontap_snapmirror_policy - fixed a race condition when creating a new policy. + - na_ontap_snapmirror_policy - fixed idempotency issue with is_network_compression_enabled + for REST. + - na_ontap_software_update - ignore connection errors during update as nodes + may not be reachable. + - na_ontap_user - enable lock state and password to be set in the same task + for existing user. + - na_ontap_volume - issue when snapdir_access and atime_update not passed together. + - na_ontap_vscan_on_access_policy - ``bool`` type was not properly set for ``scan_files_with_no_ext``. + - na_ontap_vscan_on_access_policy - ``policy_status`` enable/disable option + was not supported. + - na_ontap_vscan_on_demand_task - ``file_ext_to_include`` was not handled properly. + - na_ontap_vscan_scanner_pool_policy - scanner_pool apply policy support on + modification. + - na_ontap_vserver_create(role) - lif creation now defaults to system-defined + unless iscsi lif type. + - use_rest is now case insensitive. + minor_changes: + - add ``type:`` and ``elements:`` information where missing. + - na_ontap_aggregate - support ``disk_size_with_unit`` option. + - na_ontap_ldap_client - support ``ad_domain`` and ``preferred_ad_server`` options. + - na_ontap_qtree - ``force_delete`` option with a DEFAULT of ``true`` so that + ZAPI behavior is aligned with REST. + - na_ontap_rest_info - Support for gather subsets - ``cloud_targets_info, cluster_chassis_info, + cluster_jobs_info, cluster_metrics_info, cluster_schedules, broadcast_domains_info, + cluster_software_history, cluster_software_packages, network_ports_info, ip_interfaces_info, + ip_routes_info, ip_service_policies, network_ipspaces_info, san_fc_logins_info, + san_fc_wppn-aliases, svm_dns_config_info, svm_ldap_config_info, svm_name_mapping_config_info, + svm_nis_config_info, svm_peers_info, svm_peer-permissions_info``. 
+ - na_ontap_rest_info - Support for gather subsets for 9.8+ - ``cluster_metrocluster_diagnostics``. + - na_ontap_security_certificates - ``ignore_name_if_not_supported`` option to + not fail if ``name`` is present since ``name`` is not supported in ONTAP 9.6 + and 9.7. + - na_ontap_software_update - added ``timeout`` option to give enough time for + the update to complete. + - update ``required:`` information. + - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same + thing for 2.8 and 2.9. + fragments: + - 20.8.0.yaml + modules: + - description: NetApp ONTAP create, delete, or modify vserver security file-directory + policy + name: na_ontap_file_directory_policy + namespace: '' + - description: NetApp ONTAP Run any cli command over plain SSH using paramiko. + name: na_ontap_ssh_command + namespace: '' + - description: NetApp ONTAP wait_for_condition. Loop over a get status request + until a condition is met. + name: na_ontap_wait_for_condition + namespace: '' + release_date: '2020-08-05' + 20.9.0: + changes: + bugfixes: + - na_ontap_* - change version_added from '2.6' to '2.6.0' where applicable to + satisfy sanity checker. + - na_ontap_cluster - ``check_mode`` is now working properly. + - na_ontap_interface - ``home_node`` is not required in pre-cluster mode. + - na_ontap_interface - ``role`` is not required if ``service_policy`` is present + and ONTAP version is 9.8. + - na_ontap_interface - traceback in get_interface if node is not reachable. + - na_ontap_job_schedule - allow ``job_minutes`` to set number to -1 for job + creation with REST too. + - na_ontap_qtree - fixed ``None is not subscriptable`` exception on rename operation. + - na_ontap_volume - fixed ``KeyError`` exception on ``size`` when reporting + creation error. + - netapp.py - uncaught exception (traceback) on zapi.NaApiError. 
+ minor_changes: + - na_ontap_cluster - ``node_name`` to set the node name when adding a node, + or as an alternative to `cluster_ip_address`` to remove a node. + - na_ontap_cluster - ``state`` can be set to ``absent`` to remove a node identified + with ``cluster_ip_address`` or ``node_name``. + - na_ontap_qtree - ``wait_for_completion`` and ``time_out`` to wait for qtree + deletion when using REST. + - na_ontap_quotas - ``soft_disk_limit`` and ``soft_file_limit`` for the quota + target. + - na_ontap_rest_info - Support for gather subsets - ``initiator_groups_info, + san_fcp_services, san_iscsi_credentials, san_iscsi_services, san_lun_maps, + storage_luns_info, storage_NVMe_namespaces.`` + fragments: + - 20.9.0.yaml + modules: + - description: NetApp ONTAP configure active directory + name: na_ontap_active_directory + namespace: '' + - description: NetApp ONTAP Add and Remove MetroCluster Mediator + name: na_ontap_mcc_mediator + namespace: '' + - description: NetApp ONTAP set up a MetroCluster + name: na_ontap_metrocluster + namespace: '' + release_date: '2020-09-02' + 21.1.0: + changes: + bugfixes: + - na_ontap_lun - REST expects 'all' for tiering policy and not 'backup'. + - na_ontap_quotas - Handle blank string idempotency issue for ``quota_target`` + in quotas module. + - na_ontap_rest_info - ``changed`` was set to "False" rather than boolean False. + - na_ontap_snapmirror - fix job update failures for load_sharing mirrors. + - na_ontap_snapmirror - report error when attempting to change relationship_type. + - na_ontap_snapmirror - wait up to 5 minutes for abort to complete before issuing + a delete. + - na_ontap_snmp - SNMP module wrong ``access_control`` issue and error handling + fix. + - na_ontap_volume - REST expects 'all' for tiering policy and not 'backup'. + - na_ontap_volume - detect and report error when attempting to change FlexVol + into FlexGroup. + - na_ontap_volume - report error if ``aggregate_name`` option is used with a + FlexGroup. 
+ minor_changes: + - general - improve error reporting when older version of netapp-lib is used. + - na_ontap_cluster - ``time_out`` to wait for cluster creation, adding and removing + a node. + - na_ontap_debug - connection diagnostics added for invalid ipaddress and DNS + hostname errors. + - na_ontap_firmware_upgrade - new option for firmware type ``storage`` added. + - na_ontap_info - deprecate ``state`` option. + - na_ontap_lun - new options ``total_size`` and ``total_size_unit`` when using + SAN application template. + - na_ontap_lun - support increasing lun_count and total_size when using SAN + application template. + - na_ontap_quota - allow to turn quota on/off without providing quota_target + or type. + - na_ontap_rest_info - deprecate ``state`` option. + - na_ontap_snapmirror - new option ``create_destination`` to automatically create + destination endpoint (ONTAP 9.7). + - na_ontap_snapmirror - new option ``destination_cluster`` to automatically + create destination SVM for SVM DR (ONTAP 9.7). + - na_ontap_snapmirror - new option ``source_cluster`` to automatically set SVM + peering (ONTAP 9.7). + - na_ontap_snapmirror - use REST API for create action if target supports it. (ZAPIs + are still used for all other actions). + - na_ontap_volume - use REST API for delete operation if targets supports it. + fragments: + - DEVOPS-2491.yaml + - DEVOPS-2928.yaml + - DEVOPS-3137.yaml + - DEVOPS-3242.yaml + - DEVOPS-3370.yaml + - DEVOPS-3439.yaml + - DEVOPS-3480.yaml + - DEVOPS-3490.yaml + - DEVOPS-3494.yaml + - DEVOPS-3497.yaml + - DEVOPS-3501.yaml + - DEVOPS-3510.yaml + modules: + - description: NetApp ONTAP Debug netapp-lib import and connection. + name: na_ontap_debug + namespace: '' + release_date: '2021-01-07' + 21.10.0: + changes: + bugfixes: + - all modules - traceback on ONTAP 9.3 (and earlier) when trying to detect REST + support. + - na_ontap_vserver_delete role - delete iSCSI igroups and CIFS server before + deleting vserver. 
+ minor_changes: + - na_ontap_cifs_server - ``force`` option is supported when state is absent + to ignore communication errors. + fragments: + - DEVOPS-4190.yaml + - DEVOPS-4231.yaml + release_date: '2021-08-12' + 21.11.0: + changes: + bugfixes: + - na_ontap_job_schedule - fix idempotency issue with REST when job_minutes is + set to -1. + - na_ontap_ldap_client - remove limitation on schema so that custom schemas + can be used. + minor_changes: + - na_ontap_interface - new option ``from_name`` to rename an interface. + - na_ontap_ntp - Added REST support to the ntp module + - na_ontap_ntp - Added REST support to the ntp module + - na_ontap_software_update - new option ``validate_after_download`` to run ONTAP + software update validation checks. + - na_ontap_software_update - remove ``absent`` as a choice for ``state`` as + it has no use. + - na_ontap_svm - ignore ``aggr_list`` with ``'*'`` when using REST. + - na_ontap_svm - new option ``ignore_rest_unsupported_options`` to ignore older + ZAPI options not available in REST. + - na_ontap_svm - new option ``services`` to allow and/or enable protocol services. + fragments: + - DEVOPS-2459.yaml + - DEVOPS-2459.yml + - DEVOPS-4218.yaml + - DEVOPS-4227.yaml + - DEVOPS-4235.yaml + - DEVOPS-4243.yaml + - DEVOPS-4255.yaml + - DEVOPS-4256.yaml + release_date: '2021-09-01' + 21.12.0: + changes: + bugfixes: + - na_ontap_job_schedule - cannot modify options not present in create when using + REST. + - na_ontap_job_schedule - fix idempotency issue with ZAPI when job_minutes is + set to -1. + - na_ontap_job_schedule - modify error if month is changed from some values + to all (-1) when using REST. + - na_ontap_job_schedule - modify error if month is present but not changed with + 0 offset when using REST. + - na_ontap_vserver_delete role - fix typos for cifs. + minor_changes: + - na_ontap_cluster - Added REST support to the cluster module. 
+ - na_ontap_firewall_policy - added ``none`` as a choice for ``service`` which + is supported from 9.8 ONTAP onwards. + - na_ontap_svm - new option ``max_volumes``. + - na_ontap_svm - support ``allowed protocols`` with REST for ONTAP 9.6 and later. + fragments: + - 0-copy_ignore_txt.yml + - DEVOPS-4123.yaml + - DEVOPS-4270.yaml + - DEVOPS-4288.yaml + - DEVOPS-4300.yaml + - DEVOPS-4320.yaml + release_date: '2021-10-06' + 21.13.0: + changes: + bugfixes: + - na_ontap_cluster - ``single_node_cluster`` was silently ignored with REST. + - na_ontap_cluster - switch to ZAPI when DELETE is required with ONTAP 9.6. + - na_ontap_snapmirror - ``source_path`` and ``source_hostname`` parameters are + not mandatory to delete snapmirror relationship when source cluster is unknown, + if specified it will delete snapmirror at destination and release the same + at source side. if not, it only deletes the snapmirror at destination and + will not look for source to perform snapmirror release. + - na_ontap_snapmirror - modify policy, schedule and other parameter failure + are fixed. + - na_ontap_snapshot - ``expiry_time`` required REST api, will return error if + set when using ZAPI. + - na_ontap_snapshot - ``snapmirror_label`` is supported with REST on ONTAP 9.7 + or higher, report error if used on ONTAP 9.6. + - na_ontap_storage_failover - KeyError on 'ha' if the system is not configured + as HA. + - na_ontap_svm - module will fail on init if a rest only and zapi only option are + used at the same time. + minor_changes: + - PR15 - allow usage of Ansible module group defaults - for Ansible 2.12+. + - na_ontap_cluster - add ``force`` option when deleting a node. + - na_ontap_interface - Added REST support to the interface module (for IP and + FC interfaces). + - na_ontap_net_vlan - Added REST support to the net vlan module. + - na_ontap_net_vlan - new REST options ``broadcast_domain``, ``ipspace`` and + ``enabled`` added. 
+ - na_ontap_object_store - new REST options ``owner`` and ``change_password``. + - na_ontap_object_store - support modifying an object store config with REST. + fragments: + - DEVOPS-3148.yaml + - DEVOPS-4196.yaml + - DEVOPS-4228.yaml + - DEVOPS-4289.yaml + - DEVOPS-4319.yaml + - DEVOPS-4334.yaml + - DEVOPS-4391.yaml + - DEVOPS-4392.yaml + - DEVOPS-4399.yaml + - DEVOPS-4401.yaml + - DEVOPS-4404.yaml + - DEVOPS-4435.yml + release_date: '2021-11-03' + 21.13.1: + changes: + bugfixes: + - cluster scoped modules are failing on FSx with 'Vserver API missing vserver + parameter' error. + fragments: + - DEVOPS-4439.yaml + release_date: '2021-11-05' + 21.14.0: + changes: + bugfixes: + - fix error where module will fail for ONTAP 9.6 if use_rest was set to auto + - na_ontap_cifs_local_user_modify - KeyError on ``description`` or ``full_name`` + with REST. + - na_ontap_cifs_local_user_modify - unexpected argument ``name`` error with + REST. + - na_ontap_export_policy - fix error if more than 1 vserver matched search name, + the wrong uuid could be given + - na_ontap_net_routes - metric was not always modified with ZAPI. + - na_ontap_net_routes - support cluster-scoped routes with REST. + - na_ontap_vserver_delete role - report error if ONTAP version is 9.6 or older. + minor_changes: + - na_ontap_aggregate - new option ``encryption`` to enable encryption with ZAPI. + - na_ontap_fcp -- Added REST support for FCP + - na_ontap_net_ifgrp - Added REST support to the net ifgrp module. + - na_ontap_net_ifgrp - new REST only options ``from_lag_ports``, ``broadcast_domain`` + and ``ipspace`` added. + - na_ontap_net_port - Added REST support to the net port module + - na_ontap_restit - new option ``wait_for_completion`` to support asynchronous + operations and wait for job completion. + - na_ontap_volume - Added REST support to the volume module + - na_ontap_volume_efficiency - new option ``storage_efficiency_mode`` for AFF + only with 9.10.1 or later. 
+ - na_ontap_vserver_delete role - added set_fact to accept ``netapp_{hostname|username|password}`` + or ``hostname,username and password`` variables. + - na_ontap_vserver_delete role - do not report an error if the vserver does + not exist. + - na_ontap_vserver_peer - Added REST support to the vserver_peer module + fragments: + - DEVOPS-2422.yaml + - DEVOPS-2459b.yaml + - DEVOPS-4119.yaml + - DEVOPS-4206.yaml + - DEVOPS-4312.yml + - DEVOPS-4339.yaml + - DEVOPS-4340.yaml + - DEVOPS-4344.yaml + - DEVOPS-4345.yaml + - DEVOPS-4457.yaml + - DEVOPS-4459.yaml + - DEVOPS-4460.yaml + - DEVOPS-4465.yml + - DEVOPS-4479.yaml + release_date: '2021-12-01' + 21.14.1: + changes: + bugfixes: + - na_ontap_net_ifgrp - fix error in modify ports with zapi. + fragments: + - DEVOPS-4487.yaml + release_date: '2021-12-06' + 21.15.0: + changes: + bugfixes: + - na_ontap_broadcast_domain - fix idempotency issue when ``ports`` has identical + values. + - na_ontap_info - fix KeyError on node for aggr_efficiency_info option against + a metrocluster system. + - na_ontap_volume - Fixed issue that would fail the module in REST when changing + `is_online` if two vserver volume had the same name. + - na_ontap_volume - If using REST and ONTAP 9.6 and `efficiency_policy` module + will fail as `efficiency_policy` is not supported in ONTAP 9.6. + - na_ontap_volume_efficiency - Removed restriction on policy name. + minor_changes: + - na_ontap_broadcast_domain - Added REST support to the broadcast domain module. + - na_ontap_broadcast_domain - new REST only option ``from_ipspace`` added. + - na_ontap_broadcast_domain_ports - warn about deprecation, fall back to ZAPI + or fail when REST is desired. + - na_ontap_export_policy_rule -- Added Rest support for Export Policy Rules + - na_ontap_firmware_upgrade - REST support to download firmware and reboot SP. + - na_ontap_license - Added REST support to the license module. 
+ - na_ontap_rest_info - update documentation for `fields` to clarify the list of + fields that are returned by default. + - na_ontap_svm - new REST options of svm admin_state ``stopped`` and ``running`` + added. + fragments: + - DEVOPS-1661.yaml + - DEVOPS-1665.yaml + - DEVOPS-4121.yaml + - DEVOPS-4175.yaml + - DEVOPS-4325.yml + - DEVOPS-4335.yaml + - DEVOPS-4338.yml + - DEVOPS-4501.yaml + - DEVOPS-4508.yaml + - DEVOPS-4526.yaml + - DEVOPS-4565.yaml + - DEVOPS-4566.yaml + - DEVOPS-4568.yaml + release_date: '2022-01-12' + 21.15.1: + changes: + bugfixes: + - na_ontap_export_policy_rule - Fixed bug that prevented ZAPI and REST calls from + working correctly + fragments: + - DEVOPS-4573.yaml + release_date: '2022-01-14' + 21.16.0: + changes: + bugfixes: + - four modules (mediator, metrocluster, security_certificates, wwpn_alias) would + report a None error when REST is not available. + - module_utils - fixed KeyError on Allow when using OPTIONS method and the API + failed. + - na_ontap_active_directory - Fixed idempotency and traceback issues. + - na_ontap_aggregate - Fixed KeyError on unmount_volumes when offlining a volume + if option is not set. + - na_ontap_aggregate - Report an error when attempting to change snaplock_type. + - na_ontap_igroup - ``force_remove_initiator`` option was ignored when removing + initiators from existing igroup. + - na_ontap_info - Add active_directory_account_info. + - na_ontap_security_certificates - ``intermediate_certificates`` option was + ignored. + - na_ontap_user - Fixed TypeError 'tuple' object does not support item assignment. + - na_ontap_user - Fixed issue when attempting to change password for absent user + when set_password is set. + - na_ontap_user - Fixed lock state is not set if password is not changed. + - na_ontap_volume - Fixed error when creating a flexGroup when ``aggregate_name`` + and ``aggr_list_multiplier`` are not set in rest. + - na_ontap_volume - Fixed error with unmounting junction_path in rest. 
+ - na_ontap_volume - report error when attempting to change the nas_application + tiering control from disallowed to required, or reciprocally. + minor_changes: + - na_ontap_aggregate - Added REST support. + - na_ontap_aggregate - Added ``disk_class`` option for REST and ZAPI. + - na_ontap_aggregate - Extended accepted ``disk_type`` values for ZAPI. + - na_ontap_cifs_server - Added REST support to the cifs server module. + - na_ontap_ports - Added REST support to the ports module. + - na_ontap_snapmirror - Added REST support to the na_ontap_snapmirror module + - na_ontap_volume - ``logical_space_enforcement`` to specify whether to perform + logical space accounting on the volume. + - na_ontap_volume - ``logical_space_reporting`` to specify whether to report + space logically on the volume. + - na_ontap_volume - ``tiering_minimum_cooling_days`` to specify how many days + must pass before inactive data in a volume using the Auto or Snapshot-Only + policy is considered cold and eligible for tiering. + - na_ontap_volume_clone - Added REST support. + fragments: + - DEVOPS-3515.yaml + - DEVOPS-4079.yaml + - DEVOPS-4179.yml + - DEVOPS-4331.yaml + - DEVOPS-4332.yaml + - DEVOPS-4337.yaml + - DEVOPS-4349.yaml + - DEVOPS-4393.yaml + - DEVOPS-4394.yaml + - DEVOPS-4527.yaml + - DEVOPS-4540.yaml + - DEVOPS-4554.yaml + - DEVOPS-4577.yaml + - DEVOPS-4609.yaml + - DEVOPS-4621.yaml + - DEVOPS-4623.yaml + release_date: '2022-02-02' + 21.17.0: + changes: + bugfixes: + - na_ontap_aggregate - Fixed UUID issue when attempting to attach object store + as part of creating the aggregate with REST. + - na_ontap_cifs_server - error out if ZAPI only options ``force`` or ``workgroup`` + are used with REST. + - na_ontap_cluster_peer - Fixed KeyError if both ``source_intercluster_lifs`` + and ``dest_intercluster_lifs`` not present in cluster create. + - na_ontap_rest_info - Fixed example with wrong indentation for ``use_python_keys``. 
+ minor_changes: + - all modules that only support ZAPI - warn when ``use_rest`` with a value of + ``always`` is ignored. + - na_ontap_cifs_acl - Added REST support to the cifs share access control module. + - na_ontap_cifs_acl - new option ``type`` for user-group-type. + - na_ontap_cifs_share - Added REST support to the cifs share module. + - na_ontap_cluster_peer - Added REST support to the cluster_peer module. + - na_ontap_lun_map - Added REST support. + - na_ontap_nfs - Added REST support. + - na_ontap_volume_clone - Added REST support. + fragments: + - DEVOPS-4329.yaml + - DEVOPS-4341.yaml + - DEVOPS-4343.yaml + - DEVOPS-4350.yaml + - DEVOPS-4604.yaml + - DEVOPS-4605.yaml + - DEVOPS-4645.yaml + - DEVOPS-4648.yaml + - DEVOPS-4676.yaml + - DEVOPS-4679.yaml + - DEVOPS-4711.yaml + release_date: '2022-03-02' + 21.17.1: + changes: + bugfixes: + - na_ontap_lun_map - fixed bugs resulting in REST support not working. + fragments: + - DEVOPS-4729.yml + release_date: '2022-03-07' + 21.17.2: + changes: + bugfixes: + - na_ontap_lun_map - Fixed bug when deleting lun map using REST. + - na_ontap_rest_info - Fixed an issue with adding field to specific info that + didn't have a direct REST equivalent. + fragments: + - DEVOPS-4719.yml + release_date: '2022-03-08' + 21.18.0: + changes: + bugfixes: + - Fixed ONTAP minor version ignored in checking minimum ONTAP version. + - na_ontap_aggregate - Fixed error in delete aggregate if the ``disk_count`` + is less than current disk count. + - na_ontap_autosupport - Fixed `partner_address` not working in REST. + - na_ontap_command - document that a READONLY user is not supported, even for + show commands. + - na_ontap_disk_options - ONTAP 9.10.1 returns on/off rather than True/False. + - na_ontap_info - Fixes issue with na_ontap_info failing in 9.1 because of ``job-schedule-cluster``. + - na_ontap_iscsi - Fixed issue with ``start_state`` always being set to stopped + when creating an ISCSI.
+ - na_ontap_lun_map - TypeError - '>' not supported between instances of 'int' + and 'str '. + - na_ontap_qtree - Fixed issue with ``oplocks`` not being changed during a modify + in Zapi. + - na_ontap_qtree - Fixed issue with ``oplocks`` not warning user about not being + supported in REST + - na_ontap_snapmirror - Added use_rest condition for the REST support to work + when use_rest `always`. + - na_ontap_snapshot - add error message if volume is not found with REST. + - na_ontap_snapshot - fix key error on volume when using REST. + - na_ontap_svm - fixed KeyError issue on protocols when vserver is stopped. + - na_ontap_volume - do not attempt to mount volume if current state is offline. + - na_ontap_volume - fix idempotency issue with compression settings when using + REST. + - na_ontap_vserver_peer - Added cluster peer accept code in REST. + - na_ontap_vserver_peer - Fixed AttributeError if ``dest_hostname`` or ``peer_options`` + not present. + - na_ontap_vserver_peer - Fixed ``local_name_for_peer`` and ``local_name_for_source`` + options silently ignored in REST. + - na_ontap_vserver_peer - Get peer cluster name if remote peer exist else use + local cluster name. + - na_ontap_vserver_peer - ignore job entry doesn't exist error with REST to + bypass ONTAP issue with FSx. + - na_ontap_vserver_peer - report error if SVM peer does not see a peering relationship + after create. + minor_changes: + - na_ontap_cluster_config role - use na_ontap_login_messages as na_ontap_motd + is deprecated. + - na_ontap_debug - report ansible version and ONTAP collection version. + - na_ontap_efficiency_policy - Added REST support. + - na_ontap_export_policy_rule - new option ``ntfs_unix_security`` for NTFS export + UNIX security options added. + - na_ontap_lun - Added REST support. + - na_ontap_snapmirror -- Added more descriptive error messages for REST + - na_ontap_snapshot_policy - Added REST support to the na_ontap_snapshot_policy + module. 
+ - na_ontap_svm - add support for web services (ssl modify) - REST only with + 9.8 or later. + - na_ontap_volume - add support for SnapLock - only for REST. + - na_ontap_volume - allow to modify volume after rename. + - na_ontap_volume - new option ``max_files`` to increase the inode count value. + - na_ontap_vserver_create role - support max_volumes option. + fragments: + - DEVOPS-2972.yaml + - DEVOPS-4333.yaml + - DEVOPS-4342.yml + - DEVOPS-4588.yaml + - DEVOPS-4612.yaml + - DEVOPS-4731.yaml + - DEVOPS-4736.yaml + - DEVOPS-4737.yaml + - DEVOPS-4743.yaml + - DEVOPS-4745.yaml + - DEVOPS-4747.yaml + - DEVOPS-4764.yaml + - DEVOPS-4804.yaml + - DEVOPS-4807.yaml + - DEVOPS-4808.yaml + - DEVOPS-4809.yaml + - DEVOPS-4813.yaml + - DEVOPS-4818.yaml + - DEVOPS-4832.yml + - DEVOPS-4834.yaml + - DEVOPS-4864.yaml + release_date: '2022-04-05' + 21.18.1: + changes: + bugfixes: + - na_ontap_iscsi - fixed error starting iscsi service on vserver where Service, + adapter, or operation already started. + - na_ontap_lun - Fixed KeyError on options ``force_resize``, ``force_remove`` + and ``force_remove_fenced`` in Zapi. + - na_ontap_lun - Fixed ``force_remove`` option silently ignored in REST. + - na_ontap_snapshot_policy - Do not validate parameter when state is ``absent`` + and fix KeyError on ``comment``. + fragments: + - DEVOPS-4872.yaml + - DEVOPS-4879.yaml + - DEVOPS-4975.yaml + release_date: '2022-04-13' + 21.19.0: + changes: + bugfixes: + - na_ontap_cifs - fixed `symlink_properties` option silently ignored for cifs + share creation when using REST. + - na_ontap_cifs - fixed error in modifying comment if it is not set while creating + CIFS share in REST. + - na_ontap_command - fix typo in example. + - na_ontap_interface - rename fails with 'inconsistency in rename action' for + cluster interface with REST. + - na_ontap_login_messages - fix typo in examples for username. 
+ - na_ontap_nfs - fix TypeError on NoneType as ``tcp_max_xfer_size`` is not supported + in earlier ONTAP versions. + - na_ontap_nfs - fix ``Extra input`` error with ZAPI for ``is-nfsv4-enabled``. + - na_ontap_quotas - fix idempotency issue on ``disk_limit`` and ``soft_disk_limit``. + - na_ontap_service_policy - fix examples in documentation. + - na_ontap_volume - QOS policy was not set when using NAS application. + - na_ontap_volume - correctly warn when attempting to modify NAS application. + - na_ontap_volume - do not set encrypt on modify, as it is already handled with + specialized ZAPI calls. + - na_ontap_volume - use ``time_out`` value when creating/modifying/deleting + volumes with REST rather than a hardcoded value. + minor_changes: + - na_ontap_cifs - Added ``unix_symlink`` option in REST. + - na_ontap_cifs_server - Added ``force`` option for create, delete and rename + cifs server when using REST. + - na_ontap_cifs_server - Added ``from_name`` option to rename cifs server when + using REST. + - na_ontap_igroup_initiator - Added REST support. + - na_ontap_interface - use REST when ``use_rest`` is set to ``auto``. + - na_ontap_iscsi - Added REST support. + - na_ontap_nvme - Added REST support. + - na_ontap_qos_adaptive_policy_group - warn about deprecation, fall back to + ZAPI or fail when REST is desired. + - na_ontap_qos_policy_group - Added REST only supported option ``adaptive_qos_options`` + for configuring adaptive policy. + - na_ontap_qos_policy_group - Added REST only supported option ``fixed_qos_options`` + for configuring max/min throughput policy. + - na_ontap_qos_policy_group - Added REST support. + - na_ontap_quotas - support TB as a unit, update doc with size format description. + - na_ontap_rest_info - new option ``owning_resource`` for REST info that requires + an owning resource.
For instance volume for a snapshot + - na_ontap_rest_info - support added for protocols/nfs/export-policies/rules + (Requires owning_resource to be set) + - na_ontap_rest_info - support added for storage/volumes/snapshots (Requires + owning_resource to be set) + - na_ontap_rest_info REST API's with hyphens in the name will now be converted + to underscores when ``use_python_keys`` is set to ``True`` so that YAML parsing + works correctly. + - na_ontap_rest_info support added for application/consistency-groups + - na_ontap_rest_info support added for cluster/fireware/history + - na_ontap_rest_info support added for cluster/mediators + - na_ontap_rest_info support added for cluster/metrocluster/dr-groups + - na_ontap_rest_info support added for cluster/metrocluster/interconnects + - na_ontap_rest_info support added for cluster/metrocluster/operations + - na_ontap_rest_info support added for cluster/ntp/keys + - na_ontap_rest_info support added for cluster/web + - na_ontap_rest_info support added for name-services/local-hosts + - na_ontap_rest_info support added for name-services/unix-groups + - na_ontap_rest_info support added for name-services/unix-users + - na_ontap_rest_info support added for network/ethernet/switch/ports + - na_ontap_rest_info support added for network/fc/ports + - na_ontap_rest_info support added for network/http-proxy + - na_ontap_rest_info support added for network/ip/bgp/peer-groups + - na_ontap_rest_info support added for protocols/audit + - na_ontap_rest_info support added for protocols/cifs/domains + - na_ontap_rest_info support added for protocols/cifs/local-groups + - na_ontap_rest_info support added for protocols/cifs/local-users + - na_ontap_rest_info support added for protocols/cifs/sessions + - na_ontap_rest_info support added for protocols/cifs/unix-symlink-mapping + - na_ontap_rest_info support added for protocols/cifs/users-and-groups/privilege + - na_ontap_rest_info support added for protocols/file-access-tracing/events + - 
na_ontap_rest_info support added for protocols/file-access-tracing/filters + - na_ontap_rest_info support added for protocols/fpolicy + - na_ontap_rest_info support added for protocols/locks + - na_ontap_rest_info support added for protocols/ndmp + - na_ontap_rest_info support added for protocols/ndmp/nodes + - na_ontap_rest_info support added for protocols/ndmp/sessions + - na_ontap_rest_info support added for protocols/ndmp/svms + - na_ontap_rest_info support added for protocols/nfs/connected-clients + - na_ontap_rest_info support added for protocols/nfs/kerberos/interfaces + - na_ontap_rest_info support added for protocols/nvme/subsystem-controllers + - na_ontap_rest_info support added for protocols/nvme/subsystem-maps + - na_ontap_rest_info support added for protocols/s3/buckets + - na_ontap_rest_info support added for protocols/s3/services + - na_ontap_rest_info support added for protocols/san/iscsi/sessions + - na_ontap_rest_info support added for protocols/san/portsets + - na_ontap_rest_info support added for protocols/san/vvol-bindings + - na_ontap_rest_info support added for security/anti-ransomware/suspects + - na_ontap_rest_info support added for security/audit + - na_ontap_rest_info support added for security/audit/messages + - na_ontap_rest_info support added for security/authentication/cluster/ad-proxy + - na_ontap_rest_info support added for security/authentication/cluster/ldap + - na_ontap_rest_info support added for security/authentication/cluster/nis + - na_ontap_rest_info support added for security/authentication/cluster/saml-sp + - na_ontap_rest_info support added for security/authentication/publickeys + - na_ontap_rest_info support added for security/azure-key-vaults + - na_ontap_rest_info support added for security/certificates + - na_ontap_rest_info support added for security/gcp-kms + - na_ontap_rest_info support added for security/ipsec + - na_ontap_rest_info support added for security/ipsec/ca-certificates + - na_ontap_rest_info support 
added for security/ipsec/policies + - na_ontap_rest_info support added for security/ipsec/security-associations + - na_ontap_rest_info support added for security/key-manager-configs + - na_ontap_rest_info support added for security/key-managers + - na_ontap_rest_info support added for security/key-stores + - na_ontap_rest_info support added for security/login/messages + - na_ontap_rest_info support added for security/ssh + - na_ontap_rest_info support added for security/ssh/svms + - na_ontap_rest_info support added for storage/cluster + - na_ontap_rest_info support added for storage/file/clone/split-loads + - na_ontap_rest_info support added for storage/file/clone/split-status + - na_ontap_rest_info support added for storage/file/clone/tokens + - na_ontap_rest_info support added for storage/monitored-files + - na_ontap_rest_info support added for storage/qos/workloads + - na_ontap_rest_info support added for storage/snaplock/audit-logs + - na_ontap_rest_info support added for storage/snaplock/compliance-clocks + - na_ontap_rest_info support added for storage/snaplock/event-retention/operations + - na_ontap_rest_info support added for storage/snaplock/event-retention/policies + - na_ontap_rest_info support added for storage/snaplock/file-fingerprints + - na_ontap_rest_info support added for storage/snaplock/litigations + - na_ontap_rest_info support added for storage/switches + - na_ontap_rest_info support added for storage/tape-devices + - na_ontap_rest_info support added for support/auto-update + - na_ontap_rest_info support added for support/auto-update/configurations + - na_ontap_rest_info support added for support/auto-update/updates + - na_ontap_rest_info support added for support/configuration-backup + - na_ontap_rest_info support added for support/configuration-backup/backups + - na_ontap_rest_info support added for support/coredump/coredumps + - na_ontap_rest_info support added for support/ems/messages + - na_ontap_rest_info support added for support/snmp + 
- na_ontap_rest_info support added for support/snmp/users + - na_ontap_rest_info support added for svm/migrations + - na_ontap_volume_autosize - improve error reporting. + fragments: + - DEVOPS-4415.yaml + - DEVOPS-4735.yaml + - DEVOPS-4769.yaml + - DEVOPS-4770.yaml + - DEVOPS-4779.yaml + - DEVOPS-4785.yaml + - DEVOPS-4786.yaml + - DEVOPS-4830.yaml + - DEVOPS-4898.yaml + - DEVOPS-4981.yaml + - DEVOPS-4984.yaml + - DEVOPS-4998.yaml + - DEVOPS-5015.yml + - DEVOPS-5016.yaml + - DEVOPS-5019.yaml + - DEVOPS-5026.yaml + - DEVOPS-5034.yaml + - DEVOPS-5047.yaml + modules: + - description: NetApp ONTAP S3 Buckets + name: na_ontap_s3_buckets + namespace: '' + release_date: '2022-05-04' + 21.19.1: + changes: + bugfixes: + - na_ontap_cluster_config - fix the role to be able to create intercluster LIFs + with REST (ipspace is required). + - na_ontap_interface - ignore ``vserver`` when using REST if role is one of + 'cluster', 'node-mgmt', 'intercluster', 'cluster-mgmt'. + - na_ontap_nvme - fixed ``status_admin`` option is ignored if set to False when + creating nvme service in REST. + - na_ontap_nvme - fixed invalid boolean value error for ``status_admin`` when + creating nvme service in ZAPI. + - na_ontap_service_policy - fixed error in modify by changing resulting json + of an existing record in REST. + - na_ontap_snapmirror - when using REST with a policy, fix AttributeError - + 'str' object has no attribute 'get'. + - na_ontap_snapmirror - when using ZAPI, wait for the relationship to be quiesced + before breaking. + fragments: + - DEVOPS-5062.yaml + - DEVOPS-5063.yaml + - DEVOPS-5065.yaml + - DEVOPS-5068.yaml + release_date: '2022-05-11' + 21.2.0: + changes: + bugfixes: + - All REST modules - ONTAP 9.4 and 9.5 are incorrectly detected as supporting + REST with ``use_rest:auto``. + - na_ontap_igroup - report error when attempting to modify an option that cannot + be changed. 
+ - na_ontap_lun - ``qos_policy_group`` could not be modified if a value was not + provided at creation. + - na_ontap_lun - tiering options were ignored in san_application_template. + - na_ontap_volume - report error from resize operation when using REST. + - na_ontap_volume - returns an error now if deleting a volume with REST api + fails. + minor_changes: + - azure_rm_netapp_account - new option ``active_directories`` to support SMB + volumes. + - azure_rm_netapp_volume - new option ``protocol_types`` to support SMB volumes. + - na_ontap_igroup - added REST support for ONTAP igroup creation, modification, + and deletion. + - na_ontap_lun - add ``comment`` option. + - na_ontap_lun - convert existing LUNs and supporting volume to a smart container + within a SAN application. + - na_ontap_lun - new option ``qos_adaptive_policy_group``. + - na_ontap_lun - new option ``scope`` to explicitly force operations on the + SAN application or a single LUN. + - na_ontap_node - added modify function for location and asset tag for node. + - na_ontap_snapmirror - add new options ``source_endpoint`` and ``destination_endpoint`` + to group endpoint suboptions. + - na_ontap_snapmirror - add new suboptions ``consistency_group_volumes`` and + ``ipspace`` to endpoint options. + - na_ontap_snapmirror - deprecate older options for source and destination paths, + volumes, vservers, and clusters. + - na_ontap_snapmirror - improve error reporting or warn when REST option is + not supported. + - na_ontap_snapmirror - report warning when relationship is present but not + healthy. 
+ fragments: + - DEVOPS-3175.yaml + - DEVOPS-3479.yaml + - DEVOPS-3526.yaml + - DEVOPS-3535.yaml + - DEVOPS-3540.yaml + - DEVOPS-3542.yaml + - DEVOPS-3543.yaml + - DEVOPS-3579.yaml + - DEVOPS-3580.yaml + - DEVOPS-3595.yaml + - DEVOPS-3623.yaml + - DEVOPS-3625.yaml + - DEVOPS-3633.yaml + modules: + - description: NetApp Ontap - Add or remove CIFS local group member + name: na_ontap_cifs_local_group_member + namespace: '' + - description: NetApp ONTAP Log Forward Configuration + name: na_ontap_log_forward + namespace: '' + - description: NetApp ONTAP LUN maps reporting nodes + name: na_ontap_lun_map_reporting_nodes + namespace: '' + - description: NetApp Ontap enables, disables or modifies volume efficiency + name: na_ontap_volume_efficiency + namespace: '' + release_date: '2021-02-04' + 21.20.0: + changes: + bugfixes: + - na_ontap_autosupport - TypeError on ``ondemand_enabled`` field with ONTAP + 9.11. + - na_ontap_autosupport - TypeError on ``support`` field with ONTAP 9.11. + - na_ontap_autosupport - fix idempotency issue on ``state`` field with ONTAP + 9.11. + - na_ontap_cluster_config - fix the role to be able to create intercluster LIFs + with REST (ipspace is required). + - na_ontap_interface - ignore ``vserver`` when using REST if role is one of + 'cluster', 'node-mgmt', 'intercluster', 'cluster-mgmt'. + - na_ontap_net_subnet - delete fails if ipspace is different than Default. + - na_ontap_nvme - fixed ``status_admin`` option is ignored if set to False when + creating nvme service in REST. + - na_ontap_nvme - fixed invalid boolean value error for ``status_admin`` when + creating nvme service in ZAPI. + - na_ontap_portset - fixed error when trying to remove partial ports from portset + if igroups are bound to it. + - na_ontap_portset - fixed idempotency issue when ``ports`` has identical values. + - na_ontap_quotas - fix another quota operation is currently in progress issue. + - na_ontap_quotas - fix idempotency issue on ``threshold`` option. 
+ - na_ontap_service_policy - fixed error in modify by changing resulting json + of an existing record in REST. + - na_ontap_snapmirror - fix error in snapmirror restore by changing option ``clean_up_failure`` + as optional when using ZAPI. + - na_ontap_snapmirror - fix issues where there was no wait on quiesce before + aborting. + - na_ontap_snapmirror - fix issues where there was no wait on the relationship + to end transferring. + - na_ontap_snapmirror - support for SSL certificate authentication for both + sides when using ONTAP. + - na_ontap_snapmirror - when using REST with a policy, fix AttributeError - + 'str' object has no attribute 'get'. + - na_ontap_snapmirror - when using ZAPI, wait for the relationship to be quiesced + before breaking. + - na_ontap_software_update - now reports changed=False when the package is already + present. + - na_ontap_user - fix idempotency issue with SSH with second_authentication_method. + - na_ontap_vscan_on_access_policy - fixed options ``filters``, ``file_ext_to_exclude`` + and ``paths_to_exclude`` cannot be reset to empty values in ZAPI. + - na_ontap_zapit - fix failure in precluster mode. + minor_changes: + - na_ontap_aggregate - updated ``disk_types`` in documentation. + - na_ontap_cifs_server - Added ``security`` options in REST. + - na_ontap_export_policy_rule - Add ``from_rule_index`` for both REST and ZAPI. + Change ``rule_index`` to required. + - na_ontap_nvme_namespace - Added REST support. + - na_ontap_nvme_subsystem - Added REST support. + - na_ontap_portset - Added REST support. + - na_ontap_snapmirror - new option ``peer_options`` to define source connection + parameters. + - na_ontap_snapmirror - new option ``transferring_time_out`` to define how long + to wait for transfer to complete on create or initialize. + - na_ontap_snapmirror - rewrite update for REST using POST to initiate transfer. + - na_ontap_snapmirror - when deleting, attempt to delete even when the relationship + cannot be broken. 
+ - na_ontap_software_update - added REST support. + - na_ontap_svm - Added documentation for ``allowed_protocol``, ndmp is default + in REST. + - na_ontap_user - add support for SAML authentication_method. + - na_ontap_vscan_on_access_policy - Added REST support. + - na_ontap_vscan_on_access_policy - new REST options ``scan_readonly_volumes`` + and ``only_execute_access`` added. + - na_ontap_vscan_on_demand_task - Added REST support. + - na_ontap_vserver_cifs_security - Added ``use_ldaps_for_ad_ldap`` and ``use_start_tls_for_ad_ldap`` + as mutually exclusive in ZAPI. + - na_ontap_vserver_cifs_security - Added option ``encryption_required_for_dc_connections`` + and ``use_ldaps_for_ad_ldap`` in ZAPI. + - na_ontap_vserver_cifs_security - fall back to ZAPI when ``use_rest`` is set + to ``auto`` or fail when REST is desired. + fragments: + - DEVOPS-4048.yaml + - DEVOPS-4449.yaml + - DEVOPS-4606.yaml + - DEVOPS-4780.yaml + - DEVOPS-4781.yaml + - DEVOPS-4784.yaml + - DEVOPS-4794.yaml + - DEVOPS-4801.yaml + - DEVOPS-4802.yaml + - DEVOPS-4803.yaml + - DEVOPS-4985.yaml + - DEVOPS-5079.yml + - DEVOPS-5082.yaml + - DEVOPS-5090.yaml + - DEVOPS-5109.yaml + - DEVOPS-5121.yaml + - DEVOPS-5127.yaml + - DEVOPS-5136.yaml + - DEVOPS-5137.yaml + - DEVOPS-5138.yaml + - DEVOPS-5161.yaml + modules: + - description: NetApp ONTAP S3 services + name: na_ontap_s3_services + namespace: '' + - description: NetApp ONTAP S3 users + name: na_ontap_s3_users + namespace: '' + release_date: '2022-06-08' + 21.21.0: + changes: + bugfixes: + - na_ontap_interface - FC interfaces - home_node should not be sent as location.home_node. + - na_ontap_interface - FC interfaces - home_port is not supported for ONTAP + 9.7 or earlier. + - na_ontap_interface - FC interfaces - scope is not supported. + - na_ontap_interface - FC interfaces - service_policy is not supported. + - na_ontap_interface - enforce requirement for address/netmask for interfaces + other than FC. 
+ - na_ontap_interface - fix idempotency issue for cluster scoped interfaces when + using REST. + - na_ontap_interface - fix potential node and uuid issues with LIF migration. + - na_ontap_interface - ignore 'none' when using REST rather than reporting unexpected + protocol. + - na_ontap_lun - catch ZAPI error on get LUN. + - na_ontap_lun - ignore resize error if no change was required. + - na_ontap_lun - report error if flexvol_name is missing when using ZAPI. + - na_ontap_net_subnet - fixed ``ipspace`` option ignored in getting net subnet. + - na_ontap_qtree - fix idempotency issue on ``unix_permissions`` option. + - na_ontap_s3_buckets - Module will not fail on create if no ``policy`` is given. + - na_ontap_s3_buckets - Module will set ``enabled`` during create. + - na_ontap_s3_buckets - Module work currently when ``sid`` is a number. + - na_ontap_snapmirror - fix potential issue when destination is using REST but + source is using ZAPI. + - na_ontap_snapmirror - relax check for source when using REST. + - na_ontap_svm - KeyError on CIFS when using REST with ONTAP 9.8 or lower. + - na_ontap_volume - ``volume_security_style`` was not modified if other security + options were present with ZAPI. + - na_ontap_volume - fix idempotency issue on ``unix_permissions`` option. + - na_ontap_vserver_create role - add rule index as it is now required. + known_issues: + - na_ontap_snapshot - added documentation to use UTC format for ``expiry_time``. + minor_changes: + - na_ontap_cluster_config role - support ``broadcast_domain`` and ``service_policy`` + with REST. + - na_ontap_info - add computed serial_hex and naa_id for lun_info. + - na_ontap_info - add quota-policy-info. + - na_ontap_interface - support ``broadcast_domain`` with REST. + - na_ontap_login_messages - support cluster scope when using REST. + - na_ontap_lun - support ``qos_adaptive_policy_group`` with REST. + - na_ontap_motd - deprecated in favor of ``na_ontap_login_messages``. 
Fail + when use_rest is set to ``always`` as REST is not supported. + - na_ontap_ntp - new option ``key_id`` added. + - na_ontap_qtree - Added ``unix_user`` and ``unix_group`` options in REST. + - na_ontap_rest_info - add computed serial_hex and naa_id for storage/luns when + serial_number is present. + - na_ontap_s3_users - ``secret_key`` and ``access_token`` are now returned when + creating a user. + - na_ontap_service_processor_network - Added REST support. + - na_ontap_snapmirror - improve error messages to be more specific and consistent. + - na_ontap_snapmirror - new option ``validate_source_path`` to disable this + validation. + - na_ontap_snapmirror - validate source endpoint for ZAPI and REST, accounting + for vserver local name. + - na_ontap_snapmirror - wait for the relationship to come back to idle after + a resync. + - na_ontap_unix_group - added REST support. + - na_ontap_unix_user - Added REST support. + - na_ontap_unix_user - Added new option ``primary_gid`` aliased to ``group_id``. + - na_ontap_user - accept ``service_processor`` as an alias for ``service-processor`` + with ZAPI, to be consistent with REST. + - na_ontap_volume - now defaults to REST with ``use_rest`` set to ``auto``, + like every other module. ZAPI can be forced with ``use_rest`` set to ``never``. + - na_ontap_vserver_create role - support ``broadcast_domain``, ``ipspace``, + and ``service_policy`` with REST.
+ fragments: + - DEVOPS-3632.yaml + - DEVOPS-4157.yaml + - DEVOPS-4336.yaml + - DEVOPS-4417.yaml + - DEVOPS-4790.yaml + - DEVOPS-4798.yaml + - DEVOPS-4799.yaml + - DEVOPS-4863.yaml + - DEVOPS-5084.yaml + - DEVOPS-5092.yaml + - DEVOPS-5152.yaml + - DEVOPS-5168.yaml + - DEVOPS-5174.yaml + - DEVOPS-5179.yaml + - DEVOPS-5188.yaml + - DEVOPS-5190.yaml + - DEVOPS-5215.yaml + - DEVOPS-5216.yaml + - DEVOPS-5220.yaml + - DEVOPS-5228.yaml + - DEVOPS-5229.yaml + - no-story-1.yaml + modules: + - description: NetApp ONTAP NTP key + name: na_ontap_ntp_key + namespace: '' + - description: NetApp ONTAP S3 groups + name: na_ontap_s3_groups + namespace: '' + - description: NetApp ONTAP S3 Policies + name: na_ontap_s3_policies + namespace: '' + release_date: '2022-07-12' + 21.22.0: + changes: + bugfixes: + - na_ontap_cluster_peer - report an error if there is an attempt to use the + already peered clusters. + - na_ontap_interface - fix error deleting fc interface if it is enabled in REST. + - na_ontap_license - fix intermittent KeyError when adding licenses with REST. + - na_ontap_lun - Added ``lun_modify`` after ``app_modify`` to fix idempotency + issue. + - na_ontap_name_service_switch - fix AttributeError 'NoneType' object has no + attribute 'get_children' if ``sources`` is '-' in current. + - na_ontap_name_service_switch - fix idempotency issue on ``sources`` option. + - na_ontap_security_key_manager - fix KeyError on ``node``. + - na_ontap_service_processor_network - allow manually configuring network if + all of ``ip_address``, ``netmask``, ``gateway_ip_address`` set and ``dhcp`` + not present in REST. + - na_ontap_service_processor_network - fail module when trying to disable ``dhcp`` + and not setting one of ``ip_address``, ``netmask``, ``gateway_ip_address`` + different than current. + - na_ontap_service_processor_network - fix ``wait_for_completion`` ignored when + trying to enable service processor network interface in ZAPI.
+ - na_ontap_service_processor_network - fix idempotency issue on ``dhcp`` option + in ZAPI. + - na_ontap_service_processor_network - fix setting ``dhcp`` v4 takes more than + ``wait_for_completion`` retries. + - na_ontap_software_update - improve error handling if image file is already + present. + - na_ontap_software_update - improve error handling when node is rebooting with + REST. + - na_ontap_software_update - when using REST with ONTAP 9.9 or later, timeout + value is properly set. + - na_ontap_user - enforce that all methods are under a single application. + - na_ontap_user - is_locked was not properly read with ZAPI, making the module + not idempotent. + minor_changes: + - all modules - do not fail on ZAPI EMS log when vserver does not exist. + - na_ontap_job_schedule - new option ``cluster`` added. + - na_ontap_ldap - fall back to ZAPI when ``use_rest`` is set to ``auto`` or + fail when REST is desired. + - na_ontap_ldap_client - Added REST support. + - na_ontap_ldap_client - Added ``ldaps_enabled`` option in ZAPI. + - na_ontap_license - return list of updated package names. + - na_ontap_name_service_switch - added REST support. + - na_ontap_nvme_subsystem - report subsystem as absent if vserver cannot be + found when attempting a delete. + - na_ontap_rest_info -- Will now include a message in return output about ``gather_subset`` + not supported by your version of ONTAP. + - na_ontap_rest_info -- Will now warn you if a ``gather_subset`` is not supported + by your version of ONTAP. + - na_ontap_security_key_manager - indicate that ``node`` is not used and is + deprecated. + - na_ontap_software_update - deleting a software package is now supported with + ZAPI and REST. + - na_ontap_svm - added vserver as a convenient alias for name when using module_defaults. + - na_ontap_wait_for_condition - added REST support. + - na_ontap_wait_for_condition - added ``snapmirror_relationship`` to wait on + ``state`` or ``transfer_state`` (REST only). 
+ fragments: + - DEVOPS-1926.yaml + - DEVOPS-4691.yaml + - DEVOPS-4773.yaml + - DEVOPS-4776.yaml + - DEVOPS-4857.yaml + - DEVOPS-4882.yaml + - DEVOPS-5241.yaml + - DEVOPS-5243.yaml + - DEVOPS-5246.yaml + - DEVOPS-5263.yaml + - DEVOPS-5268.yaml + - DEVOPS-5270.yaml + - DEVOPS-5271.yaml + - DEVOPS-5287.yaml + - DEVOPS-5297.yaml + - DEVOPS-5299.yaml + - DEVOPS-5304.yaml + release_date: '2022-08-03' + 21.23.0: + changes: + bugfixes: + - na_ontap_cifs_acl - use ``type`` if present when fetching existing ACL with + ZAPI. + - na_ontap_cifs_local_user_set_password - when using ZAPI, do not require cluster + admin privileges. + - na_ontap_cluster_config Role - incorrect license was shown - updated to GNU + General Public License v3.0 + - na_ontap_flexcache - properly use ``origin_cluster`` in GET but not in POST + when using REST. + - na_ontap_kerberos_realm - fix cannot modify ``comment`` option in ZAPI. + - na_ontap_lun_copy - fix key error on ``source_vserver`` option. + - na_ontap_ntp - fixed typeError on ``key_id`` field with ZAPI. + - na_ontap_s3_buckets - fix TypeError if ``conditions`` not present in policy + statements. + - na_ontap_s3_buckets - fix options that cannot be modified if not set in creating + s3 buckets. + - na_ontap_s3_buckets - updated correct choices in options ``audit_event_selector.access`` + and ``audit_event_selector.permission``. + minor_changes: + - all REST modules - new option ``force_ontap_version`` to bypass permission + issues with custom vsadmin roles. + - na_ontap_cifs_local_user_set_password - Added REST support. + - na_ontap_cluster_ha - added REST support. + - na_ontap_export_policy_rule - ``rule_index`` is now optional for create and + delete. + - na_ontap_export_policy_rule - new option ``force_delete_on_first_match`` to + support duplicate entries on delete. + - na_ontap_interface - improved validations for unsupported options with FC + interfaces. + - na_ontap_kerberos_realm - added REST support. 
+ - na_ontap_kerberos_realm - change ``kdc_port`` option type to int. + - na_ontap_lun_copy - added REST support. + - na_ontap_lun_map_reporting_nodes - added REST support. + - na_ontap_ntp - for ONTAP version 9.6 or below fall back to ZAPI when ``use_rest`` + is set to ``auto`` or fail when REST is desired. + - na_ontap_ntp_key - fail for ONTAP version 9.6 or below when ``use_rest`` is + set to ``auto`` or when REST is desired. + - na_ontap_rest_info - new option ``ignore_api_errors`` to report error in subset + rather than breaking execution. + - na_ontap_rest_info - support added for protocols/vscan/on-access-policies. + - na_ontap_rest_info - support added for protocols/vscan/on-demand-policies. + - na_ontap_rest_info - support added for protocols/vscan/scanner-pools. + - na_ontap_security_key_manager - added REST support. + - na_ontap_security_key_manager - new REST option ``onboard`` for onboard key + manager. + - na_ontap_security_key_manager - new REST options ``external`` and ``vserver`` + for external key manager. + - na_ontap_ucadapter - added REST support. + - na_ontap_user_role -- added REST support. + - na_ontap_volume - attempt to delete volume even when unmounting or offlining + failed. + fragments: + - DEVOPS-4197.yaml + - DEVOPS-4347.yaml + - DEVOPS-4716.yaml + - DEVOPS-4762.yaml + - DEVOPS-4763.yaml + - DEVOPS-4767.yaml + - DEVOPS-4771.yaml + - DEVOPS-4774.yaml + - DEVOPS-4775.yaml + - DEVOPS-4789.yaml + - DEVOPS-4800.yaml + - DEVOPS-5085.yaml + - DEVOPS-5223.yaml + - DEVOPS-5251.yaml + - DEVOPS-5285.yaml + - DEVOPS-5338.yaml + - DEVOPS-5367.yaml + - DEVOPS-5412.yaml + - DEVOPS-5413.yaml + - DEVOPS-5427.yaml + modules: + - description: NetApp ONTAP configuration for EMS event destination + name: na_ontap_ems_destination + namespace: '' + release_date: '2022-09-07' + 21.24.0: + changes: + bugfixes: + - na_ontap_cifs - fix KeyError on ``unix_symlink`` field when using REST. 
+ - na_ontap_cifs_acl - use ``type`` when deleting unix-user or unix-group from + ACL in ZAPI. + - na_ontap_command - do not run command in check_mode (thanks to darksoul42). + - na_ontap_ems_destination - fix idempotency issue when ``type`` value is rest_api. + - na_ontap_interface - improve error message when interface type is required + with REST. + - na_ontap_qtree - fix KeyError on unix_permissions. + - na_ontap_rest_cli - do not run command in check_mode (thanks to darksoul42). + - na_ontap_s3_groups - if `policies` is None module should no longer fail + - na_ontap_user - fix idempotency issue with 9.11 because of new is_ldap_fastbind + field. + - na_ontap_volume_efficiency - Missing fields in REST get should return None + and not crash module. + minor_changes: + - All REST GETs up to and including 9.11.1 that do not require a UUID/KEY to + be passed in are now supported + - na_ontap_cluster - ``timezone.name`` to modify cluster timezone. REST only. + - na_ontap_ems_destination - improve error messages - augment UT coverage (thanks + to bielawb). + - na_ontap_interface - ``dns_domain_name`` is now supported from ONTAP 9.9 or + later in REST. + - na_ontap_interface - ``is_dns_update_enabled`` is now supported from ONTAP + 9.9.1 or later in REST. + - na_ontap_interface - attempt to set interface_type to ``ip`` when ``protocols`` + is set to "none". + - na_ontap_net_subnet - added REST support. + - na_ontap_quotas - Added REST support. + - na_ontap_rest_info - Allowed the support of multiple subsets and warn when + using ``**`` in fields. + - na_ontap_rest_info - added support for ``network/ip/subnets``. + - na_ontap_rest_info - support added for cluster. + - na_ontap_rest_info - support added for cluster/counter/tables. + - na_ontap_rest_info - support added for cluster/licensing/capacity-pools. + - na_ontap_rest_info - support added for cluster/licensing/license-managers. + - na_ontap_rest_info - support added for cluster/metrocluster/svms. 
+ - na_ontap_rest_info - support added for cluster/sensors. + - na_ontap_rest_info - support added for name-services/cache/group-membership/settings. + - na_ontap_rest_info - support added for name-services/cache/host/settings. + - na_ontap_rest_info - support added for name-services/cache/netgroup/settings. + - na_ontap_rest_info - support added for name-services/cache/setting. + - na_ontap_rest_info - support added for name-services/cache/unix-group/settings. + - na_ontap_rest_info - support added for name-services/ldap-schemas. + - na_ontap_rest_info - support added for network/fc/fabrics. + - na_ontap_rest_info - support added for network/fc/interfaces. + - na_ontap_rest_info - support added for network/fc/interfaces. + - na_ontap_rest_info - support added for network/ip/subnets. + - na_ontap_rest_info - support added for protocols/cifs/connections. + - na_ontap_rest_info - support added for protocols/cifs/netbios. + - na_ontap_rest_info - support added for protocols/cifs/session/files. + - na_ontap_rest_info - support added for protocols/cifs/shadow-copies. + - na_ontap_rest_info - support added for protocols/cifs/shadowcopy-sets. + - na_ontap_rest_info - support added for protocols/nfs/connected-client-maps. + - na_ontap_rest_info - support added for security. + - na_ontap_rest_info - support added for security/multi-admin-verify. + - na_ontap_rest_info - support added for security/multi-admin-verify/approval-groups. + - na_ontap_rest_info - support added for security/multi-admin-verify/requests. + - na_ontap_rest_info - support added for security/multi-admin-verify/rules. + - na_ontap_rest_info - support added for storage/file/moves. + - na_ontap_rest_info - support added for storage/pools. + - na_ontap_restit - support multipart/form-data for read and write. + - na_ontap_security_ssh - Updates the SSH server configuration for the specified + SVM - REST only. + - na_ontap_snmp_traphosts - Added ``host`` option in REST. 
+ - na_ontap_svm - Added ``ndmp`` option to services in REST. + - na_ontap_vserver_create - ``firewall_policy`` is not set when ``service_policy`` + is present, as ``service_policy`` is preferred. + - na_ontap_vserver_create - ``protocol`` is now optional. ``role`` is not set + when protocol is absent. + - na_ontap_vserver_create - added ``interface_type``. Only a value of ``ip`` + is currently supported. + - na_ontap_vserver_create - added support for vserver management interface when + using REST. + fragments: + - DEVOPS-4788.yaml + - DEVOPS-4862.yaml + - DEVOPS-5017.yaml + - DEVOPS-5195.yaml + - DEVOPS-5275.yaml + - DEVOPS-5344.yaml + - DEVOPS-5354.yaml + - DEVOPS-5380.yaml + - DEVOPS-5414.yaml + - DEVOPS-5426.yaml + - DEVOPS-5430.yaml + - DEVOPS-5453.yaml + - DEVOPS-5457.yaml + - DEVOPS-5479.yaml + - DEVOPS-5481.yaml + - DEVOPS-5484.yaml + - DEVOPS-5485.yaml + - DEVOPS-5487.yaml + - DEVOPS-5503.yaml + - DEVOPS-5504.yaml + - DEVOPS-5505.yaml + - DEVOPS-5506.yaml + modules: + - description: NetApp ONTAP security ssh + name: na_ontap_security_ssh + namespace: '' + release_date: '2022-10-05' + 21.24.1: + changes: + bugfixes: + - new meta/execution-environment.yml is failing ansible-builder sanitize step. + fragments: + - DEVOPS-5540.yaml + release_date: '2022-10-06' + 21.3.0: + changes: + bugfixes: + - na_ontap_ldap_client - ``port`` was incorrectly used instead of ``tcp_port``. + - na_ontap_node - KeyError fix for location and asset-tag parameters in get_node(). + - na_ontap_snapmirror - SVM scoped policies were not found when using a destination + path with REST application. + - na_ontap_volume - changes in ``encrypt`` settings were ignored. + - na_ontap_volume - unmount volume before deleting it when using REST. + minor_changes: + - na_ontap_debug - improve error reporting for import errors on netapp_lib. + - na_ontap_flexcache - mount/unmount the FlexCache volume when using REST. 
+ - na_ontap_flexcache - support REST APIs in addition to ZAPI for create and + delete. + - na_ontap_flexcache - support for ``prepopulate`` option when using REST (requires + ONTAP 9.8). + - na_ontap_igroups - new option ``igroups`` to support nested igroups (requires + ONTAP 9.9). + - na_ontap_info - improve error reporting for import errors on netapp_lib, json, + xlmtodict. + - na_ontap_motd - deprecated module warning and to use na_ontap_login_messages. + - na_ontap_volume - new suboption ``dr_cache`` when creating flexcache using + NAS application template. + - na_ontap_volume_efficiency - to allow for FAS ONTAP systems to enable volume + efficiency when it does not exist and apply additional parameters. + - na_ontap_volume_efficiency - to allow for FAS ONTAP systems to enable volume + efficiency when it does not exist. + fragments: + - DEVOPS-2353.yaml + - DEVOPS-3536.yaml + - DEVOPS-3626.yaml + - DEVOPS-3654.yaml + - DEVOPS-3655.yaml + - DEVOPS-3662.yaml + - DEVOPS-3667.yaml + - DEVOPS-3668.yaml + - DEVOPS-3671.yaml + - DEVOPS-3677.yaml + - DEVOPS-3685.yaml + - DEVOPS-3716.yaml + - DEVOPS-3718.yaml + modules: + - description: NetApp ONTAP domain tunnel + name: na_ontap_domain_tunnel + namespace: '' + - description: NetApp ONTAP - Create, delete or modify an FPolicy policy. + name: na_ontap_fpolicy_policy + namespace: '' + - description: NetApp ONTAP modify security config for SSL. + name: na_ontap_security_config + namespace: '' + - description: Enables or disables NetApp ONTAP storage auto giveback for a specified + node + name: na_ontap_storage_auto_giveback + namespace: '' + - description: Enables or disables NetApp Ontap storage failover for a specified + node + name: na_ontap_storage_failover + namespace: '' + release_date: '2021-03-03' + 21.3.1: + changes: + bugfixes: + - na_ontap_snapmirror - check for consistency_group_volumes always fails on + 9.7, and cluster or ipspace when using endpoints with ZAPI. 
+ fragments: + - DEVOPS-3754.yaml + release_date: '2021-03-09' + 21.4.0: + changes: + bugfixes: + - na_ontap_autosupport - warn when password is present in ``proxy_url`` as it + makes the operation not idempotent. + - na_ontap_cluster - ignore ZAPI EMS log error when in pre-cluster mode. + - na_ontap_lun - SAN application is not supported on 9.6 and only partially + supported on 9.7 (no modify). + - na_ontap_svm - iscsi current status is not read correctly (misspelled issi). + minor_changes: + - na_ontap_igroups - new option ``initiator_names`` as a replacement for ``initiators`` + (still supported as an alias). + - na_ontap_igroups - new option ``initiator_objects`` to support initiator comments + (requires ONTAP 9.9). + - na_ontap_lun - allow new LUNs to use different igroup or os_type when using + SAN application. + - na_ontap_lun - ignore small increase (lower than provisioned) and small decrease + (< 10%) in ``total_size``. + - na_ontap_node - added REST support for ONTAP node modify and rename. + - na_ontap_volume - warn when attempting to modify application only options. + - na_ontap_volume_efficiency - new option 'start_ve_build_metadata' scan the + entire volume and generate fingerprint database. + - na_ontap_volume_efficiency - new option 'start_ve_delete_checkpoint' delete + checkpoint and start the operation from the beginning. + - na_ontap_volume_efficiency - new option 'start_ve_qos_policy' defines the + QoS policy for the operation. + - na_ontap_volume_efficiency - new option 'start_ve_queue_operation' queue if + an existing operation is already running. + - na_ontap_volume_efficiency - new option 'start_ve_scan_all' scan the entire + volume without applying share block optimization. + - na_ontap_volume_efficiency - new option 'start_ve_scan_old_data' scan the + file system to process all the existing data. + - na_ontap_volume_efficiency - new option 'stop_ve_all_operations' all running + and queued operations to be stopped. 
+ - na_ontap_volume_efficiency - new option to allow volume efficiency to be started + and stopped 'volume_efficiency'. + fragments: + - DEVOPS-3571.yaml + - DEVOPS-3628.yaml + - DEVOPS-3649.yaml + - DEVOPS-3757.yaml + - DEVOPS-3767.yaml + - DEVOPS-3772.yaml + - DEVOPS-3801.yaml + - DEVOPS-3811.yaml + - DEVOPS-3812.yml + modules: + - description: NetApp ONTAP modify local CIFS user. + name: na_ontap_cifs_local_user_modify + namespace: '' + - description: NetApp ONTAP modify storage disk options + name: na_ontap_disk_options + namespace: '' + - description: NetApp ONTAP FPolicy policy event configuration + name: na_ontap_fpolicy_event + namespace: '' + - description: NetApp ONTAP fPolicy external engine configuration. + name: na_ontap_fpolicy_ext_engine + namespace: '' + - description: NetApp ONTAP - Create, delete or modify an FPolicy policy scope + configuration. + name: na_ontap_fpolicy_scope + namespace: '' + - description: NetApp ONTAP - Enables or disables the specified fPolicy policy + name: na_ontap_fpolicy_status + namespace: '' + - description: NetApp ONTAP Sets the snaplock compliance clock. + name: na_ontap_snaplock_clock + namespace: '' + release_date: '2021-04-07' + 21.5.0: + changes: + bugfixes: + - na_ontap_qtree - wait for completion when creating or modifying a qtree with + REST. + - na_ontap_volume - ignore read error because of insufficient privileges for + efficiency options so that the module can be run as vsadmin. + major_changes: + - na_ontap_autosupport - Added REST support to the module. + minor_changes: + - na_ontap_autosupport - new option ``local_collection_enabled`` to specify + whether collection of AutoSupport data when the AutoSupport daemon is disabled. + - na_ontap_autosupport - new option ``max_http_size`` to specify delivery size + limit for the HTTP transport protocol (in bytes). + - na_ontap_autosupport - new option ``max_smtp_size`` to specify delivery size + limit for the SMTP transport protocol (in bytes). 
+ - na_ontap_autosupport - new option ``nht_data_enabled`` to specify whether + the disk health data is collected as part of the AutoSupport data. + - na_ontap_autosupport - new option ``ondemand_enabled`` to specify whether + the AutoSupport OnDemand Download feature is enabled. + - na_ontap_autosupport - new option ``perf_data_enabled`` to specify whether + the performance data is collected as part of the AutoSupport data. + - na_ontap_autosupport - new option ``private_data_removed`` to specify the + removal of customer-supplied data. + - na_ontap_autosupport - new option ``reminder_enabled`` to specify whether + AutoSupport reminders are enabled or disabled. + - na_ontap_autosupport - new option ``retry_count`` to specify the maximum number + of delivery attempts for an AutoSupport message. + - na_ontap_autosupport - new option ``validate_digital_certificate`` which when + set to true each node will validate the digital certificates that it receives. + - na_ontap_info - Added "autosupport_check_info" to the attributes that will + be collected when gathering info using the module. + fragments: + - DEVOPS-3830.yaml + - DEVOPS-3850.yaml + - DEVOPS-3870.yaml + - DEVOPS-3883.yaml + release_date: '2021-04-21' + 21.6.0: + changes: + bugfixes: + - na_ontap_autosupport - TypeError - '>' not supported between instances of + 'str' and 'list'. + - na_ontap_quotas - fail to reinitialize on create if quota is already on. + minor_changes: + - na_ontap_rest_info - Added "autosupport_check_info"/"support/autosupport/check" + to the attributes that will be collected when gathering info using the module. + - na_ontap_users - new option ``application_dicts`` to associate multiple authentication + methods to an application. + - na_ontap_users - new option ``application_strs`` to disambiguate ``applications``. + - na_ontap_users - new option ``replace_existing_apps_and_methods``. + - na_ontap_users - new suboption ``second_authentication_method`` with ``application_dicts`` + option. 
+ - na_ontap_vserver_peer - new options ``local_name_for_source`` and ``local_name_for_peer`` + added. + fragments: + - DEVOPS-3241.yaml + - DEVOPS-3807.yaml + - DEVOPS-3900.yaml + - DEVOPS-3926.yaml + - DEVOPS-3950.yaml + release_date: '2021-05-06' + 21.6.1: + changes: + bugfixes: + - na_ontap_autosupport - KeyError - No element by given name validate-digital-certificate. + fragments: + - DEVOPS-3971.yaml + release_date: '2021-05-11' + 21.7.0: + changes: + bugfixes: + - na_ontap_flexcache - one occurrence of msg missing in call to fail_json. + - na_ontap_igroup - one occurrence of msg missing in call to fail_json. + - na_ontap_igroups - nested igroups are not supported on ONTAP 9.9.0 but are + on 9.9.1. + - na_ontap_iscsi_security - IndexError list index out of range if vserver does + not exist + - na_ontap_iscsi_security - cannot change authentication_type + - na_ontap_lun - three occurrences of msg missing in call to fail_json. + - na_ontap_lun_map_reporting_nodes - one occurrence of msg missing in call to + fail_json. + - na_ontap_snapmirror - one occurrence of msg missing in call to fail_json. + minor_changes: + - License displayed correctly in GitHub + - na_ontap_cifs - new option ``comment`` to associate a description to a CIFS + share. + - na_ontap_disks - added REST support for the module. + - na_ontap_disks - added functionality to reassign spare disks from a partner + node to the desired node. + - na_ontap_disks - new option min_spares. + - na_ontap_lun - new suboption ``exclude_aggregates`` for SAN application. + - na_ontap_volume - new suboption ``exclude_aggregates`` for NAS application. 
+ fragments: + - DEVOPS-3952.yaml + - DEVOPS-3969.yaml + - DEVOPS-3973.yaml + - DEVOPS-3983.yaml + - DEVOPS-3994.yaml + - DEVOPS-4005.yaml + - DEVOPS-4010.yaml + modules: + - description: NetApp ONTAP publickey configuration + name: na_ontap_publickey + namespace: '' + - description: NetApp ONTAP service policy configuration + name: na_ontap_service_policy + namespace: '' + release_date: '2021-06-07' + 21.8.0: + changes: + bugfixes: + - all modules - fix traceback TypeError 'NoneType' object is not subscriptable + when hostname points to a web server. + - na_ontap_cluster_peer - KeyError on dest_cluster_name if destination is unreachable. + - na_ontap_cluster_peer - KeyError on username when using certificate. + - na_ontap_export_policy_rule - change ``anonymous_user_id`` type to str to + accept user name and user id. (A warning is now triggered when a number + is not quoted.) + - na_ontap_volume_clone - ``parent_vserver`` can not be given with ``junction_path``, + ``uid``, or ``gid`` + - na_ontap_vserver_peer - KeyError on username when using certificate. + minor_changes: + - na_ontap_cluster_peer - new option ``peer_options`` to use different credentials + on peer. + - na_ontap_debug - additional checks when REST is available to help debug vserver + connectivity issues. + - na_ontap_flexcache - corrected module name in documentation Examples + - na_ontap_net_port - change option types to bool and int respectively for ``autonegotiate_admin`` + and ``mtu``. + - na_ontap_net_port - new option ``up_admin`` to set administrative state. + - na_ontap_rest_info - add examples for ``parameters`` option. + - na_ontap_snapshot - add REST support to create, modify, rename, and delete + snapshot. + - na_ontap_snapshot - new option ``expiry_time``. + - na_ontap_volume - show warning when resize is ignored because threshold is + not reached. + - na_ontap_vserver_create role - add ``nfsv3``, ``nfsv4``, ``nfsv41`` options. 
+ - na_ontap_vserver_peer - new option ``peer_options`` to use different credentials + on peer. + fragments: + - DEVOPS-3483.yaml + - DEVOPS-3534.yaml + - DEVOPS-3615.yaml + - DEVOPS-3939.yaml + - DEVOPS-4022.yaml + - DEVOPS-4026.yaml + - DEVOPS-4039.yaml + - DEVOPS-4049.yaml + - DEVOPS-4060.yaml + - DEVOPS-4113.yaml + - DEVOPS-4114.yml + modules: + - description: NetApp ONTAP set local CIFS user password + name: na_ontap_cifs_local_user_set_password + namespace: '' + - description: NetApp ONTAP create or remove a File Directory security descriptor. + name: na_ontap_fdsd + namespace: '' + - description: NetApp ONTAP create or delete a file directory security policy + name: na_ontap_fdsp + namespace: '' + - description: NetApp ONTAP create, delete or modify File Directory security policy + tasks + name: na_ontap_fdspt + namespace: '' + - description: NetApp ONTAP File Directory Security Set. + name: na_ontap_fdss + namespace: '' + - description: NetApp ONTAP Assign partitions and disks to nodes. + name: na_ontap_partitions + namespace: '' + release_date: '2021-07-14' + 21.8.1: + changes: + bugfixes: + - all REST modules - 9.4 and 9.5 were incorrectly detected as supporting REST. + - na_ontap_snapmirror - improve error message when option is not supported with + ZAPI. + fragments: + - DEVOPS-4150.yaml + release_date: '2021-07-20' + 21.9.0: + changes: + bugfixes: + - na_ontap_job_schedule - fix documentation for REST ranges for months. + - na_ontap_object_store - when using REST, wait for job status to correctly + report errors. + - na_ontap_quotas - attempt to retry on ``13001:success`` ZAPI error. Add debug + data. + - na_ontap_rest_cli - removed incorrect statement indicating that console access + is required. + minor_changes: + - na_ontap_job_schedule - new option ``month_offset`` to explictly select 0 + or 1 for January. + - na_ontap_object_store - new option ``port``, ``certificate_validation_enabled``, + ``ssl_enabled`` for target server. 
+ - na_ontap_rest_info - All Info that exist in ``na_ontap_info`` that has REST + equivalents have been implemented. Note that the returned structure for REST + and the variable names in the structure is different from the ZAPI based ``na_ontap_info``. + Some default variables in ZAPI are no longer returned by default in REST and + will need to be specified using the ``field`` option. + - na_ontap_rest_info - The Default for ``gather_subset`` has been changed to + demo which returns ``cluster/software``, ``svm/svms``, ``cluster/nodes``. + To return all Info must specificly list ``all`` in your playbook. Do note + ``all`` is a very resource-intensive action and it is highly recommended to + call just the info/APIs you need. + - na_ontap_rest_info - The following info subsets have been added ``system_node_info``, + ``net_interface_info``, ``net_port_info``, ``security_login_account_info``, + ``vserver_peer_info``, ``cluster_image_info``, ``cluster_log_forwarding_info``, + ``metrocluster_info``, ``metrocluster_node_info``, ``net_dns_info``, ``net_interface_service_policy_info``, + ``vserver_nfs_info``, ``clock_info``, ``igroup_info``, ``vscan_status_info``, + ``vscan_connection_status_all_info``, ``storage_bridge_info``, ``nvme_info``, + ``nvme_interface_info``, ``nvme_subsystem_info``, ``cluster_switch_info``, + ``export_policy_info``, ``kerberos_realm_info``,``sis_info``, ``sis_policy_info``, + ``snapmirror_info``, ``snapmirror_destination_info``, ``snapmirror_policy_info``, + ``sys_cluster_alerts``, ``cifs_vserver_security_info`` + - na_ontap_rest_info - added file_directory_security to return the effective + permissions of the directory. When using file_directory_security it must be + called with gather_subsets and path and vserver must be specified in parameters. + - na_ontap_rest_info - new option ``use_python_keys`` to replace ``svm/svms`` + with ``svm_svms`` to simplify post processing. 
+ - na_ontap_snmp - Added REST support to the SNMP module + fragments: + - DEVOPS-4031.yaml + - DEVOPS-4116.yaml + - DEVOPS-4122.yaml + - DEVOPS-4140.yaml + - DEVOPS-4159.yaml + - DEVOPS-4161.yaml + - DEVOPS-4177.yaml + - DEVOPS-4191.yaml + release_date: '2021-08-03' + 22.0.0: + changes: + bugfixes: + - iso8601 filters - fix documentation generation issue. + - na_ontap_firmware_upgrade - when enabled, disruptive_update would always update + even when update is not required. + - na_ontap_info - Added vserver in key_fields of net_interface_info. + - na_ontap_interface - fix error where an ``address`` with an IPV6 ip would + try to modify each time playbook was run. + - na_ontap_ldap_client - ``servers`` not accepted when using ZAPI and ``ldap_servers`` + not handling a single server properly. + - na_ontap_rest_info - fixed error where module would fail silently when using + ``owning_resouce`` and a non-existent vserver. + - na_ontap_user_role - fixed Invalid JSON input. Expecting "privileges" to be + an array. + - na_ontap_volume - ``snapdir_access`` is not supported by REST and will currently + inform you now if you try to use it with REST. + - na_ontap_volume - fix KeyError on ``aggregate_name`` when trying to unencrypt + volume in ZAPI. + - na_ontap_volume - fix error when trying to move encrypted volume and ``encrypt`` + is True in REST. + - na_ontap_volume - fix error when trying to unencrypt volume in REST. + - na_ontap_volume - when deleting a volume, don't report a warning when unmount + is successful (error is None). + - tracing - redact headers and authentication secrets by default. + minor_changes: + - na_ontap_autosupport_invoke - warn when ``message`` alias is used as it will + be removed - it conflicts with Ansible internal variable. + - na_ontap_debug - report python executable version and path. + - na_ontap_export_policy_rule - ``allow_device_creation`` and ``chown_mode`` + is now supported in ZAPI. 
+ - na_ontap_export_policy_rule - ``allow_suid``, ``allow_device_creation`` and + ``chown_mode`` is now supported from ONTAP 9.9.1 or later in REST. + - na_ontap_ldap_client - new option ``skip_config_validation``. + - na_ontap_login_message - warn when ``message`` alias is used as it will be + removed - it conflicts with Ansible internal variable. + - na_ontap_motd - warn when ``message`` alias is used as it will be removed + - it conflicts with Ansible internal variable. + - na_ontap_net_routes - ``metric`` option is supported from ONTAP 9.11.0 or + later in REST. + - na_ontap_nfs - warn when ``nfsv4.1`` alias is used as it will be removed - + it does not match Ansible naming convention. + - na_ontap_rest_info - support added for protocols/active-directory. + - na_ontap_rest_info - support added for protocols/cifs/group-policies. + - na_ontap_rest_info - support added for protocols/nfs/connected-client-settings. + - na_ontap_rest_info - support added for security/aws-kms. + - na_ontap_service_policy - new options ``known_services`` and ``additional_services``. + - na_ontap_service_policy - update services for 9.11.1 - make it easier to add + new services. + - na_ontap_snapmirror - ``schedule`` is handled through ``policy`` for REST. + - na_ontap_snapmirror_policy - ``name`` added as an alias for ``policy_name``. + - na_ontap_snapmirror_policy - improve error reporting and report errors in + check_mode. + - na_ontap_snapmirror_policy - new option ``identity_preservation`` added. + - na_ontap_volume - ``wait_for_completion`` and ``check_interval`` is now supported + for volume move and encryption in REST. + - na_ontap_volume - new REST option ``analytics`` added. + - na_ontap_volume - new option ``max_wait_time`` added. + - tracing - allow to selectively trace headers and authentication. 
+ fragments: + - DEVOPS-4348.yaml + - DEVOPS-4367.yaml + - DEVOPS-4644.yaml + - DEVOPS-5409.yaml + - DEVOPS-5431.yaml + - DEVOPS-5531.yaml + - DEVOPS-5532.yaml + - DEVOPS-5537.yaml + - DEVOPS-5548.yaml + - DEVOPS-5592.yaml + - DEVOPS-5594.yaml + - DEVOPS-5595.yaml + - DEVOPS-5596.yaml + - DEVOPS-5611.yaml + - DEVOPS-5626.yaml + - DEVOPS-5629.yaml + - github-110.yaml + modules: + - description: NetApp ONTAP module to create, modify or delete bgp peer group. + name: na_ontap_bgp_peer_group + namespace: '' + - description: NetApp ONTAP NTFS file security permissions + name: na_ontap_file_security_permissions + namespace: '' + - description: NetApp ONTAP file security permissions ACL + name: na_ontap_file_security_permissions_acl + namespace: '' + - description: NetApp ONTAP local hosts + name: na_ontap_local_hosts + namespace: '' + - description: NetApp ONTAP name mappings + name: na_ontap_name_mappings + namespace: '' + release_date: '2022-11-02' + 22.0.1: + changes: + bugfixes: + - na_ontap_interface - fix ``netmask`` not idempotent in REST. + - na_ontap_mcc_mediator - Fix error that would prevent mediator deletion. + minor_changes: + - na_ontap_interface - allow setting ``netmask`` with netmask length in ZAPI. + fragments: + - DEVOPS-5589.yaml + - DEVOPS-5662.yaml + release_date: '2022-11-10' + 22.1.0: + changes: + bugfixes: + - na_ontap_active_directory - updated doc as only ZAPI is supported at present, + force an error with use_rest always. + - na_ontap_aggregate - allow adding disks before trying to offline aggregate. + - na_ontap_aggregate - fix ``service_state`` option skipped if it's set to offline + in create. + - na_ontap_cg_snapshot - updated doc with deprecation warning as it is a ZAPI + only module. + - na_ontap_cifs_server - fix ``service_state`` is stopped when trying to modify + cifs server in REST. + - na_ontap_file_directory_policy - updated doc with deprecation warning as it + is a ZAPI only module. 
+ - na_ontap_file_security_permissions - updated notes to indicate ONTAP 9.9.1 + or later is required. + - na_ontap_file_security_permissions_acl - updated notes to indicate ONTAP 9.9.1 + or later is required. + - na_ontap_interface - fix cannot set ``location.node.name`` and ``location.home_node.name`` + error when creating or modifying fc interface. + - na_ontap_interface - fix unexpected argument error with ``ipspace`` when trying + to get fc interface. + - na_ontap_qtree - fix cannot get current qtree if enclosed in curly braces. + - na_ontap_quota_policy - updated doc with deprecation warning as it is a ZAPI + only module. + - na_ontap_quotas - fix default tree quota rule gets modified when ``quota_target`` + is set in REST. + - na_ontap_quotas - fix user/group quota rule without qtree gets modified when + ``qtree`` is set. + - na_ontap_snapmirror_policy - fixed idempotency issue on ``identity_preservation`` + option when using REST. + - na_ontap_svm_options - updated doc with deprecation warning as it is a ZAPI + only module. + minor_changes: + - na_ontap_aggregate - add ``name`` to modify in module output if aggregate + is renamed. + - na_ontap_aggregate - add support for ``service_state`` option from ONTAP 9.11.1 + or later in REST. + - na_ontap_aggregate - error if ``unmount_volumes`` set in REST, by default + REST unmount volumes when trying to offline aggregate. + - na_ontap_aggregate - fix examples in documentation. + - na_ontap_cifs_local_group_member - Added REST API support to retrieve, add + and remove CIFS group member. + - na_ontap_cifs_local_group_member - REST support is from ONTAP 9.10.1 or later. + - na_ontap_cifs_server - skip ``service_state`` option if not set in create. + - na_ontap_interface - error when try to migrate fc interface in REST. + - na_ontap_interface - new option ``probe_port`` for Azure load balancer. + - na_ontap_quotas - for qtree type, allow quota_target in path format /vol/vol_name/qtree_name + in REST. 
+ - na_ontap_snapmirror_policy - new option ``copy_all_source_snapshots`` added + in REST. + - na_ontap_volume - report error if vserver does not exist or is not a data + vserver on create. + fragments: + - DEVOPS-5604.yaml + - DEVOPS-5659.yaml + - DEVOPS-5665.yaml + - DEVOPS-5666.yaml + - DEVOPS-5677.yaml + - DEVOPS-5678.yaml + - DEVOPS-5696.yaml + - DEVOPS-5711.yaml + - DEVOPS-5713.yaml + - DEVOPS-5733.yaml + - DEVOPS-5734.yaml + modules: + - description: NetApp Ontap - create, delete or modify CIFS local group. + name: na_ontap_cifs_local_group + namespace: '' + - description: NetApp ONTAP module to add or delete ipsec ca certificate. + name: na_ontap_security_ipsec_ca_certificate + namespace: '' + - description: NetApp ONTAP module to configure IPsec config. + name: na_ontap_security_ipsec_config + namespace: '' + - description: NetApp ONTAP module to create, modify or delete security IPsec + policy. + name: na_ontap_security_ipsec_policy + namespace: '' + release_date: '2022-12-07' + 22.2.0: + changes: + bugfixes: + - na_ontap_quotas - fix duplicate entry error when trying to add quota rule + in REST. + - na_ontap_quotas - fix entry does not exist error when trying to modify quota + status in REST. + - na_ontap_security_ipsec_policy - fix KeyError on ``authentication_method``. + - na_ontap_security_ipsec_policy - fix cannot get current security IPsec policy + with ipspace. + - na_ontap_security_key_manager - requires 9.7+ to work with REST. + - na_ontap_snapmirror_policy - deleting all retention rules would trigger an + error when the existing policy requires at least one rule. + - na_ontap_snapmirror_policy - fix desired policy type not configured in cli + with REST. + - na_ontap_snapmirror_policy - index error on rules with ONTAP 9.12.1 as not + all fields are present. + - na_ontap_volume -- fixed bug preventing unmount and taking a volume off line + at the same time + minor_changes: + - na_ontap_active_directory - REST requires ONTAP 9.12.1 or later. 
+ - na_ontap_active_directory - add ``fqdn`` as aliases for ``domain``. + - na_ontap_interface - new option ``fail_if_subnet_conflicts`` - requires REST + and ONTAP 9.11.1 or later. + - na_ontap_interface - option ``subnet_name`` is now supported with REST with + ONTAP 9.11.1 or later. + - na_ontap_iscsi - new option ``target_alias`` added in REST. + - na_ontap_snapmirror - support ``schedule`` with REST and ONTAP 9.11.1, add + alias ``transfer_schedule``. + - na_ontap_snapmirror_policy - Added new choices sync and async for policy type + in REST. + - na_ontap_snapmirror_policy - Added unsupported options in ZAPI. + - na_ontap_snapmirror_policy - add support for cluster scoped policy with REST. + - na_ontap_snapmirror_policy - new option ``copy_latest_source_snapshot``, ``create_snapshot_on_source`` + and ``sync_type`` added in REST. + - na_ontap_snapmirror_policy - new option ``transfer_schedule`` for async policy + types. + - na_ontap_snapmirror_policy - warn when replacing policy type ``async_mirror``, + ``mirror_vault`` and ``vault`` with policy type ``async`` and ``strict_sync_mirror``, + ``sync_mirror`` with ``sync`` in REST. + - na_ontap_svm - warn in case of mismatch in language option spelling. + fragments: + - DEVOPS-5507.yaml + - DEVOPS-5606.yaml + - DEVOPS-5671.yaml + - DEVOPS-5725.yaml + - DEVOPS-5735.yaml + - DEVOPS-5737.yaml + - DEVOPS-5760.yaml + - DEVOPS-5761.yaml + - DEVOPS-5774.yaml + - DEVOPS-5784.yaml + - DEVOPS-5788.yaml + modules: + - description: NetApp ONTAP local CIFS user. + name: na_ontap_cifs_local_user + namespace: '' + release_date: '2023-01-03' + 22.3.0: + changes: + bugfixes: + - na_ontap_aggregate - try to offline aggregate when disk add operation is in + progress in ZAPI. + - na_ontap_interface - fix idempotency issue when ``home_port`` not set in creating + FC interface. + - na_ontap_rest_info - fix field issue with private/cli and support/autosupport/check + APIs. 
+ - na_ontap_snapshot - fix cannot modify ``snapmirror_label``, ``expiry_time`` + and ``comment`` if not configured in create. + - na_ontap_user_role - fix AttributeError 'NetAppOntapUserRole' object has no + attribute 'name'. + - na_ontap_user_role - fix KeyError on ``vserver``, ``command_directory_name`` + in ZAPI and ``path``, ``query`` in REST. + - na_ontap_user_role - fix duplicate entry error in ZAPI. + - na_ontap_user_role - fix entry does not exist error when trying to delete + privilege in REST. + - na_ontap_volume_efficiency - fix idempotent issue when state is absent and + efficiency options are set in ZAPI. + minor_changes: + - na_ontap_aggregate - new option ``allow_flexgroups`` added. + - na_ontap_cifs - new options ``access_based_enumeration``, ``change_notify``, + ``encryption``, ``home_directory``, ``oplocks``, ``show_snapshot``, ``allow_unencrypted_access``, + ``namespace_caching`` and ``continuously_available`` added in REST. + - na_ontap_dns - ``skip_validation`` option requires 9.9.1 or later with REST + and ignored for cluster DNS operations. + - na_ontap_dns - support cluster scope for modify and delete. + - na_ontap_interface - do not attempt to migrate FC interface if desired ``home_port``, + ``home_node`` and ``current_port``, ``current_node`` are same. + - na_ontap_license - support for NLF v2 license files. + - na_ontap_nfs - new options ``root``, ``windows`` and ``security`` added in + REST. + - na_ontap_user_role - ``command_directory_name`` is required if ``privileges`` + not set in REST. + - na_ontap_user_role - ``path`` is required if ``privileges`` set in REST. + - na_ontap_volume_efficiency - REST support for ``policy`` requires 9.7 or later, + ``path`` requires 9.9.1 or later and ``volume_efficiency`` and ``start_ve_scan_old_data`` + requires 9.11.1 or later. 
+ - na_ontap_volume_efficiency - ``schedule``, ``start_ve_scan_all``, ``start_ve_build_metadata``, + ``start_ve_delete_checkpoint``, ``start_ve_queue_operation``, ``start_ve_qos_policy`` + and ``stop_ve_all_operations`` options are not supported with REST. + - na_ontap_volume_efficiency - new option ``volume_name`` added. + - na_ontap_volume_efficiency - updated private cli with REST API. + fragments: + - DEVOPS-5189.yaml + - DEVOPS-5312.yaml + - DEVOPS-5415.yaml + - DEVOPS-5536.yaml + - DEVOPS-5628.yaml + - DEVOPS-5738.yaml + - DEVOPS-5757.yaml + - DEVOPS-5790.yaml + - DEVOPS-5807.yaml + - DEVOPS-5809.yml + - DEVOPS-5812.yaml + - DEVOPS-5819.yaml + - DEVOPS-5820.yaml + - DEVOPS-5844.yaml + modules: + - description: NetApp Ontap - create, delete or modify vserver audit configuration. + name: na_ontap_vserver_audit + namespace: '' + - description: NetApp Ontap - create, delete or modify vserver peer permission. + name: na_ontap_vserver_peer_permissions + namespace: '' + release_date: '2023-02-01' + 22.4.0: + changes: + bugfixes: + - na_ontap_interface - fix incorrect warning raised when try to rename interface. + - na_ontap_ldap_client - fix KeyError on ``name`` in ZAPI. + - na_ontap_ldap_client - fix duplicate entry error when used cluster vserver + in REST. + - na_ontap_san_create - Role documentation correct to from nas to san + - na_ontap_user - fix KeyError vserver in ZAPI. + - na_ontap_user_role - report error when command/command directory path set + in REST for ONTAP earlier versions. + - na_ontap_volume - fix error when try to unmount volume and modify snaplock + attribute. + - na_ontap_volume - fix idempotent issue when try to offline and modify other + volume options. + - na_ontap_vserver_audit - Added ``log_path`` option in modify. + - na_ontap_vserver_audit - fix invalid field value error of log retention count + and duration. + minor_changes: + - na_ontap_rest_cli - returns changed only for verbs POST, PATCH and DELETE. 
+ - na_ontap_security_config - Added support for protocol version ``TLSV1.3``. + - na_ontap_security_config - Replaced private cli with REST API for GET and + PATCH. + - na_ontap_security_config - new option ``supported_cipher_suites`` added in + REST. + - na_ontap_snapmirror - new option ``identity_preservation`` added in REST. + - na_ontap_snapmirror - wait 600 seconds for snapmirror creation to complete + in REST. + - na_ontap_user_role - ``command_directory_name`` requires 9.11.1 or later with + REST. + - na_ontap_user_role - add support for rest-role ``privileges.access`` choices + ``read_create``, ``read_modify`` and ``read_create_modify``, supported only + with REST and requires ONTAP 9.11.1 or later versions. + fragments: + - DEVOPS-5310.yaml + - DEVOPS-5591.yaml + - DEVOPS-5808.yaml + - DEVOPS-5892.yaml + - DEVOPS-5899.yaml + - DEVOPS-5910.yaml + - DEVOPS-5913.yaml + - DEVOPS-5917.yaml + - DEVOPS-5919.yaml + - DEVOPS-5926.yaml + - DEVOPS-5938.yaml + - DEVOPS-5948.yaml + modules: + - description: NetApp ONTAP EMS Filter + name: na_ontap_ems_filter + namespace: '' + release_date: '2023-03-06' + 22.4.1: + changes: + bugfixes: + - na_ontap_snapmirror - fix invalid value error for return_timeout, modified + the value to 120 seconds. + fragments: + - DEVOPS-5952.yaml + release_date: '2023-03-07' + 22.5.0: + changes: + bugfixes: + - na_ontap_cifs - throw error if set ``unix_symlink`` in ZAPI. + - na_ontap_cifs - throw error if used options that require recent ONTAP version. + - na_ontap_file_security_permissions - error if more than one desired ACLs has + same user, access, access_control and apply_to. + - na_ontap_file_security_permissions - fix TypeError when current acls is None. + - na_ontap_file_security_permissions - fix idempotency issue on ``acls.propagation_mode`` + option. + - na_ontap_ipspace - fix cannot delete ipspace if ``from_ipspace`` is present. + - na_ontap_iscsi_security - error module if use_rest never is set. 
+ - na_ontap_iscsi_security - fix KeyError on ``outbound_username`` option. + - na_ontap_qtree - ignore job entry does not exist error when creating qtree + with REST to bypass ONTAP issue with FSx. + - na_ontap_quotas - ignore job entry does not exist error when creating quota + with REST to bypass ONTAP issue with FSx. + - na_ontap_security_config - fix error on specifying protocol version ``TLSv1.1`` + when fips is enabled. + - na_ontap_snapmirror - Added option ``identity_preservation`` support from + ONTAP 9.11.1 in REST. + - na_ontap_snapmirror - error if identity_preservation set in ZAPI. + minor_changes: + - na_ontap_cifs - new options ``browsable`` and ``show_previous_versions`` added + in REST. + - na_ontap_cifs - removed default value for ``unix_symlink`` as its not supported + with ZAPI. + - na_ontap_cifs - updated documentation and examples for REST. + - na_ontap_file_security_permissions - updated module examples. + - na_ontap_ipspace - improved module fail error message in REST. + - na_ontap_rest_info - improved documentation for ``parameters`` option. + - na_ontap_security_config - updated documentation for ``supported_cipher_suites``. + - na_ontap_user - option ``vserver`` is not required with REST, ignore this + option to create cluster scoped user. + fragments: + - DEVOPS-3230.yaml + - DEVOPS-5816.yaml + - DEVOPS-5845.yaml + - DEVOPS-5859.yaml + - DEVOPS-5894.yaml + - DEVOPS-5960.yaml + - DEVOPS-5972.yaml + - DEVOPS-5983.yaml + - DEVOPS-5986.yaml + - DEVOPS-6005.yaml + release_date: '2023-04-05' + 22.6.0: + changes: + bugfixes: + - na_ontap_export_policy - fix cannot delete export policy if ``from_name`` + option is set. + - na_ontap_file_security_permissions_acl - fix idempotent issue on ``propagation_mode`` + option. + - na_ontap_qos_adaptive_policy_group - rename group when from_name is present + and state is present. + - na_ontap_qos_policy_group - one occurrence of msg missing in call to fail_json. 
+ - na_ontap_s3_groups - fix cannot modify ``policies`` if not configured in create. + - na_ontap_s3_groups - fix error when current s3 groups has no users configured. + - na_ontap_security_certificates - fix duplicate entry error when ``vserver`` + option is set with admin vserver. + - na_ontap_snapmirror_policy - fix cannot disable ``is_network_compression_enabled`` + in REST. + - na_ontap_svm - skip modify validation when trying to delete svm. + minor_changes: + - na_ontap_aggregate - new REST only option ``tags`` added, requires ONTAP 9.13.1 + or later version. + - na_ontap_broadcast_domain - skip checking modify when ``state`` is absent. + - na_ontap_export_policy - added ``name`` to modify in module output if export + policy is renamed. + - na_ontap_qos_policy_group - new REST only option ``adaptive_qos_options.block_size`` + added, requires ONTAP 9.10.1 or later version. + - na_ontap_qos_policy_group - skip checking modify when ``state`` is absent. + - na_ontap_s3_buckets - new option ``type`` added, requires ONTAP 9.12.1 or + later. + - na_ontap_volume - new REST only option ``tags`` added, requires ONTAP 9.13.1 + or later version. + - retry create or modify when getting temporarily locked from changes error + in REST. + fragments: + - DEVOPS-6001.yaml + - DEVOPS-6014.yaml + - DEVOPS-6015.yaml + - DEVOPS-6191.yaml + - DEVOPS-6192.yaml + - DEVOPS-6193.yaml + - DEVOPS-6195.yaml + - DEVOPS-6209.yaml + - DEVOPS-6233.yaml + - DEVOPS-6235.yaml + modules: + - description: NetApp ONTAP module to modify kerberos interface. + name: na_ontap_kerberos_interface + namespace: '' + release_date: '2023-05-03' + 22.7.0: + changes: + bugfixes: + - na_ontap_login_messages - fix ``banner`` and ``motd_message`` not idempotent + when trailing '\n' is present. + - na_ontap_login_messages - fix idempotent issue on ``show_cluster_motd`` option + when try to set banner or motd_message for the first time in REST. 
+ minor_changes: + - na_ontap_name_mappings - added choices ``s3_win`` and ``s3_unix`` to ``direction``, + requires ONTAP 9.12.1 or later. + - na_ontap_s3_buckets - new option ``nas_path`` added, requires ONTAP 9.12.1 + or later. + fragments: + - DEVOPS-6262.yaml + - DEVOPS-6266.yaml + modules: + - description: NetApp ONTAP configure active directory preferred domain controllers + name: na_ontap_active_directory_domain_controllers + namespace: '' + release_date: '2023-06-09' diff --git a/ansible_collections/netapp/ontap/changelogs/config.yaml b/ansible_collections/netapp/ontap/changelogs/config.yaml new file mode 100644 index 000000000..ee7745952 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/config.yaml @@ -0,0 +1,32 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: true +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: NetApp ONTAP Collection +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/0-copy_ignore_txt.yml b/ansible_collections/netapp/ontap/changelogs/fragments/0-copy_ignore_txt.yml new file mode 100644 index 000000000..1520c1771 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/0-copy_ignore_txt.yml @@ -0,0 +1,4 @@ +--- +trivial: + - Copy ignore.txt. + - Update UT requirements. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/19.10.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/19.10.0.yaml new file mode 100644 index 000000000..533eddf83 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/19.10.0.yaml @@ -0,0 +1,40 @@ +minor_changes: + - na_ontap_command - ``vserver`` - to allow command to run as either cluster admin or vserver admin. To run as vserver admin you must use the vserver option. + - na_ontap_motd - rename ``message`` to ``motd_message`` to avoid conflict with Ansible internal variable name. + - na_ontap_nvme_namespace - ``size_unit`` to specify size in different units. + - na_ontap_snapshot_policy - ``prefix`` - option to use for creating snapshot policy. + - | + Added REST support to existing modules. + By default, the module will use REST if the target system supports it, and the options are supported. Otherwise, it will switch back to ZAPI. + This behavior can be controlled with the ``use_rest`` option. + Always - to force REST. The module fails and reports an error if REST cannot be used. + Never - to force ZAPI. This could be useful if you find some incompatibility with REST, or want to confirm the behavior is identical between REST and ZAPI. + Auto - the default, as described above. + - na_ontap_ipspace - REST support + - na_ontap_export_policy - REST support + - na_ontap_ndmp - REST support - only ``enable`` and ``authtype`` are supported with REST + - na_ontap_net_routes - REST support + - na_ontap_qtree - REST support - ``oplocks`` is not supported with REST, defaults to enable. + - na_ontap_svm - REST support - ``root_volume``, ``root_volume_aggregate``, ``root_volume_security_style`` are not supported with REST. 
+ - na_ontap_job_schedule - REST support + - na_ontap_cluster_config - role updated to support a cleaner playbook + - na_ontap_vserver_create - role updated to support a cleaner playbook + - na_ontap_nas_create - role updated to support a cleaner playbook + - na_ontap_san_create - role updated to support a cleaner playbook + +bugfixes: + - na_ontap_ndmp - minor documentation changes for restore_vm_cache_size and data_port_range. + - na_ontap_qtree - REST API takes "unix_permissions" as parameter instead of "mode". + - na_ontap_qtree - unix permission is not available when security style is ntfs + - na_ontap_user - minor documentation update for application parameter. + - na_ontap_volume - ``efficiency_policy`` was ignored + - na_ontap_volume - enforce that space_slo and space_guarantee are mutually exclusive + - na_ontap_svm - ``allowed_protocols`` added to param in proper way in case of using REST API + - na_ontap_firewall_policy - documentation changed for supported service parameter. + - na_ontap_net_subnet - fix ip_ranges option fails on existing subnet. + - na_ontap_snapshot_policy - fix vsadmin approach for managing snapshot policy. + - na_ontap_nvme_subsystem - fix fetching unique nvme subsystem based on vserver filter. + - na_ontap_net_routes - change metric type from string to int. + - na_ontap_cifs_server - minor documentation changes correction of create example with "name" parameter and adding type to parameters. + - na_ontap_vserver_cifs_security - fix int and boolean options when modifying vserver cifs security. + - na_ontap_net_subnet - fix rename idempotency issue and updated rename check. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/19.11.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/19.11.0.yaml new file mode 100644 index 000000000..d61b59ee3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/19.11.0.yaml @@ -0,0 +1,16 @@ +minor_changes: + - na_ontap_cluster - added single node cluster option, also now supports modifying the cluster contact and location options. + - na_ontap_info - Now allows you to use vsadmin to get info (must use the ``vserver`` option). + - na_ontap_info - Added ``vscan_status_info``, ``vscan_scanner_pool_info``, ``vscan_connection_status_all_info``, ``vscan_connection_extended_stats_info`` + - na_ontap_efficiency_policy - ``changelog_threshold_percent`` to set the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour. + +### Bug Fixes +bugfixes: + - na_ontap_cluster - autosupport log pushed after cluster create is performed, removed license add or remove option. + - na_ontap_dns - report error if modify or delete operations are attempted on cserver when using REST. Make create operation idempotent for cserver when using REST. Support for modify/delete on cserver when using REST will be added later. + - na_ontap_firewall_policy - portmap added as a valid service + - na_ontap_net_routes - REST does not support the ``metric`` attribute + - na_ontap_snapmirror - added initialize boolean option which specifies whether to initialize SnapMirror relation. + - na_ontap_volume - fixed error when deleting flexGroup volume with ONTAP 9.7. + - na_ontap_volume - tiering option requires 9.4 or later (error on volume-comp-aggr-attributes) + - na_ontap_vscan_scanner_pool - fix module only gets one scanner pool. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.1.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.1.0.yaml new file mode 100644 index 000000000..ce431b9b5 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.1.0.yaml @@ -0,0 +1,20 @@ +minor_changes: + - na_ontap_aggregate - add ``snaplock_type``. + - na_ontap_info - New info's added ``cifs_server_info``, ``cifs_share_info``, ``cifs_vserver_security_info``, ``cluster_peer_info``, ``clock_info``, ``export_policy_info``, ``export_rule_info``, ``fcp_adapter_info``, ``fcp_alias_info``, ``fcp_service_info``, ``job_schedule_cron_info``, ``kerberos_realm_info``, ``ldap_client``, ``ldap_config``, ``net_failover_group_info``, ``net_firewall_info``, ``net_ipspaces_info``, ``net_port_broadcast_domain_info``, ``net_routes_info``, ``net_vlan_info``, ``nfs_info``, ``ntfs_dacl_info``, ``ntfs_sd_info``, ``ntp_server_info``, ``role_info``, ``service_processor_network_info``, ``sis_policy_info``, ``snapmirror_policy_info``, ``snapshot_policy_info``, ``vscan_info``, ``vserver_peer_info`` + - na_ontap_igroup_initiator - ``force_remove`` to forcibly remove initiators from an igroup that is currently mapped to a LUN. + - na_ontap_interface - ``failover_group`` to specify the failover group for the LIF. ``is_ipv4_link_local`` to specify the LIF's are to acquire a ipv4 link local address. + - na_ontap_rest_cli - add OPTIONS as a supported verb and return list of allowed verbs. + - na_ontap_volume - add ``group_id`` and ``user_id``. + - na_ontap_dns - added REST support for dns creation and modification on cluster vserver. + +bugfixes: + - na_ontap_aggregate - Fixed traceback when running as vsadmin and cleanly error out. + - na_ontap_command - stdout_lines_filter contains data only if include/exlude_lines parameter is used. (zeten30) + - na_ontap_command - stripped_line len is checked only once, filters are inside if block. 
(zeten30) + - na_ontap_interface - allow module to run on node before joining the cluster. + - na_ontap_net_ifgrp - Fixed error for na_ontap_net_ifgrp if no port is given. + - na_ontap_snapmirror - Fixed traceback when running as vsadmin. Do not attempt to break a relationship that is 'Uninitialized'. + - na_ontap_snapshot_policy - Fixed KeyError on ``prefix`` issue when prefix parameter isn't supplied. + - na_ontap_volume - Fixed error reporting if efficiency policy cannot be read. Do not attempt to read efficiency policy if not needed. + - na_ontap_volume - Fixed error when modifying volume efficiency policy. + - na_ontap_volume_clone - Fixed KeyError exception on ``volume`` diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.2.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.2.0.yaml new file mode 100644 index 000000000..db7b40738 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.2.0.yaml @@ -0,0 +1,17 @@ +minor_changes: + - na_ontap_info - New info's added ``snapshot_info`` + - na_ontap_info - ``max_records`` option to set maximum number of records to return per subset. + - na_ontap_snapmirror - ``relationship_state`` option for breaking the snapmirror relationship. + - na_ontap_snapmirror - ``update_snapmirror`` option for updating the snapmirror relationship. + - na_ontap_volume_clone - ``split`` option to split clone volume from parent volume. + - na_ontap_nas_create - role - fix typo in README file, add CIFS example. - + +bugfixes: + - na_ontap_cifs_server - Fixed KeyError exception on 'cifs_server_name' + - na_ontap_command - fixed traceback when using return_dict if u'1' is present in result value. + - na_ontap_login_messages - Fixed example documentation and spelling mistake issue + - na_ontap_nvme_subsystem - fixed bug when creating subsystem, vserver was not filtered. 
+ - na_ontap_svm - if snapshot policy is changed, modify fails with "Extra input - snapshot_policy" + - na_ontap_svm - if language C.UTF-8 is specified, the module is not idempotent + - na_ontap_volume_clone - fixed 'Extra input - parent-vserver' error when running as cluster admin. + - na_ontap_qtree - Fixed issue with Get function for REST diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.3.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.3.0.yaml new file mode 100644 index 000000000..4d63f0e67 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.3.0.yaml @@ -0,0 +1,8 @@ +minor_changes: + - na_ontap_info - New info's added `cluster_identity_info`` + - na_ontap_info - New info's added ``storage_bridge_info`` + - na_ontap_snapmirror - performs resync when the ``relationship_state`` is active and the current state is broken-off. + +bugfixes: + - na_ontap_vscan_scanner_pool - has been updated to match the standard format used for all other ontap modules + - na_ontap_volume_snaplock - Fixed KeyError exception on 'is-volume-append-mode-enabled' diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.4.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.4.0.yaml new file mode 100644 index 000000000..03186163d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.4.0.yaml @@ -0,0 +1,30 @@ +minor_changes: + - na_ontap_aggregate - ``disk_count`` option allows adding additional disk to aggregate. + - na_ontap_info - ``max_records`` option specifies maximum number of records returned in a single ZAPI call. + - na_ontap_info - ``summary`` option specifies a boolean flag to control return all or none of the info attributes. + - na_ontap_info - new fact - iscsi_service_info. + - na_ontap_info - new fact - license_info. + - na_ontap_info - new fact - metrocluster_info. + - na_ontap_info - new fact - metrocluster_check_info. 
+ - na_ontap_info - new fact - metrocluster_node_info. + - na_ontap_info - new fact - net_interface_service_policy_info. + - na_ontap_info - new fact - ontap_system_version. + - na_ontap_info - new fact - ontapi_version (and deprecate ontap_version, both fields are reported for now). + - na_ontap_info - new fact - qtree_info. + - na_ontap_info - new fact - quota_report_info. + - na_ontap_info - new fact - snapmirror_destination_info. + - na_ontap_interface - ``service_policy`` option to identify a single service or a list of services that will use a LIF. + - na_ontap_kerberos_realm - ``ad_server_ip`` option specifies IP Address of the Active Directory Domain Controller (DC). + - na_ontap_kerberos_realm - ``ad_server_name`` option specifies Host name of the Active Directory Domain Controller (DC). + - na_ontap_snapmirror_policy - REST is included and all defaults are removed from options. + - na_ontap_snapmirror - ``relationship-info-only`` option allows to manage relationship information. + - na_ontap_software_update - ``download_only`` options allows to download cluster image without software update. + - na_ontap_volume - ``snapshot_auto_delete`` option allows to manage auto delete settings of a specified volume. + +bugfixes: + - na_ontap_cifs_server - delete AD account if username and password are provided when state=absent + - na_ontap_info - return all records of each gathered subset. + - na_ontap_info - cifs_server_info - fix KeyError exception on ``domain`` if only ``domain-workgroup`` is present. + - na_ontap_iscsi_security - Fixed modify functionality for CHAP and typo correction + - na_ontap_kerberos_realm - fix ``kdc_vendor`` case sensitivity issue. + - na_ontap_snapmirror - calling quiesce before snapmirror break. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.4.1.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.4.1.yaml new file mode 100644 index 000000000..3c772aecf --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.4.1.yaml @@ -0,0 +1,10 @@ +minor_changes: + - na_ontap_firmware_upgrade - ``force_disruptive_update`` and ``package_url`` options allows to make choices for download and upgrading packages. + - na_ontap_autosupport_invoke - added REST support for sending autosupport message. + - na_ontap_vserver_create has a new default variable ``netapp_version`` set to 140. + If you are running 9.2 or below please add the variable to your playbook and set to 120 + +bugfixes: + - na_ontap_volume - ``volume_security_style`` option now allows modify. + - na_ontap_info - ``metrocluster_check_info`` has been removed as it was breaking the info module for everyone who didn't have a metrocluster set up. + We are working on adding this back in a future update. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.5.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.5.0.yaml new file mode 100644 index 000000000..7ea16526d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.5.0.yaml @@ -0,0 +1,53 @@ +minor_changes: + - na_ontap_aggregate - ``raid_type`` options supports 'raid_0' for ONTAP Select. + - na_ontap_cluster_peer - ``encryption_protocol_proposed`` option allows specifying encryption protocol to be used for inter-cluster communication. + - na_ontap_info - new fact - aggr_efficiency_info. + - na_ontap_info - new fact - cluster_switch_info. + - na_ontap_info - new fact - disk_info. + - na_ontap_info - new fact - env_sensors_info. + - na_ontap_info - new fact - net_dev_discovery_info. + - na_ontap_info - new fact - service_processor_info. + - na_ontap_info - new fact - shelf_info. + - na_ontap_info - new fact - sis_info. 
+ - na_ontap_info - new fact - subsys_health_info. + - na_ontap_info - new fact - sysconfig_info. + - na_ontap_info - new fact - sys_cluster_alerts. + - na_ontap_info - new fact - volume_move_target_aggr_info. + - na_ontap_info - new fact - volume_space_info. + - na_ontap_nvme_namespace - ``block_size`` option allows specifying size in bytes of a logical block. + - na_ontap_snapmirror - snapmirror now allows resume feature. + - na_ontap_volume - ``cutover_action`` option allows specifying the action to be taken for cutover. + - na_ontap_cluster_config - role - Port Flowcontrol and autonegotiate can be set in role + +bugfixes: + - REST API call now honors the ``http_port`` parameter. + - REST API detection now works with vserver (use_rest - Auto). + - na_ontap_autosupport_invoke - when using ZAPI and name is not given, send autosupport message to all nodes in the cluster. + - na_ontap_cg_snapshot - properly states it does not support check_mode. + - na_ontap_cluster - ONTAP 9.3 or earlier does not support ZAPI element single-node-cluster. + - na_ontap_cluster_ha - support check_mode. + - na_ontap_cluster_peer - support check_mode. + - na_ontap_cluster_peer - EMS log wrongly uses destination credentials with source hostname. + - na_ontap_disks - support check_mode. + - na_ontap_dns - support check_mode. + - na_ontap_efficiency_policy - change ``duration`` type from int to str to support '-' input. + - na_ontap_fcp - support check_mode. + - na_ontap_flexcache - support check_mode. + - na_ontap_info - `metrocluster_check_info` does not trigger a traceback but adds an "error" info element if the target system is not set up for metrocluster. + - na_ontap_license - support check_mode. + - na_ontap_login_messages - fix documentation link. + - na_ontap_node - support check mode. + - na_ontap_ntfs_sd - documentation string update for examples and made sure owner or group not mandatory. + - na_ontap_ports - now support check mode. 
+ - na_ontap_restit - error can be a string in addition to a dict. This fix removes a traceback with AttributeError. + - na_ontap_routes - support Check Mode correctly. + - na_ontap_snapmirror - support check_mode. + - na_ontap_software_update - Incorrectly stated that it support check mode, it does not. + - na_ontap_svm_options - support check_mode. + - na_ontap_volume - improve error reporting if required parameter is present but not set. + - na_ontap_volume - suppress traceback in wait_for_completion as volume may not be completely ready. + - na_ontap_volume - fix KeyError on 'style' when volume is offline. + - na_ontap_volume_autosize - Support check_mode when `reset` option is given. + - na_ontap_volume_snaplock - fix documentation link. + - na_ontap_vserver_peer - support check_mode. + - na_ontap_vserver_peer - EMS log wrongly uses destination credentials with source hostname. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.6.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.6.0.yaml new file mode 100644 index 000000000..792ab6144 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.6.0.yaml @@ -0,0 +1,37 @@ +minor_changes: + - na_ontap_disks - ``disk_type`` option allows to assign specified type of disk. + - na_ontap_firmware_upgrade - ignore timeout when downloading image unless ``fail_on_502_error`` is set to true. + - na_ontap_info - ``desired_attributes`` advanced feature to select which fields to return. + - na_ontap_info - ``use_native_zapi_tags`` to disable the conversion of '_' to '-' for attribute keys. + - na_ontap_rest_info - ``fields`` options to request specific fields from subset. + - na_ontap_software_update - ``stabilize_minutes`` option specifies number of minutes needed to stabilize node before update. + - na_ontap_snapmirror - now performs restore with optional field ``source_snapshot`` for specific snapshot or uses latest. 
+ - na_ontap_ucadapter - ``pair_adapters`` option allows specifying the list of adapters which also need to be offline. + - na_ontap_user - ``authentication_password`` option specifies password for the authentication protocol of SNMPv3 user. + - na_ontap_user - ``authentication_protocol`` option specifies authentication protocol for SNMPv3 user. + - na_ontap_user - ``engine_id`` option specifies authoritative entity's EngineID for the SNMPv3 user. + - na_ontap_user - ``privacy_password`` option specifies password for the privacy protocol of SNMPv3 user. + - na_ontap_user - ``privacy_protocol`` option specifies privacy protocol of SNMPv3 user. + - na_ontap_user - ``remote_switch_ipaddress`` option specifies the IP Address of the remote switch of SNMPv3 user. + - na_ontap_volume - ``check_interval`` option checks if a volume move has been completed and then waits this number of seconds before checking again. + - na_ontap_volume - ``auto_remap_luns`` option controls automatic mapping of LUNs during volume rehost. + - na_ontap_volume - ``force_restore`` option forces volume to restore even if the volume has one or more newer Snapshot copies. + - na_ontap_volume - ``force_unmap_luns`` option controls automatic unmapping of LUNs during volume rehost. + - na_ontap_volume - ``from_vserver`` option allows volume rehost from one vserver to another. + - na_ontap_volume - ``preserve_lun_ids`` option controls LUNs in the volume being restored will remain mapped and their identities preserved. + - na_ontap_volume - ``snapshot_restore`` option specifies name of snapshot to restore from. + - all modules - ``cert_filepath``, ``key_filepath`` to enable SSL certificate authentication (python 2.7 or 3.x). + - all modules - SSL certificate authentication in addition to username/password (python 2.7 or 3.x). + - na_ontap_pb_install_SSL_certificate.yml - playbook example - installing a self-signed SSL certificate, and enabling SSL certificate authentication. 
+ - na_ontap_user - added REST support for ONTAP user creation, modification & deletion. + +bugfixes: + - na_ontap_firmware_upgrade - ignore timeout when downloading firmware images by default. + - na_ontap_info - conversion from '-' to '_' was not done for lists of dictionaries. + - na_ontap_ntfs_dacl - example fix in documentation string. + - na_ontap_snapmirror - could not delete all rules (bug in netapp_module). + - na_ontap_volume - modify was invoked multiple times when once is enough. + - na_ontap_volume - fix KeyError on 'style' when volume is of type - data-protection. + - na_ontap_volume - `wait_on_completion` is supported with volume moves. + - module_utils/netapp_module - cater for empty lists in get_modified_attributes(). + - module_utils/netapp_module - cater for lists with duplicate elements in compare_lists(). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.6.1.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.6.1.yaml new file mode 100644 index 000000000..572d8499b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.6.1.yaml @@ -0,0 +1,9 @@ +minor_changes: +- na_ontap_firmware_upgrade - ``reboot_sp`` - reboot service processor before downloading package. +- na_ontap_firmware_upgrade - ``rename_package`` - rename file when downloading service processor package. +- na_ontap_firmware_upgrade - ``replace_package`` - replace local file when downloading service processor package. + +bugfixes: +- na_ontap_firmware_upgrade - images are not downloaded, but the module reports success. +- na_ontap_user - fixed KeyError if password is not provided. +- na_ontap_password - do not error out if password is identical to previous password (idempotency). 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.7.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.7.0.yaml new file mode 100644 index 000000000..7dae6c229 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.7.0.yaml @@ -0,0 +1,24 @@ +minor_changes: + - na_ontap_info - support ``continue_on_error`` option to continue when a ZAPI is not supported on a vserver, or for cluster RPC errors. + - na_ontap_info - support ``query`` option to specify which objects to return. + - na_ontap_info - support ``vserver`` tunneling to limit output to one vserver. + - na_ontap_snapmirror_policy - support for SnapMirror policy rules. + - na_ontap_vscan_scanner_pool - support modification. + - na_ontap_rest_info - Support for gather subsets - ``cluster_node_info, cluster_peer_info, disk_info, cifs_services_info, cifs_share_info``. + - module_utils/netapp - add retry on wait_on_job when job failed. Abort 3 consecutive errors. + - na_ontap_pb_get_online_volumes.yml - example playbook to list volumes that are online (or offline). + - na_ontap_pb_install_SSL_certificate_REST.yml - example playbook to install SSL certificates using REST APIs. + +bugfixes: + - na_ontap_command - replace invalid backspace characters (0x08) with '.'. + - na_ontap_firmware_download - exception on PCDATA if ONTAP returns a BEL (0x07) character. + - na_ontap_info - lists were incorrectly processed in convert_keys, returning {}. + - na_ontap_info - qtree_info is missing most entries. Changed key from `vserver:id` to `vserver:volume:id` . + - na_ontap_iscsi_security - adding no_log for password parameters. + - na_ontap_portset - adding explicit error message as modify portset is not supported. + - na_ontap_snapmirror - fixed snapmirror delete for loadsharing to not go to quiesce state for the rest of the set. + - na_ontap_ucadapter - fixed KeyError if type is not provided and mode is 'cna'. 
+ - na_ontap_user - checked `applications` does not contain snmp when using REST API call. + - na_ontap_user - fixed KeyError if locked key not set with REST API call. + - na_ontap_user - fixed KeyError if vserver - is empty with REST API call (useful to indicate cluster scope). + - na_ontap_volume - fixed KeyError when getting info on a MVD volume diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.8.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.8.0.yaml new file mode 100644 index 000000000..59f05d2ea --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.8.0.yaml @@ -0,0 +1,33 @@ +minor_changes: + - na_ontap_aggregate - support ``disk_size_with_unit`` option. + - na_ontap_ldap_client - support ``ad_domain`` and ``preferred_ad_server`` options. + - na_ontap_rest_info - Support for gather subsets - ``cloud_targets_info, cluster_chassis_info, cluster_jobs_info, cluster_metrics_info, cluster_schedules, broadcast_domains_info, cluster_software_history, cluster_software_packages, network_ports_info, ip_interfaces_info, ip_routes_info, ip_service_policies, network_ipspaces_info, san_fc_logins_info, san_fc_wppn-aliases, svm_dns_config_info, svm_ldap_config_info, svm_name_mapping_config_info, svm_nis_config_info, svm_peers_info, svm_peer-permissions_info``. + - na_ontap_rest_info - Support for gather subsets for 9.8+ - ``cluster_metrocluster_diagnostics``. + - na_ontap_qtree - ``force_delete`` option with a DEFAULT of ``true`` so that ZAPI behavior is aligned with REST. + - na_ontap_security_certificates - ``ignore_name_if_not_supported`` option to not fail if ``name`` is present since ``name`` is not supported in ONTAP 9.6 and 9.7. + - na_ontap_software_update - added ``timeout`` option to give enough time for the update to complete. + - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9. + - add ``type:`` and ``elements:`` information where missing. 
+ - update ``required:`` information. + +bugfixes: + - na_ontap_aggregate - ``disk-info`` error when using ``disks`` option. + - na_ontap_autosupport_invoke - ``message`` has changed to ``autosupport_message`` as Redhat has reserved this word. ``message`` has been alias'd to ``autosupport_message``. + - na_ontap_cifs_vserver - fix documentation and add more examples. + - na_ontap_cluster - module was not idempotent when changing location or contact information. + - na_ontap_igroup - idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase). + - na_ontap_igroup_initiator - idempotency issue when using uppercase hex digits (A, B, C, D, E, F) in WWN (ONTAP uses lowercase). + - na_ontap_security_certificates - allows (``common_name``, ``type``) as an alternate key since ``name`` is not supported in ONTAP 9.6 and 9.7. + - na_ontap_info - Fixed error causing module to fail on ``metrocluster_check_info``, ``env_sensors_info`` and ``volume_move_target_aggr_info``. + - na_ontap_snapmirror - fixed KeyError when accessing ``relationship_type`` parameter. + - na_ontap_snapmirror_policy - fixed a race condition when creating a new policy. + - na_ontap_snapmirror_policy - fixed idempotency issue with ``is_network_compression_enabled`` for REST. + - na_ontap_software_update - ignore connection errors during update as nodes may not be reachable. + - na_ontap_user - enable lock state and password to be set in the same task for existing user. + - na_ontap_volume - issue when snapdir_access and atime_update not passed together. + - na_ontap_vscan_on_access_policy - ``bool`` type was not properly set for ``scan_files_with_no_ext``. + - na_ontap_vscan_on_access_policy - ``policy_status`` enable/disable option was not supported. + - na_ontap_vscan_on_demand_task - ``file_ext_to_include`` was not handled properly. + - na_ontap_vscan_scanner_pool_policy - scanner_pool apply policy support on modification. 
+ - na_ontap_vserver_create(role) - lif creation now defaults to system-defined unless iscsi lif type. + - use_rest is now case insensitive. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/20.9.0.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/20.9.0.yaml new file mode 100644 index 000000000..2315af8db --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/20.9.0.yaml @@ -0,0 +1,17 @@ +minor_changes: + - na_ontap_cluster - ``node_name`` to set the node name when adding a node, or as an alternative to `cluster_ip_address`` to remove a node. + - na_ontap_cluster - ``state`` can be set to ``absent`` to remove a node identified with ``cluster_ip_address`` or ``node_name``. + - na_ontap_qtree - ``wait_for_completion`` and ``time_out`` to wait for qtree deletion when using REST. + - na_ontap_quotas - ``soft_disk_limit`` and ``soft_file_limit`` for the quota target. + - na_ontap_rest_info - Support for gather subsets - ``initiator_groups_info, san_fcp_services, san_iscsi_credentials, san_iscsi_services, san_lun_maps, storage_luns_info, storage_NVMe_namespaces.`` + +bugfixes: + - na_ontap_cluster - ``check_mode`` is now working properly. + - na_ontap_interface - ``home_node`` is not required in pre-cluster mode. + - na_ontap_interface - ``role`` is not required if ``service_policy`` is present and ONTAP version is 9.8. + - na_ontap_interface - traceback in get_interface if node is not reachable. + - na_ontap_job_schedule - allow ``job_minutes`` to set number to -1 for job creation with REST too. + - na_ontap_qtree - fixed ``None is not subscriptable`` exception on rename operation. + - na_ontap_volume - fixed ``KeyError`` exception on ``size`` when reporting creation error. + - na_ontap_* - change version_added from '2.6' to '2.6.0' where applicable to satisfy sanity checker. + - netapp.py - uncaught exception (traceback) on zapi.NaApiError. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1661.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1661.yaml new file mode 100644 index 000000000..564956af3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1661.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_broadcast_domain - Added REST support to the broadcast domain module. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1665.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1665.yaml new file mode 100644 index 000000000..8aa2e9e21 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1665.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_firmware_upgrade - REST support to download firmware and reboot SP. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1926.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1926.yaml new file mode 100644 index 000000000..640a94636 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-1926.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_ldap - fall back to ZAPI when ``use_rest`` is set to ``auto`` or fail when REST is desired. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2353.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2353.yaml new file mode 100644 index 000000000..6b9478b74 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2353.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_motd - deprecated module warning and to use na_ontap_login_messages. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2422.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2422.yaml new file mode 100644 index 000000000..f352bf2e0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2422.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_restit - new option ``wait_for_completion`` to support asynchronous operations and wait for job completion. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2426.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2426.yaml new file mode 100644 index 000000000..1981166e4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2426.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapmirror_policy - report error when attempting to change ``policy_type`` rather than taking no action. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2459.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2459.yaml new file mode 100644 index 000000000..74221fcdf --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2459.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_ntp - Added REST support to the ntp module diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2459b.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2459b.yaml new file mode 100644 index 000000000..405c3f664 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2459b.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume - Added REST support to the volume module \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2491.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2491.yaml new file mode 100644 index 000000000..aaa498334 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2491.yaml @@ -0,0 
+1,2 @@ +bugfixes: + - na_ontap_snmp - SNMP module wrong ``access_control`` issue and error handling fix. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2668.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2668.yaml new file mode 100644 index 000000000..2af3377e7 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2668.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - fix volume type modify issue by reporting error. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2928.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2928.yaml new file mode 100644 index 000000000..0fb841b06 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2928.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_info - deprecate ``state`` option. + - na_ontap_rest_info - deprecate ``state`` option. +bugfixes: + - na_ontap_rest_info - ``changed`` was set to "False" rather than boolean False. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2964.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2964.yaml new file mode 100644 index 000000000..bf01eaaa6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2964.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_info - New options ``cifs_options_info``, ``cluster_log_forwarding_info``, ``event_notification_destination_info``, ``event_notification_info``, ``security_login_role_config_info``, ``security_login_role_info`` have been added. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2965.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2965.yaml new file mode 100644 index 000000000..6d86e9088 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2965.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cluster_peer - optional parameter ``ipspace`` added for cluster peer. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2972.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2972.yaml new file mode 100644 index 000000000..a1582b1dd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-2972.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_volume - new option ``max_files`` to increase the inode count value. + - na_ontap_volume - allow to modify volume after rename. +bugfixes: + - na_ontap_volume - do not attempt to mount volume if current state is offline. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3113.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3113.yaml new file mode 100644 index 000000000..bcefabb0e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3113.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cluster - ``single_node_cluster`` option was ignored. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3137.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3137.yaml new file mode 100644 index 000000000..5e5243d58 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3137.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cluster - ``time_out`` to wait for cluster creation, adding and removing a node. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3139.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3139.yaml new file mode 100644 index 000000000..f32d66919 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3139.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_aggregate - support concurrent actions for rename/modify/add_object_store and create/add_object_store. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3148.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3148.yaml new file mode 100644 index 000000000..ed2e91f5e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3148.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_interface - Added REST support to the interface module (for IP and FC interfaces). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3149.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3149.yaml new file mode 100644 index 000000000..0f09e24fd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3149.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_ontap_cifs - output ``modified`` if a modify action is taken. + - na_ontap_svm - output ``modified`` if a modify action is taken. + +bugfixes: + - na_ontap_cifs - fix idempotency issue when ``show-previous-versions`` is used. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3167.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3167.yaml new file mode 100644 index 000000000..0888400e9 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3167.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - ``encrypt`` with a value of ``false`` is ignored when creating a volume. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3175.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3175.yaml new file mode 100644 index 000000000..db164c6a8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3175.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_node - added modify function for location and asset tag for node. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3178.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3178.yaml new file mode 100644 index 000000000..585f302b6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3178.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_info - Support for gather subsets - ``application_info, application_template_info, autosupport_config_info , autosupport_messages_history, ontap_system_version, storage_flexcaches_info, storage_flexcaches_origin_info, storage_ports_info, storage_qos_policies, storage_qtrees_config, storage_quota_reports, storage_quota_policy_rules, storage_shelves_config, storage_snapshot_policies, support_ems_config, support_ems_events, support_ems_filters`` diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3181.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3181.yaml new file mode 100644 index 000000000..dbb99ad4a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3181.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_quotas - New option ``activate_quota_on_change`` to resize or reinitialize quotas. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3194.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3194.yaml new file mode 100644 index 000000000..34299bfe8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3194.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_info - better reporting on KeyError traceback, option to ignore error. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3230.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3230.yaml new file mode 100644 index 000000000..806abda70 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3230.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_ipspace - fix cannot delete ipspace if ``from_ipspace`` is present. +minor_changes: + - na_ontap_ipspace - improved module fail error message in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3241.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3241.yaml new file mode 100644 index 000000000..a4452b805 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3241.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_users - new option ``application_dicts`` to associate multiple authentication methods to an application. + - na_ontap_users - new option ``application_strs`` to disambiguate ``applications``. + - na_ontap_users - new option ``replace_existing_apps_and_methods``. + - na_ontap_users - new suboption ``second_authentication_method`` with ``application_dicts`` option. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3242.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3242.yaml new file mode 100644 index 000000000..cd2c30e78 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3242.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_firmware_upgrade - new option for firmware type ``storage`` added. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3251.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3251.yaml new file mode 100644 index 000000000..94dd3585c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3251.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_info - KeyError on ``tree`` for quota_report_info. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3262.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3262.yaml new file mode 100644 index 000000000..067c2d18f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3262.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_ontap_mcc_mediator - improve error reporting when REST is not available. + - na_ontap_metrocluster - improve error reporting when REST is not available. + - na_ontap_wwpn_alias - improve error reporting when REST is not available. +bugfixes: + - na_ontap_ipspace - invalid call in error reporting (double error). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3304.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3304.yaml new file mode 100644 index 000000000..3e9add073 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3304.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_software_update - add `force_update` option to ignore current version. + +bugfixes: + - na_ontap_software_update - module is not idempotent. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3310.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3310.yml new file mode 100644 index 000000000..bfef391bf --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3310.yml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_interface - minor example update. + - na_ontap_export_policy_rule - minor doc updates. +bugfixes: + - na_ontap_info - Use ``node-id`` as key rather than ``current-version``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3312.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3312.yaml new file mode 100644 index 000000000..e38bfeefb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3312.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_firmware_upgrade - fix ValueError issue when processing URL error. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3329.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3329.yaml new file mode 100644 index 000000000..dacc27c28 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3329.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_svm - warning for ``aggr_list`` wildcard value(``*``) in create idempotency. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3346.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3346.yaml new file mode 100644 index 000000000..51e0444f9 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3346.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_broadcast_domain_ports - handle ``changed`` for check_mode and report correctly. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3354.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3354.yaml new file mode 100644 index 000000000..f1636604b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3354.yaml @@ -0,0 +1,2 @@ +bugfixes: + - All REST modules, will not fail if a job fails diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3358.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3358.yaml new file mode 100644 index 000000000..c1214c151 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3358.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_info - do not require write access privileges. This also enables other modules to work in check_mode without write access permissions. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3366.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3366.yaml new file mode 100644 index 000000000..ffd6382a8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3366.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun - support modify for space_allocation and space_reserve. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3367.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3367.yaml new file mode 100644 index 000000000..2a98711b0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3367.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun - new option ``from_name`` to rename a LUN. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3368.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3368.yaml new file mode 100644 index 000000000..18e2dd26d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3368.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_volume - ``nas_application_template`` to create a volume using nas application REST API. + - na_ontap_volume - ``size_change_threshold`` to ignore small changes in volume size. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3369.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3369.yaml new file mode 100644 index 000000000..7193ea400 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3369.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_igroup - new option ``os_type`` to replace ``ostype`` (but ostype is still accepted). + - na_ontap_lun - new option ``os_type`` to replace ``ostype`` (but ostype is still accepted), and removed default to ``image``. + - na_ontap_lun - new option ``san_application_template`` to create LUNs without explicitly creating a volume and using REST APIs. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3370.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3370.yaml new file mode 100644 index 000000000..7a72a98b6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3370.yaml @@ -0,0 +1,11 @@ +minor_changes: + - na_ontap_snapmirror - use REST API for create action if target supports it. (ZAPIs are still used for all other actions). + - na_ontap_snapmirror - new option ``create_destination`` to automatically create destination endpoint (ONTAP 9.7). + - na_ontap_snapmirror - new option ``destination_cluster`` to automatically create destination SVM for SVM DR (ONTAP 9.7). 
+ - na_ontap_snapmirror - new option ``source_cluster`` to automatically set SVM peering (ONTAP 9.7). + - na_ontap_volume - use REST API for delete operation if targets supports it. + +bugfixes: + - na_ontap_lun - REST expects 'all' for tiering policy and not 'backup'. + - na_ontap_snapmirror - wait up to 5 minutes for abort to complete before issuing a delete. + - na_ontap_volume - REST expects 'all' for tiering policy and not 'backup'. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3371.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3371.yaml new file mode 100644 index 000000000..78b525b13 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3371.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_volume - ``compression`` to enable compression on a FAS volume. + - na_ontap_volume - ``inline-compression`` to enable inline compression on a volume. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3385.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3385.yaml new file mode 100644 index 000000000..2ce1895f8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3385.yaml @@ -0,0 +1,3 @@ +minor_changes: + - all ZAPI modules - optimize Basic Authentication by adding Authorization header proactively. + - all ZAPI modules - new ``classic_basic_authorization`` feature_flag to disable adding Authorization header proactively. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3386.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3386.yaml new file mode 100644 index 000000000..999c5c74b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3386.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_user - application expects only ``service_processor`` but module supports ``service-processor``. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3390.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3390.yaml new file mode 100644 index 000000000..735e03719 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3390.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_qos_policy_group - new option ``is_shared`` for sharing QOS SLOs or not. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3392.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3392.yaml new file mode 100644 index 000000000..55b4a37e9 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3392.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun - new option ``qos_policy_group`` to assign a qos_policy_group to a LUN. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3399.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3399.yaml new file mode 100644 index 000000000..7b48e0958 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3399.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_quotas - New option ``perform_user_mapping`` to perform user mapping for the user specified in quota-target. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3400.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3400.yaml new file mode 100644 index 000000000..27199ea4c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3400.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_quota_policy - new option ``auto_assign`` to assign quota policy to vserver. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3401.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3401.yaml new file mode 100644 index 000000000..ed3cb6eae --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3401.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cifs - fix for AttributeError - 'NoneType' object has no attribute 'get' on line 300 diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3439.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3439.yaml new file mode 100644 index 000000000..32006da0c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3439.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_snapmirror - report error when attempting to change relationship_type. + - na_ontap_snapmirror - fix job update failures for load_sharing mirrors. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3442.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3442.yaml new file mode 100644 index 000000000..6c5adf7dc --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3442.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - checking for success before failure lead to 'NoneType' object has no attribute 'get_child_by_name' when modifying a Flexcache volume. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3443.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3443.yaml new file mode 100644 index 000000000..e9e292f8d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3443.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_info - Support for gather subsets - ``cifs_home_directory_info, cluster_software_download, event_notification_info, event_notification_destination_info, security_login_info, security_login_rest_role_info`` diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3454.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3454.yaml new file mode 100644 index 000000000..babca44d3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3454.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume - ``sizing_method`` to resize a FlexGroup using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3479.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3479.yaml new file mode 100644 index 000000000..ee63cd989 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3479.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_igroup - added REST support for ONTAP igroup creation, modification, and deletion. +bugfixes: + - na_ontap_igroup - report error when attempting to modify an option that cannot be changed. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3480.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3480.yaml new file mode 100644 index 000000000..f1fe9f360 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3480.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_debug - connection diagnostics added for invalid ipaddress and DNS hostname errors. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3483.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3483.yaml new file mode 100644 index 000000000..63ffb4f23 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3483.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_debug - additional checks when REST is available to help debug vserver connectivity issues. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3490.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3490.yaml new file mode 100644 index 000000000..4f8f8fe29 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3490.yaml @@ -0,0 +1,2 @@ +minor_changes: + - general - improve error reporting when older version of netapp-lib is used. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3494.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3494.yaml new file mode 100644 index 000000000..37b7867ab --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3494.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_quotas - Handle blank string idempotency issue for ``quota_target`` in quotas module. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3497.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3497.yaml new file mode 100644 index 000000000..2fa289d12 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3497.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_lun - support increasing lun_count and total_size when using SAN application template. + - na_ontap_lun - new options ``total_size`` and ``total_size_unit`` when using SAN application template. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3501.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3501.yaml new file mode 100644 index 000000000..d8ea25d5b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3501.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_volume - detect and report error when attempting to change FlexVol into FlexGroup. + - na_ontap_volume - report error if ``aggregate_name`` option is used with a FlexGroup. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3510.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3510.yaml new file mode 100644 index 000000000..321171b84 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3510.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_quota - allow to turn quota on/off without providing quota_target or type. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3515.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3515.yaml new file mode 100644 index 000000000..c73b49913 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3515.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - report error when attempting to change the nas_application tiering control from disallowed to required, or reciprocally. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3534.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3534.yaml new file mode 100644 index 000000000..2534f1239 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3534.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_ontap_cluster_peer - new option ``peer_options`` to use different credentials on peer. + - na_ontap_vserver_peer - new option ``peer_options`` to use different credentials on peer. +bugfixes: + - na_ontap_cluster_peer - KeyError on username when using certificate. 
+ - na_ontap_vserver_peer - KeyError on username when using certificate. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3535.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3535.yaml new file mode 100644 index 000000000..a8949ff41 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3535.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_lun - new option ``scope`` to explicitly force operations on the SAN application or a single LUN. + - na_ontap_lun - convert existing LUNs and supporting volume to a smart container within a SAN application. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3536.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3536.yaml new file mode 100644 index 000000000..2a79cdd6c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3536.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_igroups - new option ``igroups`` to support nested igroups (requires ONTAP 9.9). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3540.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3540.yaml new file mode 100644 index 000000000..a843550ea --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3540.yaml @@ -0,0 +1,2 @@ +bugfixes: + - All REST modules - ONTAP 9.4 and 9.5 are incorrectly detected as supporting REST with ``use_rest:auto``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3542.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3542.yaml new file mode 100644 index 000000000..c60a6dac0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3542.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun - add ``comment`` option. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3543.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3543.yaml new file mode 100644 index 000000000..7f3f71b19 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3543.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_snapmirror - add new options ``source_endpoint`` and ``destination_endpoint`` to group endpoint suboptions. + - na_ontap_snapmirror - add new suboptions ``consistency_group_volumes`` and ``ipspace`` to endpoint options. + - na_ontap_snapmirror - improve error reporting or warn when REST option is not supported. + - na_ontap_snapmirror - deprecate older options for source and destination paths, volumes, vservers, and clusters. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3571.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3571.yaml new file mode 100644 index 000000000..5961068ab --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3571.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_node - added REST support for ONTAP node modify and rename. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3579.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3579.yaml new file mode 100644 index 000000000..2cf897321 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3579.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_lun - ``qos_policy_group`` could not be modified if a value was not provided at creation. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3580.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3580.yaml new file mode 100644 index 000000000..c0c00c183 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3580.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun - new option ``qos_adaptive_policy_group``. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3595.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3595.yaml new file mode 100644 index 000000000..d1395562f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3595.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_snapmirror - report warning when relationship is present but not healthy. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3615.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3615.yaml new file mode 100644 index 000000000..39d1a41bd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3615.yaml @@ -0,0 +1,2 @@ +bugfixes: + - all modules - fix traceback TypeError 'NoneType' object is not subscriptable when hostname points to a web server. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3623.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3623.yaml new file mode 100644 index 000000000..d0ca314f8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3623.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_lun - tiering options were ignored in san_application_template. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3625.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3625.yaml new file mode 100644 index 000000000..a0c76437c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3625.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - returns an error now if deleting a volume with REST api fails. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3626.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3626.yaml new file mode 100644 index 000000000..0807a510e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3626.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - unmount volume before deleting it when using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3628.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3628.yaml new file mode 100644 index 000000000..aee869c57 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3628.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cluster - ignore ZAPI EMS log error when in pre-cluster mode. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3632.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3632.yaml new file mode 100644 index 000000000..e7892c040 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3632.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_info - add quota-policy-info. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3633.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3633.yaml new file mode 100644 index 000000000..0fb9e987c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3633.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - report error from resize operation when using REST. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3649.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3649.yaml new file mode 100644 index 000000000..3ee9d28ac --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3649.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_autosupport - warn when password is present in ``proxy_url`` as it makes the operation not idempotent. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3654.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3654.yaml new file mode 100644 index 000000000..47181976a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3654.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume - new suboption ``dr_cache`` when creating flexcache using NAS application template. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3655.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3655.yaml new file mode 100644 index 000000000..7b84f698e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3655.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_flexcache - support for ``prepopulate`` option when using REST (requires ONTAP 9.8). + - na_ontap_flexcache - mount/unmount the FlexCache volume when using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3662.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3662.yaml new file mode 100644 index 000000000..0456f145f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3662.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_node - KeyError fix for location and asset-tag parameters in get_node(). 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3667.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3667.yaml new file mode 100644 index 000000000..21e61de81 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3667.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_debug - improve error reporting for import errors on netapp_lib. + - na_ontap_info - improve error reporting for import errors on netapp_lib, json, xmltodict. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3668.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3668.yaml new file mode 100644 index 000000000..0089452ab --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3668.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - changes in ``encrypt`` settings were ignored. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3671.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3671.yaml new file mode 100644 index 000000000..f3843f8de --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3671.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume_efficiency - to allow for FAS ONTAP systems to enable volume efficiency when it does not exist. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3677.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3677.yaml new file mode 100644 index 000000000..9d41da251 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3677.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_flexcache - support REST APIs in addition to ZAPI for create and delete. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3685.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3685.yaml new file mode 100644 index 000000000..9cb201081 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3685.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_ldap_client - ``port`` was incorrectly used instead of ``tcp_port``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3716.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3716.yaml new file mode 100644 index 000000000..3a2dc12cb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3716.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapmirror - SVM scoped policies were not found when using a destination path with REST application. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3718.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3718.yaml new file mode 100644 index 000000000..4ebea4117 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3718.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume_efficiency - to allow for FAS ONTAP systems to enable volume efficiency when it does not exist and apply additional parameters. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3754.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3754.yaml new file mode 100644 index 000000000..7805e54cd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3754.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapmirror - check for consistency_group_volumes always fails on 9.7, and cluster or ipspace when using endpoints with ZAPI. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3757.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3757.yaml new file mode 100644 index 000000000..5b1d2f19b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3757.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume - warn when attempting to modify application only options. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3767.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3767.yaml new file mode 100644 index 000000000..42cdf714a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3767.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_lun - ignore small increase (lower than provisioned) and small decrease (< 10%) in ``total_size``. +bugfixes: + - na_ontap_lun - SAN application is not supported on 9.6 and only partially supported on 9.7 (no modify). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3772.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3772.yaml new file mode 100644 index 000000000..e6d7cd31d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3772.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_igroups - new option ``initiator_objects`` to support initiator comments (requires ONTAP 9.9). + - na_ontap_igroups - new option ``initiator_names`` as a replacement for ``initiators`` (still supported as an alias). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3801.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3801.yaml new file mode 100644 index 000000000..b244e7883 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3801.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_svm - iscsi current status is not read correctly (misspelled issi). 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3807.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3807.yaml new file mode 100644 index 000000000..5378d29a8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3807.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_vserver_peer - new options ``local_name_for_source`` and ``local_name_for_peer`` added. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3811.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3811.yaml new file mode 100644 index 000000000..5483dcf24 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3811.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun - allow new LUNs to use different igroup or os_type when using SAN application. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3812.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3812.yml new file mode 100644 index 000000000..24858bd7b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3812.yml @@ -0,0 +1,9 @@ +minor_changes: + - na_ontap_volume_efficiency - new option to allow volume efficiency to be started and stopped 'volume_efficiency'. + - na_ontap_volume_efficiency - new option 'start_ve_scan_all' scan the entire volume without applying share block optimization. + - na_ontap_volume_efficiency - new option 'start_ve_build_metadata' scan the entire and generate fingerprint database. + - na_ontap_volume_efficiency - new option 'start_ve_delete_checkpoint' delete checkpoint and start the operation from the beginning. + - na_ontap_volume_efficiency - new option 'start_ve_queue_operation' queue if an existing operation is already running. + - na_ontap_volume_efficiency - new option 'start_ve_scan_old_data' scan the file system to process all the existing data. 
+ - na_ontap_volume_efficiency - new option 'start_ve_qos_policy' defines the QoS policy for the operation. + - na_ontap_volume_efficiency - new option 'stop_ve_all_operations' all running and queued operations to be stopped. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3830.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3830.yaml new file mode 100644 index 000000000..9d7684284 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3830.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - ignore read error because of insufficient privileges for efficiency options so that the module can be run as vsadmin. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3850.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3850.yaml new file mode 100644 index 000000000..8731dfdf4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3850.yaml @@ -0,0 +1,13 @@ +major_changes: + - na_ontap_autosupport - Added REST support to the module. +minor_changes: + - na_ontap_autosupport - new option ``nht_data_enabled`` to specify whether the disk health data is collected as part of the AutoSupport data. + - na_ontap_autosupport - new option ``perf_data_enabled`` to specify whether the performance data is collected as part of the AutoSupport data. + - na_ontap_autosupport - new option ``retry_count`` to specify the maximum number of delivery attempts for an AutoSupport message. + - na_ontap_autosupport - new option ``reminder_enabled`` to specify whether AutoSupport reminders are enabled or disabled. + - na_ontap_autosupport - new option ``max_http_size`` to specify delivery size limit for the HTTP transport protocol (in bytes). + - na_ontap_autosupport - new option ``max_smtp_size`` to specify delivery size limit for the SMTP transport protocol (in bytes). 
+ - na_ontap_autosupport - new option ``private_data_removed`` to specify the removal of customer-supplied data. + - na_ontap_autosupport - new option ``local_collection_enabled`` to specify whether collection of AutoSupport data when the AutoSupport daemon is disabled. + - na_ontap_autosupport - new option ``ondemand_enabled`` to specify whether the AutoSupport OnDemand Download feature is enabled. + - na_ontap_autosupport - new option ``validate_digital_certificate`` which when set to true each node will validate the digital certificates that it receives. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3870.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3870.yaml new file mode 100644 index 000000000..23aad1e5c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3870.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_info - Added "autosupport_check_info" to the attributes that will be collected when gathering info using the module. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3883.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3883.yaml new file mode 100644 index 000000000..9721b28a7 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3883.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_qtree - wait for completion when creating or modifying a qtree with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3900.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3900.yaml new file mode 100644 index 000000000..5406b25a7 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3900.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_info - Added "autosupport_check_info"/"support/autosupport/check" to the attributes that will be collected when gathering info using the module. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3926.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3926.yaml new file mode 100644 index 000000000..7264f3b15 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3926.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_autosupport - TypeError - '>' not supported between instances of 'str' and 'list'. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3939.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3939.yaml new file mode 100644 index 000000000..c546b8739 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3939.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_snapshot - new option ``expiry_time``. + - na_ontap_snapshot - add REST support to create, modify, rename, and delete snapshot. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3950.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3950.yaml new file mode 100644 index 000000000..e4074435a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3950.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_quotas - fail to reinitialize on create if quota is already on. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3952.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3952.yaml new file mode 100644 index 000000000..46ac10fa3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3952.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_disks - new option min_spares. + - na_ontap_disks - added REST support for the module. + - na_ontap_disks - added functionality to reassign spare disks from a partner node to the desired node. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3969.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3969.yaml new file mode 100644 index 000000000..e0d1cb24a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3969.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cifs - new option ``comment`` to associate a description to a CIFS share. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3971.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3971.yaml new file mode 100644 index 000000000..54e9fadeb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3971.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_autosupport - KeyError - No element by given name validate-digital-certificate. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3973.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3973.yaml new file mode 100644 index 000000000..381ebd6c6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3973.yaml @@ -0,0 +1,6 @@ +bugfixes: + - na_ontap_flexcache - one occurrence of msg missing in call to fail_json. + - na_ontap_igroup - one occurrence of msg missing in call to fail_json. + - na_ontap_lun - three occurrencse of msg missing in call to fail_json. + - na_ontap_lun_map_reporting_nodes - one occurrence of msg missing in call to fail_json. + - na_ontap_snapmirror - one occurrence of msg missing in call to fail_json. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3983.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3983.yaml new file mode 100644 index 000000000..4e9cae6b6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3983.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_lun - new suboption ``exclude_aggregates`` for SAN application. 
+ - na_ontap_volume - new suboption ``exclude_aggregates`` for NAS application. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3994.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3994.yaml new file mode 100644 index 000000000..b09f148ab --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-3994.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_igroups - nested igroups are not supported on ONTAP 9.9.0 but are on 9.9.1. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4005.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4005.yaml new file mode 100644 index 000000000..f95fad3ad --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4005.yaml @@ -0,0 +1,2 @@ +minor_changes: + - License displayed correctly in Github \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4010.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4010.yaml new file mode 100644 index 000000000..312b94c77 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4010.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_iscsi_security - cannot change authentication_type + - na_ontap_iscsi_security - IndexError list index out of range if vserver does not exist diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4022.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4022.yaml new file mode 100644 index 000000000..46a017c70 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4022.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cluster_peer - KeyError on dest_cluster_name if destination is unreachable. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4026.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4026.yaml new file mode 100644 index 000000000..fe41d4376 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4026.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume_clone - ``parent_vserver`` can not be given with ``junction_path``, ``uid``, or ``gid`` diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4031.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4031.yaml new file mode 100644 index 000000000..fccd38aaf --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4031.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_info - added file_directory_security to return the effective permissions of the directory. When using file_directory_security it must be called with gather_subsets and path and vserver must be specified in parameters. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4039.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4039.yaml new file mode 100644 index 000000000..7fa8129b6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4039.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_rest_info - add examples for ``parameters`` option. + - na_ontap_volume - show warning when resize is ignored because threshold is not reached. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4048.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4048.yaml new file mode 100644 index 000000000..91c5060f0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4048.yaml @@ -0,0 +1,9 @@ +bugfixes: + - na_ontap_snapmirror - support for SSL certificate authentication for both sides when using ONTAP. + - na_ontap_snapmirror - fix issues where there was no wait on quiesce before aborting. 
+ - na_ontap_snapmirror - fix issues where there was no wait on the relationship to end transferring. +minor_changes: + - na_ontap_snapmirror - new option ``peer_options`` to define source connection parameters. + - na_ontap_snapmirror - new option ``transferring_time_out`` to define how long to wait for transfer to complete on create or initialize. + - na_ontap_snapmirror - when deleting, attempt to delete even when the relationship cannot be broken. + - na_ontap_snapmirror - rewrite update for REST using POST to initiate transfer. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4049.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4049.yaml new file mode 100644 index 000000000..14a87cedb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4049.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_export_policy_rule - change ``anonymous_user_id`` type to str to accept user name and user id. (A warning is now triggered when a number is not quoted.) diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4060.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4060.yaml new file mode 100644 index 000000000..cec58942c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4060.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_vserver_create role - add ``nfsv3``, ``nfsv4``, ``nfsv41`` options. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4079.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4079.yaml new file mode 100644 index 000000000..96c50bb5f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4079.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_active_directory - Fixed idempotency and traceback issues. + - na_ontap_info - Add active_directory_account_info. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4113.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4113.yaml new file mode 100644 index 000000000..b14ebcf5a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4113.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_net_port - new option ``up_admin`` to set administrative state. + - na_ontap_net_port - change option types to bool and int respectively for ``autonegotiate_admin`` and ``mtu``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4114.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4114.yml new file mode 100644 index 000000000..9423c9c22 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4114.yml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_flexcache - corrected module name in documentation Examples diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4116.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4116.yaml new file mode 100644 index 000000000..4e7131eb8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4116.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_job_schedule - new option ``month_offset`` to explicitly select 0 or 1 for January. +bugfixes: + - na_ontap_job_schedule - fix documentation for REST ranges for months. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4119.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4119.yaml new file mode 100644 index 000000000..950140b11 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4119.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume_efficiency - new option ``storage_efficiency_mode`` for AFF only with 9.10.1 or later. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4121.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4121.yaml new file mode 100644 index 000000000..c28379fc5 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4121.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_export_policy_rule -- Added Rest support for Export Policy Rules \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4122.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4122.yaml new file mode 100644 index 000000000..587fb8d56 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4122.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_snmp - Added REST support to the SNMP module diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4123.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4123.yaml new file mode 100644 index 000000000..0c279f32b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4123.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cluster - Added REST support to the cluster module. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4140.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4140.yaml new file mode 100644 index 000000000..af8bd17b9 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4140.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_quotas - attempt to retry on ``13001:success`` ZAPI error. Add debug data. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4150.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4150.yaml new file mode 100644 index 000000000..f2d70e517 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4150.yaml @@ -0,0 +1,3 @@ +bugfixes: + - all REST modules - 9.4 and 9.5 were incorrectly detected as supporting REST. + - na_ontap_snapmirror - improve error message when option is not supported with ZAPI. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4157.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4157.yaml new file mode 100644 index 000000000..6116ab1dd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4157.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_info - add computed serial_hex and naa_id for lun_info. + - na_ontap_rest_info - add computed serial_hex and naa_id for storage/luns when serial_number is present. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4159.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4159.yaml new file mode 100644 index 000000000..229e52e3e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4159.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_rest_cli - removed incorrect statement indicating that console access is required. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4161.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4161.yaml new file mode 100644 index 000000000..39043e675 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4161.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_rest_info - The Default for ``gather_subset`` has been changed to demo which returns ``cluster/software``, ``svm/svms``, ``cluster/nodes``. To return all Info must specifically list ``all`` in your playbook. 
Do note ``all`` is a very resource-intensive action and it is highly recommended to call just the info/APIs you need. + - na_ontap_rest_info - All Info that exist in ``na_ontap_info`` that has REST equivalents have been implemented. Note that the returned structure for REST and the variable names in the structure is different from the ZAPI based ``na_ontap_info``. Some default variables in ZAPI are no longer returned by default in REST and will need to be specified using the ``field`` option. + - na_ontap_rest_info - The following info subsets have been added ``system_node_info``, ``net_interface_info``, ``net_port_info``, ``security_login_account_info``, ``vserver_peer_info``, ``cluster_image_info``, ``cluster_log_forwarding_info``, ``metrocluster_info``, ``metrocluster_node_info``, ``net_dns_info``, ``net_interface_service_policy_info``, ``vserver_nfs_info``, ``clock_info``, ``igroup_info``, ``vscan_status_info``, ``vscan_connection_status_all_info``, ``storage_bridge_info``, ``nvme_info``, ``nvme_interface_info``, ``nvme_subsystem_info``, ``cluster_switch_info``, ``export_policy_info``, ``kerberos_realm_info``,``sis_info``, ``sis_policy_info``, ``snapmirror_info``, ``snapmirror_destination_info``, ``snapmirror_policy_info``, ``sys_cluster_alerts``, ``cifs_vserver_security_info`` diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4175.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4175.yaml new file mode 100644 index 000000000..ae3bd6fb4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4175.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_svm - new REST options of svm admin_state ``stopped`` and ``running`` added. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4177.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4177.yaml new file mode 100644 index 000000000..c1b51e550 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4177.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_info - new option ``use_python_keys`` to replace ``svm/svms`` with ``svm_svms`` to simplify post processing. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4179.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4179.yml new file mode 100644 index 000000000..cedf71b0f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4179.yml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume - ``tiering_minimum_cooling_days`` to specify how many days must pass before inactive data in a volume using the Auto or Snapshot-Only policy is considered cold and eligible for tiering. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4190.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4190.yaml new file mode 100644 index 000000000..adcfc7ee7 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4190.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_cifs_server - ``force`` option is supported when state is absent to ignore communication errors. +bugfixes: + - na_ontap_vserver_delete role - delete iSCSI igroups and CIFS server before deleting vserver. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4191.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4191.yaml new file mode 100644 index 000000000..d9b751a0e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4191.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_object_store - new option ``port``, ``certificate_validation_enabled``, ``ssl_enabled`` for target server. +bugfixes: + - na_ontap_object_store - when using REST, wait for job status to correctly report errors. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4196.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4196.yaml new file mode 100644 index 000000000..b8ad47141 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4196.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_object_store - support modifying an object store config with REST. + - na_ontap_object_store - new REST options ``owner`` and ``change_password``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4197.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4197.yaml new file mode 100644 index 000000000..1797fcbab --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4197.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_rest_info - support added for protocols/vscan/on-access-policies. + - na_ontap_rest_info - support added for protocols/vscan/on-demand-policies. + - na_ontap_rest_info - support added for protocols/vscan/scanner-pools. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4206.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4206.yaml new file mode 100644 index 000000000..de15ebf14 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4206.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_vserver_delete role - added set_fact to accept ``netapp_{hostname|username|password}`` or ``hostname,username and password`` variables. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4218.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4218.yaml new file mode 100644 index 000000000..62a69eddf --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4218.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_svm - new option ``services`` to allow and/or enable protocol services. + - na_ontap_svm - new option ``ignore_rest_unsupported_options`` to ignore older ZAPI options not available in REST. + - na_ontap_svm - ignore ``aggr_list`` with ``'*'`` when using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4227.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4227.yaml new file mode 100644 index 000000000..ade004748 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4227.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_job_schedule - fix idempotency issue with REST when job_minutes is set to -1. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4228.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4228.yaml new file mode 100644 index 000000000..6d4755553 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4228.yaml @@ -0,0 +1,2 @@ +minor_changes: + - PR15 - allow usage of Ansible module group defaults - for Ansible 2.12+. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4231.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4231.yaml new file mode 100644 index 000000000..3edced8b3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4231.yaml @@ -0,0 +1,2 @@ +bugfixes: + - all modules - traceback on ONTAP 9.3 (and earlier) when trying to detect REST support. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4235.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4235.yaml new file mode 100644 index 000000000..fc49d44f2 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4235.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_interface - new option ``from_name`` to rename an interface. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4243.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4243.yaml new file mode 100644 index 000000000..2e1783f8c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4243.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_software_update - remove ``absent`` as a choice for ``state`` as it has no use. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4255.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4255.yaml new file mode 100644 index 000000000..6695cfa3b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4255.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_ldap_client - remove limitation on schema so that custom schemas can be used. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4256.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4256.yaml new file mode 100644 index 000000000..b04be1e0d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4256.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_software_update - new option ``validate_after_download`` to run ONTAP software update validation checks. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4270.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4270.yaml new file mode 100644 index 000000000..eff369d69 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4270.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_svm - new option ``max_volumes``. + - na_ontap_svm - support ``allowed protocols`` with REST for ONTAP 9.6 and later. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4288.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4288.yaml new file mode 100644 index 000000000..6e61ea831 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4288.yaml @@ -0,0 +1,5 @@ +bugfixes: + - na_ontap_job_schedule - fix idempotency issue with ZAPI when job_minutes is set to -1. + - na_ontap_job_schedule - cannot modify options not present in create when using REST. + - na_ontap_job_schedule - modify error if month is present but not changed with 0 offset when using REST. + - na_ontap_vserver_delete role - fix typos for cifs. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4289.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4289.yaml new file mode 100644 index 000000000..85a29be13 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4289.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapshot - ``expiry_time`` required REST api, will return error if set when using ZAPI. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4300.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4300.yaml new file mode 100644 index 000000000..fe7ba0327 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4300.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_job_schedule - modify error if month is changed from some values to all (-1) when using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4312.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4312.yml new file mode 100644 index 000000000..7c7354de2 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4312.yml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_export_policy - fix error if more than 1 vserver matched search name, the wrong uuid could be given diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4319.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4319.yaml new file mode 100644 index 000000000..ad644af5d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4319.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapmirror - ``source_path`` and ``source_hostname`` parameters are not mandatory to delete snapmirror relationship when source cluster is unknown, if specified it will delete snapmirror at destination and release the same at source side. if not, it only deletes the snapmirror at destination and will not look for source to perform snapmirror release. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4320.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4320.yaml new file mode 100644 index 000000000..4b9e4610b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4320.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_firewall_policy - added ``none`` as a choice for ``service`` which is supported from 9.8 ONTAP onwards. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4325.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4325.yml new file mode 100644 index 000000000..46b9fa206 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4325.yml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_info - update documentation for `fields` to clarify the list of fields that are returned by default. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4329.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4329.yaml new file mode 100644 index 000000000..3c3b3483e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4329.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun_map - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4331.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4331.yaml new file mode 100644 index 000000000..083ae35ad --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4331.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume_clone - Added REST support. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4332.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4332.yaml new file mode 100644 index 000000000..eee509f11 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4332.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_snapmirror - Added REST support to the na_ontap_snapmirror module diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4333.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4333.yaml new file mode 100644 index 000000000..e6a18face --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4333.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_snapshot_policy - Added REST support to the na_ontap_snapshot_policy module. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4334.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4334.yaml new file mode 100644 index 000000000..632f1466f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4334.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_net_vlan - Added REST support to the net vlan module. + - na_ontap_net_vlan - new REST options ``broadcast_domain``, ``ipspace`` and ``enabled`` added. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4335.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4335.yaml new file mode 100644 index 000000000..40733bba9 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4335.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_license - Added REST support to the license module. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4336.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4336.yaml new file mode 100644 index 000000000..8955d7736 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4336.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_motd - deprecated in favor of ``na_ontap_login_messages``. Fail when use_rest is set to ``always`` as REST is not supported. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4337.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4337.yaml new file mode 100644 index 000000000..815c96a46 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4337.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_ontap_aggregate - Added REST support. +bugfixes: + - na_ontap_aggregate - Fixed KeyError on unmount_volumes when offlining a volume if option is not set. + - na_ontap_aggregate - Report an error when attempting to change snaplock_type. + - module_utils - fixed KeyError on Allow when using OPTIONS method and the API failed. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4338.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4338.yml new file mode 100644 index 000000000..06f2c1d34 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4338.yml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_broadcast_domain_ports - warn about deprecation, fall back to ZAPI or fail when REST is desired. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4339.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4339.yaml new file mode 100644 index 000000000..88d65f8c9 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4339.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_net_port - Added REST support to the net port module diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4340.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4340.yaml new file mode 100644 index 000000000..6a9d5800c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4340.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_net_ifgrp - Added REST support to the net ifgrp module. + - na_ontap_net_ifgrp - new REST only options ``from_lag_ports``, ``broadcast_domain`` and ``ipspace`` added. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4341.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4341.yaml new file mode 100644 index 000000000..fd18f948e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4341.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cifs_share - Added REST support to the cifs share module. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4342.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4342.yml new file mode 100644 index 000000000..e80c9fc70 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4342.yml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun - Added REST support. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4343.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4343.yaml new file mode 100644 index 000000000..bbd3989c4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4343.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_cluster_peer - Added REST support to the cluster_peer module. +bugfixes: + - na_ontap_cluster_peer - Fixed KeyError if both ``source_intercluster_lifs`` and ``dest_intercluster_lifs`` not present in cluster create. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4344.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4344.yaml new file mode 100644 index 000000000..f53f057b8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4344.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_vserver_peer - Added REST support to the vserver_peer module diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4345.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4345.yaml new file mode 100644 index 000000000..356c13ca8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4345.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_fcp -- Added REST support for FCP \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4347.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4347.yaml new file mode 100644 index 000000000..e325b3925 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4347.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_ucadapter - added REST support. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4348.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4348.yaml new file mode 100644 index 000000000..08d9f86ae --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4348.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_net_routes - ``metric`` option is supported from ONTAP 9.11.0 or later in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4349.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4349.yaml new file mode 100644 index 000000000..ab03626b0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4349.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cifs_server - Added REST support to the cifs server module. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4350.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4350.yaml new file mode 100644 index 000000000..42e22ae77 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4350.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_nfs - Added Rest Support \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4367.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4367.yaml new file mode 100644 index 000000000..d960d8290 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4367.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume - new REST option ``analytics`` added. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4391.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4391.yaml new file mode 100644 index 000000000..278f198c8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4391.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cluster - ``single_node_cluster`` was silently ignored with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4392.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4392.yaml new file mode 100644 index 000000000..84284e06f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4392.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapmirror - modify policy, schedule and other parameter failure are fixed. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4393.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4393.yaml new file mode 100644 index 000000000..d1d521c9c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4393.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_igroup - ``force_remove_initiator`` option was ignored when removing initiators from existing igroup. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4394.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4394.yaml new file mode 100644 index 000000000..ddee15ec8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4394.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_security_certificates - ``intermediate_certificates`` option was ignored. + - four modules (mediator, metrocluster, security_certificates, wwpn_alias) would report a None error when REST is not available. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4399.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4399.yaml new file mode 100644 index 000000000..5751b2cf5 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4399.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_storage_failover - KeyError on 'ha' if the system is not configured as HA. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4401.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4401.yaml new file mode 100644 index 000000000..9aca5a8eb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4401.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_cluster - add ``force`` option when deleting a node. +bugfixes: + - na_ontap_cluster - switch to ZAPI when DELETE is required with ONTAP 9.6. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4404.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4404.yaml new file mode 100644 index 000000000..d1e516efb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4404.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapshot - ``snapmirror_label`` is supported with REST on ONTAP 9.7 or higher, report error if used on ONTAP 9.6. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4415.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4415.yaml new file mode 100644 index 000000000..0396baccf --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4415.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_quotas - fix idempotency issue on ``disk_limit`` and ``soft_disk_limit``. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4417.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4417.yaml new file mode 100644 index 000000000..2e243b82e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4417.yaml @@ -0,0 +1,2 @@ +known_issues: + - na_ontap_snapshot - added documentation to use UTC format for ``expiry_time``. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4435.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4435.yml new file mode 100644 index 000000000..7afd4a4e7 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4435.yml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_svm - module will fail on init if a rest only and zapi only option are used at the same time. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4439.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4439.yaml new file mode 100644 index 000000000..60a65b037 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4439.yaml @@ -0,0 +1,2 @@ +bugfixes: + - cluster scoped modules are failing on FSx with 'Vserver API missing vserver parameter' error. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4449.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4449.yaml new file mode 100644 index 000000000..8e57b8379 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4449.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_svm - Added documentation for ``allowed_protocol``, ndmp is default in REST. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4457.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4457.yaml new file mode 100644 index 000000000..6515757f3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4457.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_vserver_delete role - do not report an error if the vserver does not exist. +bugfixes: + - na_ontap_vserver_delete role - report error if ONTAP version is 9.6 or older. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4459.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4459.yaml new file mode 100644 index 000000000..805794795 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4459.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_aggregate - new option ``encryption`` to enable encryption with ZAPI. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4460.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4460.yaml new file mode 100644 index 000000000..ab0aa7693 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4460.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_net_routes - metric was not always modified with ZAPI. + - na_ontap_net_routes - support cluster-scoped routes with REST. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4465.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4465.yml new file mode 100644 index 000000000..e711ef0e0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4465.yml @@ -0,0 +1,2 @@ +bugfixes: + - fix error where module will fail for ONTAP 9.6 if use_rest was set to auto diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4479.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4479.yaml new file mode 100644 index 000000000..288c6b3f0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4479.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_cifs_local_user_modify - unexpected argument ``name`` error with REST. + - na_ontap_cifs_local_user_modify - KeyError on ``description`` or ``full_name`` with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4487.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4487.yaml new file mode 100644 index 000000000..0d8a6fe51 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4487.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_net_ifgrp - fix error in modify ports with zapi. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4501.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4501.yaml new file mode 100644 index 000000000..178ed2f06 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4501.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_broadcast_domain - new REST only option ``from_ipspace`` added. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4508.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4508.yaml new file mode 100644 index 000000000..1ba1f3c81 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4508.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - If using REST and ONTAP 9.6 and `efficiency_policy` module will fail as `efficiency_policy` is not supported in ONTAP 9.6. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4526.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4526.yaml new file mode 100644 index 000000000..d4fd04639 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4526.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - Fixed issue that would fail the module in REST when changing `is_online` if two vserver volumes had the same name. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4527.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4527.yaml new file mode 100644 index 000000000..d2ccda500 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4527.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - Fixed error with unmounting junction_path in rest. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4540.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4540.yaml new file mode 100644 index 000000000..de5f20ee4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4540.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_volume - ``logical_space_enforcement`` specifies whether to perform logical space accounting on the volume. + - na_ontap_volume - ``logical_space_reporting`` specifies whether to report space logically on the volume. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4554.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4554.yaml new file mode 100644 index 000000000..22ed97b83 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4554.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_ports - Added REST support to the ports module. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4565.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4565.yaml new file mode 100644 index 000000000..5fdc2a0b3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4565.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume_efficiency - Removed restriction on policy name. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4566.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4566.yaml new file mode 100644 index 000000000..3840a5a78 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4566.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_info - fix KeyError on node for aggr_efficiency_info option against a metrocluster system. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4568.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4568.yaml new file mode 100644 index 000000000..a2c8e6a8a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4568.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_broadcast_domain - fix idempotency issue when ``ports`` has identical values. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4573.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4573.yaml new file mode 100644 index 000000000..1638057fe --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4573.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_export_policy_rule - Fixed bug that prevent ZAPI and REST calls from working correctly \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4577.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4577.yaml new file mode 100644 index 000000000..fec14574e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4577.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_user - Fixed lock state is not set if password is not changed. + - na_ontap_user - Fixed TypeError 'tuple' object does not support item assignment. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4588.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4588.yaml new file mode 100644 index 000000000..21aa34406 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4588.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_export_policy_rule - new option ``ntfs_unix_security`` for NTFS export UNIX security options added. +bugfixes: + - Fixed ONTAP minor version ignored in checking minimum ONTAP version. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4604.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4604.yaml new file mode 100644 index 000000000..a5ba75b52 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4604.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cifs_acl - new option ``type`` for user-group-type. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4605.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4605.yaml new file mode 100644 index 000000000..22a78e6e7 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4605.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cifs_acl - Added REST support to the cifs share access control module. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4606.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4606.yaml new file mode 100644 index 000000000..a3317404c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4606.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_quotas - fix another quota operation is currently in progress issue. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4609.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4609.yaml new file mode 100644 index 000000000..9952a13af --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4609.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_user - Fixed issue when attempting to change password for absent user when set_password is set. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4612.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4612.yaml new file mode 100644 index 000000000..226e5e5eb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4612.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_aggregate - Fixed error in delete aggregate if the ``disk_count`` is less than current disk count. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4621.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4621.yaml new file mode 100644 index 000000000..b1e18e207 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4621.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_aggregate - Added ``disk_class`` option for REST and ZAPI. + - na_ontap_aggregate - Extended accepted ``disk_type`` values for ZAPI. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4623.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4623.yaml new file mode 100644 index 000000000..7300cfe4a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4623.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - Fixed error when creating a flexGroup when ``aggregate_name`` and ``aggr_list_multiplier`` are not set in rest. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4644.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4644.yaml new file mode 100644 index 000000000..3bd9d62dd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4644.yaml @@ -0,0 +1,7 @@ +minor_changes: + - na_ontap_volume - ``wait_for_completion`` and ``check_interval`` is now supported for volume move and encryption in REST. + - na_ontap_volume - new option ``max_wait_time`` added. +bugfixes: + - na_ontap_volume - fix error when trying to move encrypted volume and ``encrypt`` is True in REST. + - na_ontap_volume - fix error when trying to unencrypt volume in REST. + - na_ontap_volume - fix KeyError on ``aggregate_name`` when trying to unencrypt volume in ZAPI. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4645.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4645.yaml new file mode 100644 index 000000000..828d0fa35 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4645.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_aggregate - Fixed UUID issue when attempting to attach object store as part of creating the aggregate with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4648.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4648.yaml new file mode 100644 index 000000000..8c3328def --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4648.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume_clone - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4676.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4676.yaml new file mode 100644 index 000000000..7257ae062 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4676.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cifs_server - error out if ZAPI only options ``force`` or ``workgroup`` are used with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4679.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4679.yaml new file mode 100644 index 000000000..b503f413a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4679.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_rest_info - Fixed example with wrong indentation for ``use_python_keys``. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4691.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4691.yaml new file mode 100644 index 000000000..eebfbdf91 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4691.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cluster_peer - report an error if there is an attempt to use the already peered clusters. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4711.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4711.yaml new file mode 100644 index 000000000..c050de0b7 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4711.yaml @@ -0,0 +1,2 @@ +minor_changes: + - all modules that only support ZAPI - warn when ``use_rest`` with a value of ``always`` is ignored. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4716.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4716.yaml new file mode 100644 index 000000000..167bcfecd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4716.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cifs_acl - use ``type`` if present when fetching existing ACL with ZAPI. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4719.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4719.yml new file mode 100644 index 000000000..126eec8fd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4719.yml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_rest_info - Fixed an issues with adding field to specific info that didn't have a direct REST equivalent. + - na_ontap_lun_map - Fixed bug when deleting lun map using REST. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4729.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4729.yml new file mode 100644 index 000000000..4efffd7f0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4729.yml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_lun_map - fixed bugs resulting in REST support to not work. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4731.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4731.yaml new file mode 100644 index 000000000..13d722c66 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4731.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_ontap_volume - add support for SnapLock - only for REST. +bugfixes: + - na_ontap_snapshot - fix key error on volume when using REST. + - na_ontap_snapshot - add error message if volume is not found with REST. + - na_ontap_volume - fix idempotency issue with compression settings when using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4735.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4735.yaml new file mode 100644 index 000000000..c4ea5201b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4735.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cifs - fixed `symlink_properties` option silently ignored for cifs share creation when using REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4736.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4736.yaml new file mode 100644 index 000000000..eee218fcb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4736.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapmirror - Added use_rest condition for the REST support to work when use_rest `always`. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4737.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4737.yaml new file mode 100644 index 000000000..373bc2811 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4737.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_disk_options - ONTAP 9.10.1 returns on/off rather than True/False. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4743.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4743.yaml new file mode 100644 index 000000000..6cbaf0fb0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4743.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_lun_map - TypeError - '>' not supported between instances of 'int' and 'str '. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4745.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4745.yaml new file mode 100644 index 000000000..33f6f34ee --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4745.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_debug - report ansible version and ONTAP collection version. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4747.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4747.yaml new file mode 100644 index 000000000..995d0fd6f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4747.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_vserver_create role - support max_volumes option. + - na_ontap_cluster_config role - use na_ontap_login_messages as na_ontap_motd is deprecated. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4762.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4762.yaml new file mode 100644 index 000000000..135895cb2 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4762.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cifs_local_user_set_password - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4763.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4763.yaml new file mode 100644 index 000000000..2e73f79c0 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4763.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cluster_ha - added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4764.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4764.yaml new file mode 100644 index 000000000..8d5561118 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4764.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_efficiency_policy - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4767.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4767.yaml new file mode 100644 index 000000000..668147c56 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4767.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_flexcache - properly use ``origin_cluster`` in GET but not in POST when using REST. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4769.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4769.yaml new file mode 100644 index 000000000..55eef554a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4769.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_igroup_initiator - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4770.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4770.yaml new file mode 100644 index 000000000..58cab8ba6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4770.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_iscsi - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4771.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4771.yaml new file mode 100644 index 000000000..4e0392ee7 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4771.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_kerberos_realm - added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4773.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4773.yaml new file mode 100644 index 000000000..a0f046007 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4773.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_ldap_client - Added REST support. + - na_ontap_ldap_client - Added ``ldaps_enabled`` option in ZAPI. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4774.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4774.yaml new file mode 100644 index 000000000..9b7168c58 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4774.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_lun_copy - added REST support. +bugfixes: + - na_ontap_lun_copy - fix key error on ``source_vserver`` option. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4775.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4775.yaml new file mode 100644 index 000000000..133d0cf77 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4775.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun_map_reporting_nodes - added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4776.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4776.yaml new file mode 100644 index 000000000..07377d36c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4776.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_name_service_switch - added REST support. +bugfixes: + - na_ontap_name_service_switch - fix AttributeError 'NoneType' object has no attribute 'get_children' if ``sources`` is '-' in current. + - na_ontap_name_service_switch - fix idempotency issue on ``sources`` option. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4779.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4779.yaml new file mode 100644 index 000000000..47e6f720f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4779.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_nvme - Added REST support. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4780.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4780.yaml new file mode 100644 index 000000000..7e47d7c98 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4780.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_nvme_namespace - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4781.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4781.yaml new file mode 100644 index 000000000..941766f90 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4781.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_nvme_subsystem - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4784.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4784.yaml new file mode 100644 index 000000000..27c7b70a3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4784.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_portset - Added REST support. +bugfixes: + - na_ontap_portset - fixed idempotency issue when ``ports`` has identical values. + - na_ontap_portset - fixed error when trying to remove partial ports from portset if igroups are bound to it. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4785.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4785.yaml new file mode 100644 index 000000000..8cec3e552 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4785.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_qos_adaptive_policy_group - warn about deprecation, fall back to ZAPI or fail when REST is desired. 
+ - na_ontap_qos_policy_group - Added REST only supported option ``adaptive_qos_options`` for configuring adaptive policy. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4786.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4786.yaml new file mode 100644 index 000000000..80b4e164c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4786.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_qos_policy_group - Added REST only supported option ``fixed_qos_options`` for configuring max/min throughput policy. + - na_ontap_qos_policy_group - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4788.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4788.yaml new file mode 100644 index 000000000..18ee71fd9 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4788.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_quotas - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4789.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4789.yaml new file mode 100644 index 000000000..5eccd610c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4789.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_security_key_manager - added REST support. + - na_ontap_security_key_manager - new REST options ``external`` and ``vserver`` for external key manager. + - na_ontap_security_key_manager - new REST option ``onboard`` for onboard key manager. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4790.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4790.yaml new file mode 100644 index 000000000..eb53b1ead --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4790.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_service_processor_network - Added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4794.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4794.yaml new file mode 100644 index 000000000..f352888c3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4794.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_software_update - added REST support. +bugfixes: + - na_ontap_software_update - now reports changed=False when the package is already present. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4798.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4798.yaml new file mode 100644 index 000000000..3a32f8a15 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4798.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_unix_group - added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4799.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4799.yaml new file mode 100644 index 000000000..c749d2024 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4799.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_unix_user - Added REST support. + - na_ontap_unix_user - Added new option ``primary_gid`` aliased to ``group_id``. 
+ - na_ontap_user_role - added REST support.
+ - na_ontap_autosupport - Fixed ``partner_address`` not working in REST.
+ - na_ontap_snapmirror - Added more descriptive error messages for REST.
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4832.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4832.yml new file mode 100644 index 000000000..00430ccc8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4832.yml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_iscsi - Fixed issue with ``start_state`` always being set to stopped when creating an ISCSI. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4834.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4834.yaml new file mode 100644 index 000000000..3e2b0dd24 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4834.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_vserver_peer - ignore job entry doesn't exist error with REST to bypass ONTAP issue with FSx. + - na_ontap_vserver_peer - report error if SVM peer does not see a peering relationship after create. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4857.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4857.yaml new file mode 100644 index 000000000..7f169813e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4857.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_nvme_subsystem - report subsystem as absent if vserver cannot be found when attempting a delete. + - na_ontap_svm - added vserver as a convenient alias for name when using module_defaults. + - all modules - do not fail on ZAPI EMS log when vserver does not exist. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4862.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4862.yaml new file mode 100644 index 000000000..a859d7e83 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4862.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_security_ssh - Updates the SSH server configuration for the specified SVM - REST only. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4863.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4863.yaml new file mode 100644 index 000000000..71ce07407 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4863.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_ntp - new option ``key_id`` added. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4864.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4864.yaml new file mode 100644 index 000000000..8bc9b2709 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4864.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_svm - add support for web services (ssl modify) - REST only with 9.8 or later. +bugfixes: + - na_ontap_svm - fixed KeyError issue on protocols when vserver is stopped. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4872.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4872.yaml new file mode 100644 index 000000000..e8b33a045 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4872.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_lun - Fixed KeyError on options ``force_resize``, ``force_remove`` and ``force_remove_fenced`` in Zapi. + - na_ontap_lun - Fixed ``force_remove`` option silently ignored in REST. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4879.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4879.yaml new file mode 100644 index 000000000..a1555c9f4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4879.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapshot_policy - Do not validate parameter when state is ``absent`` and fix KeyError on ``comment``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4882.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4882.yaml new file mode 100644 index 000000000..e6c60f104 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4882.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_lun - Added ``lun_modify`` after ``app_modify`` to fix idempotency issue. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4898.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4898.yaml new file mode 100644 index 000000000..c4f472172 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4898.yaml @@ -0,0 +1,86 @@ +minor_changes: + - na_ontap_rest_info REST API's with hyphens in the name will now be converted to underscores when ``use_python_keys`` is set to ``True`` so that YAML parsing works correctly. 
+ - na_ontap_rest_info support added for cluster/firmware/history
na_ontap_rest_info support added for protocols/nfs/connected-clients + - na_ontap_rest_info support added for protocols/nfs/kerberos/interfaces + - na_ontap_rest_info support added for protocols/nvme/subsystem-controllers + - na_ontap_rest_info support added for protocols/nvme/subsystem-maps + - na_ontap_rest_info support added for protocols/s3/buckets + - na_ontap_rest_info support added for protocols/s3/services + - na_ontap_rest_info support added for protocols/san/iscsi/sessions + - na_ontap_rest_info support added for protocols/san/portsets + - na_ontap_rest_info support added for protocols/san/vvol-bindings + - na_ontap_rest_info support added for security/anti-ransomware/suspects + - na_ontap_rest_info support added for security/audit + - na_ontap_rest_info support added for security/audit/messages + - na_ontap_rest_info support added for security/authentication/cluster/ad-proxy + - na_ontap_rest_info support added for security/authentication/cluster/ldap + - na_ontap_rest_info support added for security/authentication/cluster/nis + - na_ontap_rest_info support added for security/authentication/cluster/saml-sp + - na_ontap_rest_info support added for security/authentication/publickeys + - na_ontap_rest_info support added for security/azure-key-vaults + - na_ontap_rest_info support added for security/certificates + - na_ontap_rest_info support added for security/gcp-kms + - na_ontap_rest_info support added for security/ipsec + - na_ontap_rest_info support added for security/ipsec/ca-certificates + - na_ontap_rest_info support added for security/ipsec/policies + - na_ontap_rest_info support added for security/ipsec/security-associations + - na_ontap_rest_info support added for security/key-manager-configs + - na_ontap_rest_info support added for security/key-managers + - na_ontap_rest_info support added for security/key-stores + - na_ontap_rest_info support added for security/login/messages + - na_ontap_rest_info support added for security/ssh + - 
na_ontap_rest_info support added for security/ssh/svms + - na_ontap_rest_info support added for storage/cluster + - na_ontap_rest_info support added for storage/file/clone/split-loads + - na_ontap_rest_info support added for storage/file/clone/split-status + - na_ontap_rest_info support added for storage/file/clone/tokens + - na_ontap_rest_info support added for storage/monitored-files + - na_ontap_rest_info support added for storage/qos/workloads + - na_ontap_rest_info support added for storage/snaplock/audit-logs + - na_ontap_rest_info support added for storage/snaplock/compliance-clocks + - na_ontap_rest_info support added for storage/snaplock/event-retention/operations + - na_ontap_rest_info support added for storage/snaplock/event-retention/policies + - na_ontap_rest_info support added for storage/snaplock/file-fingerprints + - na_ontap_rest_info support added for storage/snaplock/litigations + - na_ontap_rest_info support added for storage/switches + - na_ontap_rest_info support added for storage/tape-devices + - na_ontap_rest_info support added for support/auto-update + - na_ontap_rest_info support added for support/auto-update/configurations + - na_ontap_rest_info support added for support/auto-update/updates + - na_ontap_rest_info support added for support/configuration-backup + - na_ontap_rest_info support added for support/configuration-backup/backups + - na_ontap_rest_info support added for support/coredump/coredumps + - na_ontap_rest_info support added for support/ems/messages + - na_ontap_rest_info support added for support/snmp + - na_ontap_rest_info support added for support/snmp/users + - na_ontap_rest_info support added for svm/migrations \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4975.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4975.yaml new file mode 100644 index 000000000..461ee4337 --- /dev/null +++ 
+ - na_ontap_volume - use ``time_out`` value when creating/modifying/deleting volumes with REST rather than hardcoded value.
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4985.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4985.yaml new file mode 100644 index 000000000..add428588 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4985.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_export_policy_rule - Add ``from_rule_index`` for both REST and ZAPI. Change ``rule_index`` to required. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4998.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4998.yaml new file mode 100644 index 000000000..d2f4cb8df --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-4998.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_command - fix typo in example. +minor_changes: + - na_ontap_volume_autosize - improve error reporting. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5015.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5015.yml new file mode 100644 index 000000000..08bb6d005 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5015.yml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cifs - fixed error in modifying comment if it is not set while creating CIFS share in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5016.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5016.yaml new file mode 100644 index 000000000..5571ed09e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5016.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_service_policy - fix examples in documentation. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5017.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5017.yaml new file mode 100644 index 000000000..b41d3323d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5017.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_snmp_traphosts - Added ``host`` option in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5019.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5019.yaml new file mode 100644 index 000000000..dbe5c8568 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5019.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cifs - Added ``unix_symlink`` option in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5026.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5026.yaml new file mode 100644 index 000000000..5deed81d8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5026.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_quotas - support TB as a unit, update doc with size format description. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5034.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5034.yaml new file mode 100644 index 000000000..a51c59b05 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5034.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_interface - rename fails with 'inconsistency in rename action' for cluster interface with REST. +minor_changes: + - na_ontap_interface - use REST when ``use_rest`` is set to ``auto``. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5047.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5047.yaml new file mode 100644 index 000000000..e06ff1c14 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5047.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_nfs - fix TypeError on NoneType as ``tcp_max_xfer_size`` is not supported in earlier ONTAP versions. + - na_ontap_nfs - fix ``Extra input`` error with ZAPI for ``is-nfsv4-enabled``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5062.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5062.yaml new file mode 100644 index 000000000..97cbc6e0c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5062.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_snapmirror - when using ZAPI, wait for the relationship to be quiesced before breaking. + - na_ontap_snapmirror - when using REST with a policy, fix AttributeError - 'str' object has no attribute 'get'. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5063.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5063.yaml new file mode 100644 index 000000000..3de8b46d8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5063.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_service_policy - fixed error in modify by changing resulting json of an existing record in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5065.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5065.yaml new file mode 100644 index 000000000..956e9760f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5065.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_nvme - fixed invalid boolean value error for ``status_admin`` when creating nvme service in ZAPI. 
+ - na_ontap_nvme - fixed ``status_admin`` option is ignored if set to False when creating nvme service in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5068.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5068.yaml new file mode 100644 index 000000000..8f1dad96f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5068.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_cluster_config - fix the role to be able to create intercluster LIFs with REST (ipspace is required). + - na_ontap_interface - ignore ``vserver`` when using REST if role is one of 'cluster', 'node-mgmt', 'intercluster', 'cluster-mgmt'. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5079.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5079.yml new file mode 100644 index 000000000..1eb5eb556 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5079.yml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_vserver_cifs_security - Added ``use_ldaps_for_ad_ldap`` and ``use_start_tls_for_ad_ldap`` as mutually exclusive in ZAPI. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5082.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5082.yaml new file mode 100644 index 000000000..fb5015100 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5082.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cifs_server - Added ``security`` options in REST. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5084.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5084.yaml new file mode 100644 index 000000000..266de9994 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5084.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_qtree - Added ``unix_user`` and ``unix_group`` options in REST. +bugfixes: + - na_ontap_qtree - fix idempotency issue on ``unix_permissions`` option. + - na_ontap_volume - fix idempotency issue on ``unix_permissions`` option. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5085.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5085.yaml new file mode 100644 index 000000000..1e804aa74 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5085.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume - attempt to delete volume even when unmounting or offlining failed. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5090.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5090.yaml new file mode 100644 index 000000000..1e045ef15 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5090.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_user - add support for SAML authentication_method. +bugfixes: + - na_ontap_user - fix idempotency issue with SSH with second_authentication_method. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5092.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5092.yaml new file mode 100644 index 000000000..91c0a623f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5092.yaml @@ -0,0 +1,8 @@ +minor_changes: + - na_ontap_snapmirror - validate source endpoint for ZAPI and REST, accounting for vserver local name. 
+ - na_ontap_snapmirror - improve error messages to be more specific and consistent.
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5136.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5136.yaml new file mode 100644 index 000000000..df02b4f5a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5136.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_aggregate - updated ``disk_types`` in documentation. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5137.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5137.yaml new file mode 100644 index 000000000..e1ba56940 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5137.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_autosupport - TypeError on ``support`` field with ONTAP 9.11. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5138.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5138.yaml new file mode 100644 index 000000000..96d1d451a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5138.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_net_subnet - delete fails if ipspace is different than Default. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5152.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5152.yaml new file mode 100644 index 000000000..631becb06 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5152.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_login_messages - support cluster scope when using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5161.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5161.yaml new file mode 100644 index 000000000..6b36a3f8e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5161.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_autosupport - TypeError on ``ondemand_enabled`` field with ONTAP 9.11. 
+ - na_ontap_autosupport - fix idempotency issue on ``state`` field with ONTAP 9.11. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5168.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5168.yaml new file mode 100644 index 000000000..21030291b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5168.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_user - accept ``service_processor`` as an alias for ``service-processor`` with ZAPI, to be consistent with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5174.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5174.yaml new file mode 100644 index 000000000..aedd838dc --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5174.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_svm - KeyError on CIFS when using REST with ONTAP 9.8 or lower. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5179.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5179.yaml new file mode 100644 index 000000000..868252f00 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5179.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_net_subnet - fixed ``ipspace`` option ignored in getting net subnet. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5188.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5188.yaml new file mode 100644 index 000000000..386da4b31 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5188.yaml @@ -0,0 +1,7 @@ +minor_changes: + - na_ontap_lun - support ``qos_adaptive_policy_group`` with REST. +bugfixes: + - na_ontap_lun - catch ZAPI error on get LUN. + - na_ontap_lun - ignore resize error if no change was required. + - na_ontap_lun - report error if flexvol_name is missing when using ZAPI. 
+ - na_ontap_vserver_create role - add rule index as it is now required. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5189.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5189.yaml new file mode 100644 index 000000000..b2fdc3888 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5189.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_aggregate - new option ``allow_flexgroups`` added. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5190.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5190.yaml new file mode 100644 index 000000000..de5d876ba --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5190.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_interface - enforce requirement for address/netmask for interfaces other than FC. + - na_ontap_interface - fix idempotency issue for cluster scoped interfaces when using REST. + - na_ontap_interface - fix potential node and uuid issues with LIF migration. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5195.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5195.yaml new file mode 100644 index 000000000..58145770c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5195.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_interface - ``dns_domain_name`` is now supported from ONTAP 9.9 or later in REST. + - na_ontap_interface - ``is_dns_update_enabled`` is now supported from ONTAP 9.9.1 or later in REST. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5215.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5215.yaml new file mode 100644 index 000000000..2a28c0c5b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5215.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_s3_users - ``secret_key`` and ``access_token`` are now returned when creating a user. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5216.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5216.yaml new file mode 100644 index 000000000..644d823ab --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5216.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_cluster_config role - support ``broadcast_domain`` and ``service_policy`` with REST. + - na_ontap_interface - support ``broadcast_domain`` with REST. + - na_ontap_vserver_create role - support ``broadcast_domain``, ``ipspace``, and ``service_policy`` with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5220.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5220.yaml new file mode 100644 index 000000000..59e76f1f4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5220.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_s3_buckets - Module will not fail on create if no ``policy`` is given. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5223.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5223.yaml new file mode 100644 index 000000000..90b7d4fdb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5223.yaml @@ -0,0 +1,5 @@ +bugfixes: + - na_ontap_ntp - fixed typeError on ``key_id`` field with ZAPI. 
+minor_changes: + - na_ontap_ntp - for ONTAP version 9.6 or below fall back to ZAPI when ``use_rest`` is set to ``auto`` or fail when REST is desired. + - na_ontap_ntp_key - fail for ONTAP version 9.6 or below when ``use_rest`` is set to ``auto`` or when REST is desired. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5228.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5228.yaml new file mode 100644 index 000000000..df85b6242 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5228.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_volume - now defaults to REST with ``use_rest`` set to ``auto``, like every other module. ZAPI can be forced with ``use_rest`` set to ``never``. +bugfixes: + - na_ontap_volume - ``volume_security_style`` was not modified if other security options were present with ZAPI. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5229.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5229.yaml new file mode 100644 index 000000000..a86c4a906 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5229.yaml @@ -0,0 +1,6 @@ +bugfixes: + - na_ontap_interface - FC interfaces - scope is not supported. + - na_ontap_interface - FC interfaces - home_port is not supported for ONTAP 9.7 or earlier. + - na_ontap_interface - FC interfaces - home_node should not be sent as location.home_node. + - na_ontap_interface - FC interfaces - service_policy is not supported. + - na_ontap_interface - ignore 'none' when using REST rather than reporting unexpected protocol. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5241.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5241.yaml new file mode 100644 index 000000000..d695895cb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5241.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_ontap_software_update - deleting a software package is now supported with ZAPI and REST. +bugfixes: + - na_ontap_software_update - improve error handling if image file is already present. + - na_ontap_software_update - improve error handling when node is rebooting with REST. + - na_ontap_software_update - when using REST with ONTAP 9.9 or later, timeout value is properly set. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5243.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5243.yaml new file mode 100644 index 000000000..92c146865 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5243.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_wait_for_condition - added REST support. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5246.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5246.yaml new file mode 100644 index 000000000..553c2fd58 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5246.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_interface - fix error deleting fc interface if it is enabled in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5251.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5251.yaml new file mode 100644 index 000000000..851bc9555 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5251.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_interface - improved validations for unsupported options with FC interfaces. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5263.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5263.yaml new file mode 100644 index 000000000..428a178d5 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5263.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_wait_for_condition - added ``snapmirror_relationship`` to wait on ``state`` or ``transfer_state`` (REST only). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5268.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5268.yaml new file mode 100644 index 000000000..fa99ec379 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5268.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_service_processor_network - fix idempotency issue on ``dhcp`` option in ZAPI. + - na_ontap_service_processor_network - fail module when trying to disable ``dhcp`` and not setting one of ``ip_address``, ``netmask``, ``gateway_ip_address`` different than current. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5270.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5270.yaml new file mode 100644 index 000000000..e4ff21208 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5270.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_job_schedule - new option ``cluster`` added. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5271.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5271.yaml new file mode 100644 index 000000000..7b07595bd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5271.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_rest_info -- Will now warn you if a ``gather_subset`` is not supported by your version of ONTAP. 
+ - na_ontap_rest_info -- Will now include a message in return output about ``gather_subset`` not supported by your version of ONTAP. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5275.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5275.yaml new file mode 100644 index 000000000..60083a64d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5275.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_s3_groups - if `policies` is None module should no longer fail diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5285.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5285.yaml new file mode 100644 index 000000000..9a98c849f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5285.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_s3_buckets - fix options that cannot be modified if not set in creating s3 buckets. + - na_ontap_s3_buckets - fix TypeError if ``conditions`` not present in policy statements. + - na_ontap_s3_buckets - updated correct choices in options ``audit_event_selector.access`` and ``audit_event_selector.permission``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5287.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5287.yaml new file mode 100644 index 000000000..8b1c9f180 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5287.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_service_processor_network - allow manually configuring network if all of ``ip_address``, ``netmask``, ``gateway_ip_address`` set and ``dhcp`` not present in REST. + - na_ontap_service_processor_network - fix setting ``dhcp`` v4 takes more than ``wait_for_completion`` retries. + - na_ontap_service_processor_network - fix ``wait_for_completion`` ignored when trying to enable service processor network interface in ZAPI. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5297.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5297.yaml new file mode 100644 index 000000000..740ae54f9 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5297.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_security_key_manager - indicate that ``node`` is not used and is deprecated. +bugfixes: + - na_ontap_security_key_manager - fix KeyError on ``node``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5299.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5299.yaml new file mode 100644 index 000000000..135b59a43 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5299.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_user - enforce that all methods are under a single application. + - na_ontap_user - is_locked was not properly read with ZAPI, making the module not idempotent. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5304.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5304.yaml new file mode 100644 index 000000000..5abb989b4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5304.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_license - return list of updated package names. +bugfixes: + - na_ontap_license - fix intermittent KeyError when adding licenses with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5310.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5310.yaml new file mode 100644 index 000000000..d7b4e6c8f --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5310.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_snapmirror - wait 600 seconds for snapmirror creation to complete in REST. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5312.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5312.yaml new file mode 100644 index 000000000..ed1a40b73 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5312.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume_efficiency - new option ``volume_name`` added. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5338.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5338.yaml new file mode 100644 index 000000000..3546e17f4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5338.yaml @@ -0,0 +1,3 @@ +minor_changes: + - all REST modules - new option ``force_ontap_version`` to bypass permission issues with custom vsadmin roles. + - na_ontap_rest_info - new option ``ignore_api_errors`` to report error in subset rather than breaking execution. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5344.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5344.yaml new file mode 100644 index 000000000..7d61a7259 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5344.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_svm - Added ``ndmp`` option to services in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5354.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5354.yaml new file mode 100644 index 000000000..139af04a3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5354.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_info - added support for ``network/ip/subnets``. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5367.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5367.yaml new file mode 100644 index 000000000..3b3406381 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5367.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_export_policy_rule - ``rule_index`` is now optional for create and delete. + - na_ontap_export_policy_rule - new option ``force_delete_on_first_match`` to support duplicate entries on delete. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5380.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5380.yaml new file mode 100644 index 000000000..91f699c49 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5380.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cluster - ``timezone.name`` to modify cluster timezone. REST only. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5409.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5409.yaml new file mode 100644 index 000000000..bef0cc9a5 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5409.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_snapmirror - ``schedule`` is handled through ``policy`` for REST. + - na_ontap_snapmirror_policy - improve error reporting and report errors in check_mode. + - na_ontap_snapmirror_policy - new option ``identity_preservation`` added. + - na_ontap_snapmirror_policy - ``name`` added as an alias for ``policy_name``. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5412.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5412.yaml new file mode 100644 index 000000000..a3ec8b6cb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5412.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cifs_local_user_set_password - when using ZAPI, do not require cluster admin privileges. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5413.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5413.yaml new file mode 100644 index 000000000..d5f3d2173 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5413.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_kerberos_realm - fix cannot modify ``comment`` option in ZAPI. +minor_changes: + - na_ontap_kerberos_realm - change ``kdc_port`` option type to int. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5414.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5414.yaml new file mode 100644 index 000000000..dcde243a8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5414.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_net_subnet - added REST support. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5415.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5415.yaml new file mode 100644 index 000000000..1203101ba --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5415.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_dns - support cluster scope for modify and delete. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5426.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5426.yaml new file mode 100644 index 000000000..c1a924544 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5426.yaml @@ -0,0 +1,31 @@ +minor_changes: + - All REST GET's up to and including 9.11.1 that do not require a UUID/KEY to be passed in are now supported + - na_ontap_rest_info - support added for cluster. + - na_ontap_rest_info - support added for cluster/counter/tables. + - na_ontap_rest_info - support added for cluster/licensing/capacity-pools. + - na_ontap_rest_info - support added for cluster/licensing/license-managers. + - na_ontap_rest_info - support added for cluster/metrocluster/svms. + - na_ontap_rest_info - support added for cluster/sensors. + - na_ontap_rest_info - support added for name-services/cache/group-membership/settings. + - na_ontap_rest_info - support added for name-services/cache/host/settings. + - na_ontap_rest_info - support added for name-services/cache/netgroup/settings. + - na_ontap_rest_info - support added for name-services/cache/setting. + - na_ontap_rest_info - support added for name-services/cache/unix-group/settings. + - na_ontap_rest_info - support added for name-services/ldap-schemas. + - na_ontap_rest_info - support added for network/fc/fabrics. + - na_ontap_rest_info - support added for network/fc/interfaces. + - na_ontap_rest_info - support added for network/fc/interfaces. + - na_ontap_rest_info - support added for network/ip/subnets. + - na_ontap_rest_info - support added for protocols/cifs/connections. + - na_ontap_rest_info - support added for protocols/cifs/netbios. + - na_ontap_rest_info - support added for protocols/cifs/session/files. + - na_ontap_rest_info - support added for protocols/cifs/shadow-copies. + - na_ontap_rest_info - support added for protocols/cifs/shadowcopy-sets. 
+ - na_ontap_rest_info - support added for protocols/nfs/connected-client-maps. + - na_ontap_rest_info - support added for security. + - na_ontap_rest_info - support added for security/multi-admin-verify. + - na_ontap_rest_info - support added for security/multi-admin-verify/approval-groups. + - na_ontap_rest_info - support added for security/multi-admin-verify/requests. + - na_ontap_rest_info - support added for security/multi-admin-verify/rules. + - na_ontap_rest_info - support added for storage/file/moves. + - na_ontap_rest_info - support added for storage/pools. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5427.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5427.yaml new file mode 100644 index 000000000..5318907a2 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5427.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cluster_config Role - incorrect license was shown - updated to GNU General Public License v3.0 diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5430.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5430.yaml new file mode 100644 index 000000000..500d22846 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5430.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume_efficiency - Missing fields in REST get should return None and not crash module. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5431.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5431.yaml new file mode 100644 index 000000000..4ee22157d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5431.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_info - Added vserver in key_fields of net_interface_info. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5453.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5453.yaml new file mode 100644 index 000000000..6cdfbe6d5 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5453.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_ems_destination - fix idempotency issue when ``type`` value is rest_api. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5457.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5457.yaml new file mode 100644 index 000000000..ad4070358 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5457.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_info - Allowed the support of multiple subsets and warn when using ``**`` in fields. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5479.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5479.yaml new file mode 100644 index 000000000..3045cded6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5479.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_command - do not run command in check_mode (thanks to darksoul42). + - na_ontap_rest_cli - do not run command in check_mode (thanks to darksoul42). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5481.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5481.yaml new file mode 100644 index 000000000..a9b260db5 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5481.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cifs_acl - use ``type`` when deleting unix-user or unix-group from ACL in ZAPI. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5484.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5484.yaml new file mode 100644 index 000000000..9313dfb32 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5484.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_cifs - fix KeyError on ``unix_symlink`` field when using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5485.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5485.yaml new file mode 100644 index 000000000..3601af742 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5485.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_interface - improve error message when interface type is required with REST. + - na_ontap_qtree - fix KeyError on unix_permissions. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5487.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5487.yaml new file mode 100644 index 000000000..507398204 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5487.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_ems_destination - improve error messages - augment UT coverage (thanks to bielawb). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5503.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5503.yaml new file mode 100644 index 000000000..b1f276775 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5503.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_restit - support multipart/form-data for read and write. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5504.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5504.yaml new file mode 100644 index 000000000..0d98dd646 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5504.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_vserver_create - ``protocol`` is now optional. ``role`` is not set when protocol is absent. + - na_ontap_vserver_create - ``firewall_policy`` is not set when ``service_policy`` is present, as ``service_policy`` is preferred. + - na_ontap_vserver_create - added ``interface_type``. Only a value of ``ip`` is currently supported. + - na_ontap_vserver_create - added support for vserver management interface when using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5505.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5505.yaml new file mode 100644 index 000000000..2bd36abe6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5505.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_interface - attempt to set interface_type to ``ip`` when ``protocols`` is set to "none". diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5506.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5506.yaml new file mode 100644 index 000000000..cc3cdcd67 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5506.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_user - fix idempotency issue with 9.11 because of new is_ldap_fastbind field. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5507.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5507.yaml new file mode 100644 index 000000000..337bd3098 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5507.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_iscsi - new option ``target_alias`` added in REST. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5531.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5531.yaml new file mode 100644 index 000000000..27be3bd72 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5531.yaml @@ -0,0 +1,7 @@ +minor_changes: + - na_ontap_autosupport_invoke - warn when ``message`` alias is used as it will be removed - it conflicts with Ansible internal variable. + - na_ontap_login_message - warn when ``message`` alias is used as it will be removed - it conflicts with Ansible internal variable. + - na_ontap_motd - warn when ``message`` alias is used as it will be removed - it conflicts with Ansible internal variable. + - na_ontap_nfs - warn when ``nfsv4.1`` alias is used as it will be removed - it does not match Ansible naming convention. +bugfixes: + - na_ontap_firmware_upgrade - when enabled, disruptive_update would always update even when update is not required. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5532.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5532.yaml new file mode 100644 index 000000000..43e4dc07b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5532.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_interface - fix error where an ``address`` with an IPV6 ip would try to modify each time playbook was run. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5536.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5536.yaml new file mode 100644 index 000000000..1f4ba234a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5536.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume_efficiency - fix idempotent issue when state is absent and efficiency options are set in ZAPI. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5537.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5537.yaml new file mode 100644 index 000000000..daa982815 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5537.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - when deleting a volume, don't report a warning when unmount is successful (error is None). diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5540.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5540.yaml new file mode 100644 index 000000000..ca5e328eb --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5540.yaml @@ -0,0 +1,2 @@ +bugfixes: + - new meta/execution-environment.yml is failing ansible-builder sanitize step. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5548.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5548.yaml new file mode 100644 index 000000000..b2f3d37c6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5548.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - ``snapdir_access`` is not supported by REST and will currently inform you now if you try to use it with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5589.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5589.yaml new file mode 100644 index 000000000..f7a939582 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5589.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_interface - fix ``netmask`` not idempotent in REST. +minor_changes: + - na_ontap_interface - allow setting ``netmask`` with netmask length in ZAPI. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5591.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5591.yaml new file mode 100644 index 000000000..e01509eee --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5591.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_cli - returns changed only for verbs POST, PATCH and DELETE. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5592.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5592.yaml new file mode 100644 index 000000000..188303637 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5592.yaml @@ -0,0 +1,2 @@ +bugfixes: + - iso8601 filters - fix documentation generation issue. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5594.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5594.yaml new file mode 100644 index 000000000..83e0b240c --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5594.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_export_policy_rule - ``allow_device_creation`` and ``chown_mode`` is now supported in ZAPI. + - na_ontap_export_policy_rule - ``allow_suid``, ``allow_device_creation`` and ``chown_mode`` is now supported from ONTAP 9.9.1 or later in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5595.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5595.yaml new file mode 100644 index 000000000..24e39e148 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5595.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_rest_info - support added for protocols/active-directory. + - na_ontap_rest_info - support added for protocols/cifs/group-policies. 
+ - na_ontap_rest_info - support added for protocols/nfs/connected-client-settings. + - na_ontap_rest_info - support added for security/aws-kms. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5596.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5596.yaml new file mode 100644 index 000000000..7b7c14093 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5596.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_ldap_client - new option ``skip_config_validation``. +bugfixes: + - na_ontap_ldap_client - ``servers`` not accepted when using ZAPI and ``ldap_servers`` not handling a single server properly. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5604.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5604.yaml new file mode 100644 index 000000000..7d445dfb6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5604.yaml @@ -0,0 +1,5 @@ +bugfixes: + - na_ontap_interface - fix cannot set ``location.node.name`` and ``location.home_node.name`` error when creating or modifying fc interface. + - na_ontap_interface - fix unexpected argument error with ``ipspace`` when trying to get fc interface. +minor_changes: + - na_ontap_interface - error when try to migrate fc interface in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5606.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5606.yaml new file mode 100644 index 000000000..a90f42364 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5606.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_security_key_manager - requires 9.7+ to work with REST. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5611.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5611.yaml new file mode 100644 index 000000000..118a3364a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5611.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_service_policy - update services for 9.11.1 - make it easier to add new services. + - na_ontap_service_policy - new options ``known_services`` and ``additional_services``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5626.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5626.yaml new file mode 100644 index 000000000..a28080368 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5626.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_user_role - fixed Invalid JSON input. Expecting "privileges" to be an array. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5628.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5628.yaml new file mode 100644 index 000000000..558b1c504 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5628.yaml @@ -0,0 +1,8 @@ +bugfixes: + - na_ontap_user_role - fix AttributeError 'NetAppOntapUserRole' object has no attribute 'name'. + - na_ontap_user_role - fix duplicate entry error in ZAPI. + - na_ontap_user_role - fix entry does not exist error when trying to delete privilege in REST. + - na_ontap_user_role - fix KeyError on ``vserver``, ``command_directory_name`` in ZAPI and ``path``, ``query`` in REST. +minor_changes: + - na_ontap_user_role - ``path`` is required if ``privileges`` set in REST. + - na_ontap_user_role - ``command_directory_name`` is required if ``privileges`` not set in REST. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5629.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5629.yaml new file mode 100644 index 000000000..aac0e5656 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5629.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_debug - report python executable version and path. + - tracing - allow to selectively trace headers and authentication. +bugfixes: + - tracing - redact headers and authentication secrets by default. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5659.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5659.yaml new file mode 100644 index 000000000..58edf4f54 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5659.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_cifs_local_group_member - Added REST API support to retrieve, add and remove CIFS group member. + - na_ontap_cifs_local_group_member - REST support is from ONTAP 9.10.1 or later. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5662.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5662.yaml new file mode 100644 index 000000000..2d6bae722 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5662.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_mcc_mediator - Fix error that would prevent mediator deletion. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5665.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5665.yaml new file mode 100644 index 000000000..691e68226 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5665.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_volume - report error if vserver does not exist or is not a data vserver on create. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5666.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5666.yaml new file mode 100644 index 000000000..1ed1599bc --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5666.yaml @@ -0,0 +1,8 @@ +bugfixes: + - na_ontap_aggregate - allow adding disks before trying to offline aggregate. + - na_ontap_aggregate - fix ``service_state`` option skipped if it's set to offline in create. +minor_changes: + - na_ontap_aggregate - add support for ``service_state`` option from ONTAP 9.11.1 or later in REST. + - na_ontap_aggregate - add ``name`` to modify in module output if aggregate is renamed. + - na_ontap_aggregate - error if ``unmount_volumes`` set in REST, by default REST unmount volumes when trying to offline aggregate. + - na_ontap_aggregate - fix examples in documentation. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5671.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5671.yaml new file mode 100644 index 000000000..d0101d1d2 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5671.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - fixed bug preventing unmount and taking a volume offline at the same time \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5677.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5677.yaml new file mode 100644 index 000000000..d9ccc140e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5677.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_cifs_server - fix ``service_state`` is stopped when trying to modify cifs server in REST. +minor_changes: + - na_ontap_cifs_server - skip ``service_state`` option if not set in create. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5678.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5678.yaml new file mode 100644 index 000000000..fd262cfb4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5678.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_snapmirror_policy - new option ``copy_all_source_snapshots`` added in REST. +bugfixes: + - na_ontap_snapmirror_policy - fixed idempotency issue on ``identity_preservation`` option when using REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5696.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5696.yaml new file mode 100644 index 000000000..34704aebd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5696.yaml @@ -0,0 +1,5 @@ +bugfixes: + - na_ontap_quotas - fix default tree quota rule gets modified when ``quota_target`` is set in REST. + - na_ontap_quotas - fix user/group quota rule without qtree gets modified when ``qtree`` is set. +minor_changes: + - na_ontap_quotas - for qtree type, allow quota_target in path format /vol/vol_name/qtree_name in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5711.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5711.yaml new file mode 100644 index 000000000..8a0a53541 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5711.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_qtree - fix cannot get current qtree if enclosed in curly braces. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5713.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5713.yaml new file mode 100644 index 000000000..5f8c83dca --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5713.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_interface - new option ``probe_port`` for Azure load balancer. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5725.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5725.yaml new file mode 100644 index 000000000..331ac37e2 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5725.yaml @@ -0,0 +1,7 @@ +minor_changes: + - na_ontap_snapmirror_policy - new option ``copy_latest_source_snapshot``, ``create_snapshot_on_source`` and ``sync_type`` added in REST. + - na_ontap_snapmirror_policy - warn when replacing policy type ``async_mirror``, ``mirror_vault`` and ``vault`` with policy type ``async`` and ``strict_sync_mirror``, ``sync_mirror`` with ``sync`` in REST. + - na_ontap_snapmirror_policy - Added unsupported options in ZAPI. + - na_ontap_snapmirror_policy - Added new choices sync and async for policy type in REST. +bugfixes: + - na_ontap_snapmirror_policy - fix desired policy type not configured in cli with REST. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5733.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5733.yaml new file mode 100644 index 000000000..15349663d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5733.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_file_security_permissions - updated notes to indicate ONTAP 9.9.1 or later is required. + - na_ontap_file_security_permissions_acl - updated notes to indicate ONTAP 9.9.1 or later is required. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5734.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5734.yaml new file mode 100644 index 000000000..33837d009 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5734.yaml @@ -0,0 +1,6 @@ +bugfixes: + - na_ontap_active_directory - updated doc as only ZAPI is supported at present, force an error with use_rest always. + - na_ontap_cg_snapshot - updated doc with deprecation warning as it is a ZAPI only module. + - na_ontap_file_directory_policy - updated doc with deprecation warning as it is a ZAPI only module. + - na_ontap_quota_policy - updated doc with deprecation warning as it is a ZAPI only module. + - na_ontap_svm_options - updated doc with deprecation warning as it is a ZAPI only module. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5735.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5735.yaml new file mode 100644 index 000000000..157a3538d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5735.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_active_directory - REST requires ONTAP 9.12.1 or later. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5737.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5737.yaml new file mode 100644 index 000000000..607969ab8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5737.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_interface - new option ``fail_if_subnet_conflicts`` - requires REST and ONTAP 9.11.1 or later. + - na_ontap_interface - option ``subnet_name`` is now supported with REST with ONTAP 9.11.1 or later. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5738.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5738.yaml new file mode 100644 index 000000000..bf124a2fe --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5738.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_nfs - new options ``root``, ``windows`` and ``security`` added in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5757.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5757.yaml new file mode 100644 index 000000000..72224d6d8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5757.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_volume_efficiency - updated private cli with REST API. + - na_ontap_volume_efficiency - REST support for ``policy`` requires 9.7 or later, ``path`` requires 9.9.1 or later and ``volume_efficiency`` and ``start_ve_scan_old_data`` requires 9.11.1 or later. + - na_ontap_volume_efficiency - ``schedule``, ``start_ve_scan_all``, ``start_ve_build_metadata``, ``start_ve_delete_checkpoint``, ``start_ve_queue_operation``, ``start_ve_qos_policy`` and ``stop_ve_all_operations`` options are not supported with REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5760.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5760.yaml new file mode 100644 index 000000000..bc0d9f856 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5760.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_snapmirror - support ``schedule`` with REST and ONTAP 9.11.1, add alias ``transfer_schedule``. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5761.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5761.yaml new file mode 100644 index 000000000..393363f34 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5761.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_ontap_snapmirror_policy - new option ``transfer_schedule`` for async policy types. + - na_ontap_snapmirror_policy - add support for cluster scoped policy with REST. +bugfixes: + - na_ontap_snapmirror_policy - deleting all retention rules would trigger an error when the existing policy requires at least one rule. + - na_ontap_snapmirror_policy - index error on rules with ONTAP 9.12.1 as not all fields are present. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5774.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5774.yaml new file mode 100644 index 000000000..36fca7dc2 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5774.yaml @@ -0,0 +1,5 @@ +bugfixes: + - na_ontap_security_ipsec_policy - fix cannot get current security IPsec policy with ipspace. + - na_ontap_security_ipsec_policy - fix KeyError on ``authentication_method``. +minor_changes: + - na_ontap_active_directory - add ``fqdn`` as aliases for ``domain``. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5784.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5784.yaml new file mode 100644 index 000000000..2cd83a338 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5784.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_quotas - fix duplicate entry error when trying to add quota rule in REST. + - na_ontap_quotas - fix entry does not exist error when trying to modify quota status in REST. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5788.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5788.yaml new file mode 100644 index 000000000..61a5630c6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5788.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_svm - warn in case of mismatch in language option spelling. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5790.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5790.yaml new file mode 100644 index 000000000..6fc319c35 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5790.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapshot - fix cannot modify ``snapmirror_label``, ``expiry_time`` and ``comment`` if not configured in create. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5807.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5807.yaml new file mode 100644 index 000000000..356618ef3 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5807.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_cifs - new options ``access_based_enumeration``, ``change_notify``, ``encryption``, ``home_directory``, ``oplocks``, ``show_snapshot``, ``allow_unencrypted_access``, + ``namespace_caching`` and ``continuously_available`` added in REST. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5808.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5808.yaml new file mode 100644 index 000000000..9bc615d8e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5808.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_san_create - Role documentation corrected from nas to san \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5809.yml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5809.yml new file mode 100644 index 000000000..4ea42e1d5 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5809.yml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_interface - fix idempotency issue when ``home_port`` not set in creating FC interface. +minor_changes: + - na_ontap_interface - do not attempt to migrate FC interface if desired ``home_port``, ``home_node`` and ``current_port``, ``current_node`` are same. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5812.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5812.yaml new file mode 100644 index 000000000..ab799dbaf --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5812.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_license - support for NLF v2 license files. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5816.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5816.yaml new file mode 100644 index 000000000..82ae40047 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5816.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_cifs - new options ``browsable`` and ``show_previous_versions`` added in REST. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5819.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5819.yaml new file mode 100644 index 000000000..89971c32b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5819.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_aggregate - try to offline aggregate when disk add operation is in progress in ZAPI. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5820.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5820.yaml new file mode 100644 index 000000000..7775ba10e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5820.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_rest_info - fix field issue with private/cli and support/autosupport/check APIs. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5844.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5844.yaml new file mode 100644 index 000000000..57ba85878 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5844.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_dns - ``skip_validation`` option requires 9.9.1 or later with REST and ignored for cluster DNS operations. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5845.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5845.yaml new file mode 100644 index 000000000..ada44f811 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5845.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_qtree - ignore job entry does not exist error when creating qtree with REST to bypass ONTAP issue with FSx. + - na_ontap_quotas - ignore job entry does not exist error when creating quota with REST to bypass ONTAP issue with FSx. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5859.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5859.yaml new file mode 100644 index 000000000..41b9649dd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5859.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_ontap_file_security_permissions - updated module examples. +bugfixes: + - na_ontap_file_security_permissions - error if more than one desired ACLs has same user, access, access_control and apply_to. + - na_ontap_file_security_permissions - fix idempotency issue on ``acls.propagation_mode`` option. + - na_ontap_file_security_permissions - fix TypeError when current acls is None. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5892.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5892.yaml new file mode 100644 index 000000000..1659e9776 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5892.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - fix idempotent issue when try to offline and modify other volume options. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5894.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5894.yaml new file mode 100644 index 000000000..3b4b1799a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5894.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_rest_info - improved documentation for ``parameters`` option. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5899.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5899.yaml new file mode 100644 index 000000000..32b90a26e --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5899.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_ldap_client - fix duplicate entry error when used cluster vserver in REST. + - na_ontap_ldap_client - fix KeyError on ``name`` in ZAPI. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5910.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5910.yaml new file mode 100644 index 000000000..09fbe6d55 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5910.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_vserver_audit - fix invalid field value error of log retention count and duration. + - na_ontap_vserver_audit - Added ``log_path`` option in modify. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5913.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5913.yaml new file mode 100644 index 000000000..707b5c635 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5913.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_snapmirror - new option ``identity_preservation`` added in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5917.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5917.yaml new file mode 100644 index 000000000..7da0a1fde --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5917.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_ontap_security_config - new option ``supported_cipher_suites`` added in REST. + - na_ontap_security_config - Replaced private cli with REST API for GET and PATCH. 
+ - na_ontap_security_config - Added support for protocol version ``TLSV1.3``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5919.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5919.yaml new file mode 100644 index 000000000..a3fed0767 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5919.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_volume - fix error when try to unmount volume and modify snaplock attribute. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5926.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5926.yaml new file mode 100644 index 000000000..9b6205ff1 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5926.yaml @@ -0,0 +1,5 @@ +bugfixes: + - na_ontap_user_role - report error when command/command directory path set in REST for ONTAP earlier versions. +minor_changes: + - na_ontap_user_role - add support for rest-role ``privileges.access`` choices ``read_create``, ``read_modify`` and ``read_create_modify``, supported only with REST and requires ONTAP 9.11.1 or later versions. + - na_ontap_user_role - ``command_directory_name`` requires 9.11.1 or later with REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5938.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5938.yaml new file mode 100644 index 000000000..a5a7bfe8d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5938.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_interface - fix incorrect warning raised when try to rename interface. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5948.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5948.yaml new file mode 100644 index 000000000..2dd738d57 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5948.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_user - fix KeyError vserver in ZAPI. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5952.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5952.yaml new file mode 100644 index 000000000..93bcbc426 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5952.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapmirror - fix invalid value error for return_timeout, modified the value to 120 seconds. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5960.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5960.yaml new file mode 100644 index 000000000..60a428d9d --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5960.yaml @@ -0,0 +1,4 @@ +bugfixes: + - na_ontap_security_config - fix error on specifying protocol version ``TLSv1.1`` when fips is enabled. +minor_changes: + - na_ontap_security_config - updated documentation for ``supported_cipher_suites``. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5972.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5972.yaml new file mode 100644 index 000000000..635b22f74 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5972.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_snapmirror - error if identity_preservation set in ZAPI. + - na_ontap_snapmirror - Added option ``identity_preservation`` support from ONTAP 9.11.1 in REST. 
diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5983.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5983.yaml new file mode 100644 index 000000000..6a345d1d6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5983.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_ontap_cifs - updated documentation and examples for REST. + - na_ontap_cifs - removed default value for ``unix_symlink`` as its not supported with ZAPI. +bugfixes: + - na_ontap_cifs - throw error if set ``unix_symlink`` in ZAPI. + - na_ontap_cifs - throw error if used options that require recent ONTAP version. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5986.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5986.yaml new file mode 100644 index 000000000..2b46c9bef --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-5986.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_iscsi_security - error module if use_rest never is set. + - na_ontap_iscsi_security - fix KeyError on ``outbound_username`` option. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6001.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6001.yaml new file mode 100644 index 000000000..ae01a6be6 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6001.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_file_security_permissions_acl - fix idempotent issue on ``propagation_mode`` option. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6005.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6005.yaml new file mode 100644 index 000000000..d0e07ae65 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6005.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_user - option ``vserver`` is not required with REST, ignore this option to create cluster scoped user. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6014.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6014.yaml new file mode 100644 index 000000000..1ca42e521 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6014.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_aggregate - new REST only option ``tags`` added, requires ONTAP 9.13.1 or later version. + - na_ontap_volume - new REST only option ``tags`` added, requires ONTAP 9.13.1 or later version. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6015.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6015.yaml new file mode 100644 index 000000000..b4e1d89b5 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6015.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_qos_adaptive_policy_group - rename group when from_name is present and state is present. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6191.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6191.yaml new file mode 100644 index 000000000..d48060988 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6191.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_snapmirror_policy - fix cannot disable ``is_network_compression_enabled`` in REST. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6192.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6192.yaml new file mode 100644 index 000000000..5bf7bddea --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6192.yaml @@ -0,0 +1,2 @@ +minor_changes: + - retry create or modify when getting temporarily locked from changes error in REST. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6193.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6193.yaml new file mode 100644 index 000000000..cf87f9df8 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6193.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_s3_buckets - new option ``type`` added, requires ONTAP 9.12.1 or later. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6195.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6195.yaml new file mode 100644 index 000000000..b2331a883 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6195.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_s3_groups - fix error when current s3 groups has no users configured. + - na_ontap_s3_groups - fix cannot modify ``policies`` if not configured in create. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6209.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6209.yaml new file mode 100644 index 000000000..ae3e9191a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6209.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_security_certificates - fix duplicate entry error when ``vserver`` option is set with admin vserver. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6233.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6233.yaml new file mode 100644 index 000000000..c8929bb36 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6233.yaml @@ -0,0 +1,5 @@ +minor_changes: + - na_ontap_qos_policy_group - new REST only option ``adaptive_qos_options.block_size`` added, requires ONTAP 9.10.1 or later version. + - na_ontap_qos_policy_group - skip checking modify when ``state`` is absent. +bugfixes: + - na_ontap_qos_policy_group - one occurrence of msg missing in call to fail_json. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6235.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6235.yaml new file mode 100644 index 000000000..d3e3f4310 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6235.yaml @@ -0,0 +1,6 @@ +bugfixes: + - na_ontap_svm - skip modify validation when trying to delete svm. + - na_ontap_export_policy - fix cannot delete export policy if ``from_name`` option is set. +minor_changes: + - na_ontap_broadcast_domain - skip checking modify when ``state`` is absent. + - na_ontap_export_policy - added ``name`` to modify in module output if export policy is renamed. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6262.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6262.yaml new file mode 100644 index 000000000..474bd912a --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6262.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_login_messages - fix ``banner`` and ``motd_message`` not idempotent when trailing '\n' is present. + - na_ontap_login_messages - fix idempotent issue on ``show_cluster_motd`` option when try to set banner or motd_message for the first time in REST. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6266.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6266.yaml new file mode 100644 index 000000000..6e3d4a55b --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/DEVOPS-6266.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_ontap_name_mappings - added choices ``s3_win`` and ``s3_unix`` to ``direction``, requires ONTAP 9.12.1 or later. + - na_ontap_s3_buckets - new option ``nas_path`` added, requires ONTAP 9.12.1 or later. \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/github-110.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/github-110.yaml new file mode 100644 index 000000000..5e451e978 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/github-110.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_ontap_rest_info - fixed error where module would fail silently when using ``owning_resouce`` and a non-existent vserver. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/github-56.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/github-56.yaml new file mode 100644 index 000000000..14f15a6bd --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/github-56.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_ontap_lun - ``use_exact_size`` to create a lun with the exact given size so that the lun is not rounded up. diff --git a/ansible_collections/netapp/ontap/changelogs/fragments/no-story-1.yaml b/ansible_collections/netapp/ontap/changelogs/fragments/no-story-1.yaml new file mode 100644 index 000000000..b4ba862e4 --- /dev/null +++ b/ansible_collections/netapp/ontap/changelogs/fragments/no-story-1.yaml @@ -0,0 +1,3 @@ +bugfixes: + - na_ontap_s3_buckets - Module work currently when ``sid`` is a number. + - na_ontap_s3_buckets - Module will set ``enabled`` during create. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/execution_environments/README.md b/ansible_collections/netapp/ontap/execution_environments/README.md new file mode 100644 index 000000000..5d23ecc9a --- /dev/null +++ b/ansible_collections/netapp/ontap/execution_environments/README.md @@ -0,0 +1,34 @@ +# How to build an Ansible Execution Environment + +## Prerequisites +This was tested with ansible-builder version 1.1.0. + +## Building from Galaxy +Using the files in the ansible_collections/netapp/ontap/execution_environments/from_galaxy directory as a template: +- execution-environment.yml describes the build environment. +- requirements.yml defines the collections to add into your execution environment. + +Then build with: + +``` +ansible-builder build +``` + +For instance, using podman instead of docker, and tagging: +``` +ansible-builder build --container-runtime=podman --tag myregistry.io/ansible-ee-netapp:21.24.1 -f execution-environment.yml -v 3 +``` + +In my case, I needed to use sudo. + +## Building from GitHub +Alternatively, the source code can be downloaded from GitHub. It allows you to get code before release (at your own risk) or to use a fork.
+See ansible_collections/netapp/ontap/execution_environments/from_github/requirements.yml + +## References + +https://ansible-builder.readthedocs.io/en/stable/usage/ + +https://docs.ansible.com/automation-controller/latest/html/userguide/ee_reference.html + + diff --git a/ansible_collections/netapp/ontap/execution_environments/from_galaxy/execution-environment.yml b/ansible_collections/netapp/ontap/execution_environments/from_galaxy/execution-environment.yml new file mode 100644 index 000000000..466fb8373 --- /dev/null +++ b/ansible_collections/netapp/ontap/execution_environments/from_galaxy/execution-environment.yml @@ -0,0 +1,10 @@ +--- +version: 1 + +# ansible_config: 'ansible.cfg' + +# build_arg_defaults: +# EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:latest' + +dependencies: + galaxy: requirements.yml diff --git a/ansible_collections/netapp/ontap/execution_environments/from_galaxy/requirements.yml b/ansible_collections/netapp/ontap/execution_environments/from_galaxy/requirements.yml new file mode 100644 index 000000000..b9cceb26a --- /dev/null +++ b/ansible_collections/netapp/ontap/execution_environments/from_galaxy/requirements.yml @@ -0,0 +1,13 @@ +--- +collections: + # Install collections from Galaxy + # - name: ansible.posix + # - name: netapp.aws + # # name: - netapp.azure + # - name: netapp.cloudmanager + # version: 21.19.0 + # - name: netapp.elementsw + - name: netapp.ontap + version: 21.24.1 + # - name: netapp.storagegrid + # - name: netapp.um_info diff --git a/ansible_collections/netapp/ontap/execution_environments/from_github/execution-environment.yml b/ansible_collections/netapp/ontap/execution_environments/from_github/execution-environment.yml new file mode 100644 index 000000000..466fb8373 --- /dev/null +++ b/ansible_collections/netapp/ontap/execution_environments/from_github/execution-environment.yml @@ -0,0 +1,10 @@ +--- +version: 1 + +# ansible_config: 'ansible.cfg' + +# build_arg_defaults: +# EE_BASE_IMAGE: 
'quay.io/ansible/ansible-runner:latest' + +dependencies: + galaxy: requirements.yml diff --git a/ansible_collections/netapp/ontap/execution_environments/from_github/requirements.yml b/ansible_collections/netapp/ontap/execution_environments/from_github/requirements.yml new file mode 100644 index 000000000..2640402ad --- /dev/null +++ b/ansible_collections/netapp/ontap/execution_environments/from_github/requirements.yml @@ -0,0 +1,18 @@ +--- +collections: + # Install collections from Galaxy + # - name: ansible.posix + # - name: netapp.aws + # # name: - netapp.azure + # - name: netapp.cloudmanager + # version: 21.19.0 + # - name: netapp.elementsw + # - name: netapp.ontap + # version: 21.24.1 + # - name: netapp.storagegrid + # - name: netapp.um_info + + # Install a collection from GitHub. + - source: https://github.com/ansible-collections/netapp.ontap.git + type: git + version: 21.24.1 diff --git a/ansible_collections/netapp/ontap/execution_environments/requirements.txt b/ansible_collections/netapp/ontap/execution_environments/requirements.txt new file mode 100644 index 000000000..02dd40520 --- /dev/null +++ b/ansible_collections/netapp/ontap/execution_environments/requirements.txt @@ -0,0 +1 @@ +ansible-builder diff --git a/ansible_collections/netapp/ontap/meta/execution-environment.yml b/ansible_collections/netapp/ontap/meta/execution-environment.yml new file mode 100644 index 000000000..ad211b139 --- /dev/null +++ b/ansible_collections/netapp/ontap/meta/execution-environment.yml @@ -0,0 +1,3 @@ +version: 1 +dependencies: + python: requirements.txt diff --git a/ansible_collections/netapp/ontap/meta/runtime.yml b/ansible_collections/netapp/ontap/meta/runtime.yml new file mode 100644 index 000000000..49dcabb60 --- /dev/null +++ b/ansible_collections/netapp/ontap/meta/runtime.yml @@ -0,0 +1,153 @@ +--- +requires_ansible: ">=2.9.10" +action_groups: + netapp_ontap: + - na_ontap_active_directory_domain_controllers + - na_ontap_active_directory + - na_ontap_aggregate + - 
na_ontap_autosupport_invoke + - na_ontap_autosupport + - na_ontap_bgp_peer_group + - na_ontap_broadcast_domain_ports + - na_ontap_broadcast_domain + - na_ontap_cg_snapshot + - na_ontap_cifs_acl + - na_ontap_cifs_local_group + - na_ontap_cifs_local_group_member + - na_ontap_cifs_local_user + - na_ontap_cifs_local_user_modify + - na_ontap_cifs_local_user_set_password + - na_ontap_cifs + - na_ontap_cifs_server + - na_ontap_cluster_ha + - na_ontap_cluster_peer + - na_ontap_cluster + - na_ontap_command + - na_ontap_debug + - na_ontap_disk_options + - na_ontap_disks + - na_ontap_dns + - na_ontap_domain_tunnel + - na_ontap_efficiency_policy + - na_ontap_ems_destination + - na_ontap_ems_filter + - na_ontap_export_policy + - na_ontap_export_policy_rule + - na_ontap_fcp + - na_ontap_fdsd + - na_ontap_fdsp + - na_ontap_fdspt + - na_ontap_fdss + - na_ontap_file_directory_policy + - na_ontap_file_security_permissions + - na_ontap_file_security_permissions_acl + - na_ontap_firewall_policy + - na_ontap_firmware_upgrade + - na_ontap_flexcache + - na_ontap_fpolicy_event + - na_ontap_fpolicy_ext_engine + - na_ontap_fpolicy_policy + - na_ontap_fpolicy_scope + - na_ontap_fpolicy_status + - na_ontap_igroup_initiator + - na_ontap_igroup + - na_ontap_info + - na_ontap_interface + - na_ontap_ipspace + - na_ontap_iscsi + - na_ontap_iscsi_security + - na_ontap_job_schedule + - na_ontap_kerberos_interface + - na_ontap_kerberos_realm + - na_ontap_ldap_client + - na_ontap_ldap + - na_ontap_license + - na_ontap_local_hosts + - na_ontap_log_forward + - na_ontap_login_messages + - na_ontap_lun_copy + - na_ontap_lun_map + - na_ontap_lun_map_reporting_nodes + - na_ontap_lun + - na_ontap_mcc_mediator + - na_ontap_metrocluster_dr_group + - na_ontap_metrocluster + - na_ontap_motd + - na_ontap_name_mappings + - na_ontap_name_service_switch + - na_ontap_ndmp + - na_ontap_net_ifgrp + - na_ontap_net_port + - na_ontap_net_routes + - na_ontap_net_subnet + - na_ontap_net_vlan + - na_ontap_nfs + - 
na_ontap_node + - na_ontap_ntfs_dacl + - na_ontap_ntfs_sd + - na_ontap_ntp + - na_ontap_ntp_key + - na_ontap_nvme_namespace + - na_ontap_nvme + - na_ontap_nvme_subsystem + - na_ontap_object_store + - na_ontap_partitions + - na_ontap_portset + - na_ontap_ports + - na_ontap_publickey + - na_ontap_qos_adaptive_policy_group + - na_ontap_qos_policy_group + - na_ontap_qtree + - na_ontap_quota_policy + - na_ontap_quotas + - na_ontap_rest_cli + - na_ontap_rest_info + - na_ontap_restit + - na_ontap_s3_buckets + - na_ontap_s3_groups + - na_ontap_s3_policies + - na_ontap_s3_services + - na_ontap_s3_users + - na_ontap_security_certificates + - na_ontap_security_config + - na_ontap_security_ipsec_ca_certificate + - na_ontap_security_ipsec_config + - na_ontap_security_ipsec_policy + - na_ontap_security_key_manager + - na_ontap_security_ssh + - na_ontap_service_policy + - na_ontap_service_processor_network + - na_ontap_snaplock_clock + - na_ontap_snapmirror_policy + - na_ontap_snapmirror + - na_ontap_snapshot_policy + - na_ontap_snapshot + - na_ontap_snmp + - na_ontap_snmp_traphosts + - na_ontap_software_update + - na_ontap_ssh_command + - na_ontap_storage_auto_giveback + - na_ontap_storage_failover + - na_ontap_svm_options + - na_ontap_svm + - na_ontap_ucadapter + - na_ontap_unix_group + - na_ontap_unix_user + - na_ontap_user + - na_ontap_user_role + - na_ontap_volume_autosize + - na_ontap_volume_clone + - na_ontap_volume_efficiency + - na_ontap_volume + - na_ontap_volume_snaplock + - na_ontap_vscan_on_access_policy + - na_ontap_vscan_on_demand_task + - na_ontap_vscan + - na_ontap_vscan_scanner_pool + - na_ontap_vserver_audit + - na_ontap_vserver_cifs_security + - na_ontap_vserver_peer + - na_ontap_vserver_peer_permissions + - na_ontap_wait_for_condition + - na_ontap_wwpn_alias + - na_ontap_zapit diff --git a/ansible_collections/netapp/ontap/playbooks/examples/README.md b/ansible_collections/netapp/ontap/playbooks/examples/README.md new file mode 100644 index 
000000000..1d90cbfaa --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/README.md @@ -0,0 +1,37 @@ +============================================================= + + netapp.ontap + + NetApp ONTAP Collection + + Copyright (c) 2020 NetApp, Inc. All rights reserved. + Specifications subject to change without notice. + +============================================================= +# Playbook examples + +As the name indicates, these are examples, and while they are working at the time of publication, we do not support these playbooks. +We cannot guarantee they are working on other systems, or other configurations, or other versions than what we used at the time. +We will not maintain these playbooks as time passes. + +## ONTAP Firmware Updates + +By default, downloading a firmware image is enough to trigger an update. +The update happens automatically in background for the disk qualification package and for disk, shelf, and ACP firmwares. It is designed to be non disruptive. + +The SP firmware will be automatically installed, but requires a node reboot. The reboot is not done in these playbooks. + +The na_ontap_pb_upgrade_firmware playbooks are illustrating three ways to use variables in an Ansible playbook: +1. directly inside the playbook, under the `vars:` keyword +1. by importing an external file, under the `vars_file:` keyword +1. by adding `--extra-vars` to the `ansible-playbook` command line. Using `@` enables to use a file rather than providing each variable explicitly. 
+ +``` +ansible-playbook ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml + +ansible-playbook ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml + +ansible-playbook ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml --extra-vars=@/tmp/ansible/ontap_vars_file.yml +``` + +The advantage of using a vars_file is that you can keep important variables private. --extra-vars provides more flexibility regarding the location of the vars file. diff --git a/ansible_collections/netapp/ontap/playbooks/examples/filter/test_na_filter_iso8601.yaml b/ansible_collections/netapp/ontap/playbooks/examples/filter/test_na_filter_iso8601.yaml new file mode 100644 index 000000000..0f5c704a0 --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/filter/test_na_filter_iso8601.yaml @@ -0,0 +1,77 @@ +- + name: test netapp.ontap ISO8601 filters + gather_facts: false + hosts: localhost + + vars: + iso_duration: 'P689DT13H57M44S' + iso_duration_weeks: 'P98W' + seconds_duration: 59579864 + + + tasks: + - name: convert duration in ISO 8601 format to seconds + set_fact: + input: "{{ iso_duration }}" + out: "{{ iso_duration | netapp.ontap.iso8601_duration_to_seconds }}" + + - name: validate results + assert: + that: out | int == seconds_duration + quiet: true + + - name: convert seconds to duration in ISO 8601 format + set_fact: + input: "{{ seconds_duration }}" + out: "{{ seconds_duration | netapp.ontap.iso8601_duration_from_seconds }}" + + - name: validate results + assert: + that: out == iso_duration + quiet: true + + - name: convert seconds to duration in ISO 8601 format, using format specifier + set_fact: + input: "{{ seconds_duration }}" + out: "{{ seconds_duration | netapp.ontap.iso8601_duration_from_seconds(format='P%P') }}" + + - name: validate results + assert: + that: out == iso_duration + quiet: true + + - name: convert seconds to duration 
in ISO 8601 format, using format specifier for weeks + set_fact: + input: "{{ seconds_duration }}" + out: "{{ seconds_duration | netapp.ontap.iso8601_duration_from_seconds(format='P%p') }}" + + - name: validate results + assert: + that: out == iso_duration_weeks + quiet: true + + - name: input error, input does not match ISO format + set_fact: + out: "{{ 'dummy' | netapp.ontap.iso8601_duration_to_seconds }}" + ignore_errors: true + register: results + + - name: validate error message + assert: + that: results.msg == error + quiet: true + vars: + error: "iso8601_duration_to_seconds - error: Unable to parse duration string 'dummy' - expecting PnnYnnMnnDTnnHnnMnnS, received: dummy" + + - name: input error, input does not match int or float format + set_fact: + out: "{{ 'dummy' | netapp.ontap.iso8601_duration_from_seconds }}" + ignore_errors: true + register: results + + - name: validate error message + assert: + that: results.msg == error + quiet: true + vars: + error: "iso8601_duration_from_seconds - error: unsupported type for timedelta seconds component: str - received: dummy" diff --git a/ansible_collections/netapp/ontap/playbooks/examples/json_query/README.md b/ansible_collections/netapp/ontap/playbooks/examples/json_query/README.md new file mode 100644 index 000000000..0d3321af0 --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/json_query/README.md @@ -0,0 +1,30 @@ +============================================================= + + netapp.ontap + + NetApp ONTAP Collection + + Copyright (c) 2020 NetApp, Inc. All rights reserved. + Specifications subject to change without notice. + +============================================================= +# Playbook examples + +As the name indicates, these are examples, and while they are working at the time of publication, we do not support these playbooks. +We cannot guarantee they are working on other systems, or other configurations, or other versions than what we used at the time. 
+We will not maintain these playbooks as time passes. + +## ONTAP list volumes that are online, or offline + +The na_ontap_pb_get_online_volumes playbook illustrates two ways to use json_query: +1. to flatten a complex structure and extract only the fields of interest, +2. to filter the fields of interest based on some criteria. + +The na_ontap_pb_get_online_volumes playbook illustrates three ways to use variables in an Ansible playbook: +1. directly inside the playbook, under the `vars:` keyword, +1. by importing an external file, under the `vars_files:` keyword, +1. by adding `--extra-vars` to the `ansible-playbook` command line. Using `@` enables you to use a file rather than providing each variable explicitly. + +Note that `--extra-vars` has the highest precedence. `vars` has the lowest precedence. It is possible to combine the 3 techniques within a single playbook. + +The advantage of using a vars_file is that you can keep important variables private. --extra-vars provides more flexibility regarding the location of the vars file. diff --git a/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml b/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml new file mode 100644 index 000000000..5d58d17a9 --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes.yml @@ -0,0 +1,76 @@ +- + name: Get list of online ONTAP volumes + hosts: localhost + gather_facts: false + collections: + - netapp.ontap + vars_files: + # This will fail silently if the vars_file is not found. Remove '/dev/null' to force an error + # if --extra_vars is used to provide values for these variables, the values from vars_file are ignored + - ['/path/to/ontap_vars_file.yml', '/dev/null'] + + vars: + # TODO: change these values until DONE, unless a vars file or --extra_vars is used.
+ # If --extra_vars is used to provide values for these variables, the values below are ignored. + # If vars_files is used, the values below are ignored. + ontap_admin_ip: TBD + # username/password authentication + ontap_admin_username: admin + ontap_admin_password: TBD + # SSL certificate authentication + ontap_cert_filepath: "/path/to/test.pem" + ontap_key_filepath: "/path/to//test.key" + # optional, SVM login + ontap_svm_admin_ip: TBD + ontap_svm_admin_username: vsadmin + ontap_svm_admin_password: TBD + # we recommend to use https, with a valid certificate + ontap_use_https: true + ontap_validate_certs: false + # DONE + login: &login + hostname: "{{ ontap_admin_ip }}" + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: "{{ ontap_use_https }}" + validate_certs: "{{ ontap_validate_certs }}" + cert_login: &cert_login + hostname: "{{ ontap_admin_ip }}" + cert_filepath: "{{ ontap_cert_filepath }}" + key_filepath: "{{ ontap_key_filepath }}" + https: true # ignored, as https is required for SSL + validate_certs: "{{ ontap_validate_certs }}" + svm_login: &svm_login + hostname: "{{ ontap_svm_admin_ip }}" + username: "{{ ontap_svm_admin_username }}" + password: "{{ ontap_svm_admin_password }}" + https: "{{ ontap_use_https }}" + validate_certs: "{{ ontap_validate_certs }}" + tasks: + - name: collect list of volumes, and state information + na_ontap_info: + <<: *cert_login + gather_subset: volume_info + desired_attributes: + volume-attributes: + volume-state-attributes: + state: + use_native_zapi_tags: false + register: ontap + - debug: var=ontap + tags: never + - set_fact: + volumes: "{{ ontap.ontap_info | json_query(get_attrs) }}" + vars: + get_attrs: "volume_info.*.{id: volume_id_attributes.name, svm: volume_id_attributes.owning_vserver_name, state: volume_state_attributes.state}" + - debug: var=volumes + - set_fact: + online_volumes: "{{ volumes | json_query(get_online) }}" + vars: + get_online: "[? 
state=='online']" + - debug: var=online_volumes + - set_fact: + offline_volumes: "{{ volumes | json_query(get_offline) }}" + vars: + get_offline: "[? state=='offline']" + - debug: var=offline_volumes diff --git a/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes_loop.yml b/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes_loop.yml new file mode 100644 index 000000000..41cfba21c --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/json_query/na_ontap_pb_get_online_volumes_loop.yml @@ -0,0 +1,85 @@ +- + name: Get list of online ONTAP volumes + hosts: localhost + gather_facts: false + collections: + - netapp.ontap + vars_files: + # This will fail silently if the vars_file is not found. Remove '/dev/null' to force an error + # if --extra_vars is used to provide values for these variables, the values from vars_file are ignored + - ['/path/to/ontap_vars_file.yml', '/dev/null'] + + vars: + # TODO: change these value until DONE, unless a vars file or --extra_vars is used. + # If --extra_vars is used to provide values for these variables, the values below are ignored. + # If vars_files is used, the values below are ignored. + # cluster or vsserver IP addresses + ontap_admin_ips: + - ip1 + - ip2 + # username/password authentication + ontap_admin_username: admin + ontap_admin_password: netapp1! 
+ # SSL certificate authentication + ontap_cert_filepath: "/path/to/test.pem" + ontap_key_filepath: "/path/to//test.key" + # optional, SVM login + ontap_svm_admin_username: vsadmin + ontap_svm_admin_password: TBD + # we recommend to use https, with a valid certificate + ontap_use_https: true + ontap_validate_certs: false + # DONE + login: &login + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: "{{ ontap_use_https }}" + validate_certs: "{{ ontap_validate_certs }}" + cert_login: &cert_login + cert_filepath: "{{ ontap_cert_filepath }}" + key_filepath: "{{ ontap_key_filepath }}" + https: true # ignored, as https is required for SSL + validate_certs: "{{ ontap_validate_certs }}" + svm_login: &svm_login + username: "{{ ontap_svm_admin_username }}" + password: "{{ ontap_svm_admin_password }}" + https: "{{ ontap_use_https }}" + validate_certs: "{{ ontap_validate_certs }}" + tasks: + - debug: var=ontap + tags: never + - debug: var=ontap.results + tags: xnever + - name: collect list of volumes, and state information + na_ontap_info: + hostname: "{{ item }}" + <<: *login + gather_subset: volume_info + desired_attributes: + volume-attributes: + volume-state-attributes: + state: + use_native_zapi_tags: false + register: ontap + loop: "{{ ontap_admin_ips }}" + loop_control: + label: "{{ item }}" + - set_fact: + volumes: "{{ volumes|default({}) | combine( {item.item: item.ontap_info | json_query(get_attrs)} ) }}" + vars: + get_attrs: "volume_info.*.{id: volume_id_attributes.name, svm: volume_id_attributes.owning_vserver_name, state: volume_state_attributes.state}" + loop: "{{ ontap.results }}" + - debug: var=volumes + - pause: + - set_fact: + online_volumes: "{{ online_volumes|default({}) | combine( {item.key: item.value | json_query(get_online)} ) }}" + vars: + get_online: "[? 
state=='online']" + loop: "{{ volumes | dict2items }}" + - debug: var=online_volumes + - set_fact: + offline_volumes: "{{ offline_volumes|default({}) | combine( {item.key: item.value | json_query(get_offline)} ) }}" + vars: + get_offline: "[? state=='offline']" + loop: "{{ volumes | dict2items }}" + - debug: var=offline_volumes diff --git a/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate.yml b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate.yml new file mode 100644 index 000000000..18ced0517 --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate.yml @@ -0,0 +1,209 @@ +# Example of installing a SSL certificate in ONTAP for authentication +# This playbook: +# 1. installs the certificate, or proceeds if the certificate is already installed, +# 2. enables SSL client authentication, +# 3. creates user account for cert authentication for ontapi and http applications, +# 4. validates that cert authentication works +# +# in test mode (using tags: -t all,testpb): +# 1b. the installation is repeated, to validate the check for idempotency (certificate already installed), +# 5. user account for cert authentication for ontapi and http applications is deleted, +# 6. if the certificate was installed in step 1, it is deleted. +# The certificate can be manually deleted using something like: +# security certificate delete -vserver trident_svm -common-name cert_user -ca cert_user -type * +# +# Prerequisites: +# you must have generated a certificate and have the certificate file (.pem) and the private key file available. 
+# This was tested using a self signed certificate: +# https://netapp.io/2016/11/08/certificate-based-authentication-netapp-manageability-sdk-ontap/ +- + name: Ontap Install SSL certificate and enable SSL certificate authentication + hosts: localhost + gather_facts: false + collections: + - netapp.ontap + vars: + # TODO: change these variable values from HERE to DONE: + ontap_admin_ip: 10.XXX.XXX.X19 + ontap_admin_username: admin + ontap_admin_password: XXXXXXXX + # we recommend to use https, but it requires a valid SSL certificate + ontap_use_https: true + ontap_validate_certs: false + + # parameters to set up the certificate, ontap_cert_user must match the value of CN= when generating the certificate + ontap_cert_user: cert_user + ontap_cert_name: deleteme_cert + # admin or vsadmin + ontap_cert_role: vsadmin + # admin or data SVM + vserver: trident_svm + # admin or SVM IP address (for admin, would the same as ontap_admin_ip) + ontap_svm_ip: 10.XXX.XXX.X21 + # certificate and private key files + cert_filepath: "/home/laurentn/atelier/ansible_wsl/ansible-playbooks/test.pem" + key_filepath: "/home/laurentn/atelier/ansible_wsl/ansible-playbooks/test.key" + # set this to false if the certificate is self-signed + validate_certs_for_ssl_auth: false + + # you can either copy/paste the certificate(s) from the pem file, respecting the identation: + ssl_certificate_inline: | + -----BEGIN CERTIFICATE----- + MXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + 
XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxx== + -----END CERTIFICATE----- + + # or read it directly from the pem file + ssl_certificate_from_file: "{{lookup('file', cert_filepath)}}" + + # pick one: + # ssl_certificate: "{{ ssl_certificate_inline }}" + ssl_certificate: "{{ ssl_certificate_from_file }}" + + # DONE - do not change anything else (unless you really want to) + + # this will be used to authenticate using SSL certificate + cert_login: &cert_login + hostname: "{{ ontap_svm_ip }}" + cert_filepath: "{{ cert_filepath }}" + key_filepath: "{{ key_filepath }}" + https: true + validate_certs: "{{ validate_certs_for_ssl_auth }}" + + login: &login + hostname: "{{ ontap_admin_ip }}" + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: "{{ ontap_use_https }}" + validate_certs: "{{ ontap_validate_certs }}" + + tasks: + - name: run ontap info module to check connectivity + na_ontap_info: + <<: *login + gather_subset: ontap_system_version + register: ontap + - debug: var=ontap.ontap_info.ontap_version + + - name: use ZAPIT to install certificate + na_ontap_zapit: + <<: *login + zapi: + security-certificate-install: + cert-name: "{{ ontap_cert_name }}" + certificate: "{{ ssl_certificate }}" + type: client-ca + vserver: "{{ vserver }}" + ignore_errors: true + register: ontap + - 
debug: var=ontap + - fail: + msg: "Failed to install certificate: {{ ontap }}" + when: ontap.failed and ontap.reason != "duplicate entry" + - name: collect certificate data to be able to delete it later when testing + tags: never,testpb + set_fact: + certificate_authority: "{{ ontap.response.ca | default('unknown') }}" + serial_number: "{{ ontap.response.serial | default(0) }}" + certificate_installed: "{{ not ontap.failed }}" + - debug: var=certificate_authority + tags: never,testpb + - debug: var=serial_number + tags: never,testpb + - debug: var=certificate_installed + tags: never,testpb + + - name: use ZAPIT to install certificate (idempotency) + # use -t all,testpb when testing the playbook + tags: never,testpb + na_ontap_zapit: + <<: *login + zapi: + security-certificate-install: + cert-name: "{{ ontap_cert_name }}" + certificate: "{{ ssl_certificate }}" + type: client-ca + vserver: "{{ vserver }}" + ignore_errors: true + register: ontap + - debug: var=ontap + tags: never,testpb + - fail: + msg: "Failed to install certificate: {{ ontap }}" + tags: never,testpb + when: ontap.failed and ontap.reason != "duplicate entry" + + - name: use ZAPIT to enable certificate authentication + na_ontap_zapit: + <<: *login + zapi: + security-ssl-modify: + client-authentication-enabled: true + vserver: "{{ vserver }}" + register: ontap + - debug: var=ontap + tags: never,testpb + + - name: set up cert authentication for ontapi (ZAPI) and http (REST) + na_ontap_user: + <<: *login + applications: ontapi,http + authentication_method: cert + name: "{{ ontap_cert_user }}" + role_name: "{{ ontap_cert_role }}" + vserver: "{{ vserver }}" + register: ontap + - debug: var=ontap + tags: never,testpb + + - name: validate cert authentication is working for ZAPI + na_ontap_info: + <<: *cert_login + gather_subset: ontap_version + register: ontap + - debug: var=ontap + + - name: remove cert authentication for ontapi (ZAPI) and http (REST) when testing + tags: never,testpb + na_ontap_user: + <<: 
*login + state: absent + applications: ontapi,http + authentication_method: cert + name: "{{ ontap_cert_user }}" + role_name: "{{ ontap_cert_role }}" + vserver: "{{ vserver }}" + register: ontap + - debug: var=ontap + tags: never,testpb + + - name: use ZAPIT to delete certificate when testing + # use -t all,never when testing the playbook + tags: never,testpb,delete + na_ontap_zapit: + <<: *login + zapi: + security-certificate-delete: + certificate-authority: "{{ certificate_authority }}" + common-name: "{{ certificate_authority }}" + serial-number: "{{ serial_number }}" + type: client-ca + vserver: "{{ vserver }}" + when: certificate_installed diff --git a/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml new file mode 100644 index 000000000..3abe6104e --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_install_SSL_certificate_REST.yml @@ -0,0 +1,202 @@ +# Example of installing a SSL certificate in ONTAP for authentication +# This playbook: +# 1. installs the certificate, or proceeds if the certificate is already installed, +# (this also enables SSL client authentication), +# 2. creates user account for cert authentication for ontapi and http applications, +# 3. validates that cert authentication works +# +# in test mode (using tags: -t all,testpb): +# 1b. the installation is repeated, to validate the check for idempotency (certificate already installed), +# 4. user account for cert authentication for ontapi and http applications is deleted, +# 6. if the certificate was installed in step 1, it is deleted. 
+# The certificate can be manually deleted using something like: +# security certificate delete -vserver trident_svm -common-name cert_user -ca cert_user -type * +# +# Prerequisites: +# you must have generated a certificate and have the certificate file (.pem) and the private key file available. +# This was tested using a self signed certificate: +# https://netapp.io/2016/11/08/certificate-based-authentication-netapp-manageability-sdk-ontap/ +- + name: Ontap Install SSL certificate and enable SSL certificate authentication + hosts: localhost + gather_facts: false + collections: + - netapp.ontap + vars: + # TODO: change these variable values from HERE to DONE: + ontap_admin_ip: 10.xxx.xxx.x19 + ontap_admin_username: admin + ontap_admin_password: xxxxxxxxx + # we recommend to use https, but it requires a valid SSL certificate + ontap_use_https: true + ontap_validate_certs: false + + # parameters to set up the certificate, ontap_cert_user must match the value of CN= when generating the certificate + ontap_cert_user: cert_user + ontap_cert_name: testme-cert + # data SVM, name and set role to vsadmin + svm: ansibleSVM + ontap_cert_role: vsadmin + # uncomment and leave the value empty for cluster certificate, set role to admin + # svm: + # ontap_cert_role: admin + # admin or SVM IP address (for admin, would the same as ontap_admin_ip) + ontap_svm_ip: 10.XXX.XXX.X21 + # certificate and private key files + cert_filepath: "/home/laurentn/atelier/wsl/ansible/ansible_collections/ansible_collection_ontap/test.pem" + key_filepath: "/home/laurentn/atelier/wsl/ansible/ansible_collections/ansible_collection_ontap/test.key" + # set this to false if the certificate is self-signed + validate_certs_for_ssl_auth: false + + # you can either copy/paste the certificate(s) from the pem file, respecting the identation: + ssl_certificate_inline: | + -----BEGIN CERTIFICATE----- + MXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + 
XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxx + XXXXXXXXxxxxxxxxXXXXXXXXxxxxxxxxXXXXXXXXxx== + -----END CERTIFICATE----- + + # or read it directly from the pem file + ssl_certificate_from_file: "{{lookup('file', cert_filepath)}}" + + # pick one: + # ssl_certificate: "{{ ssl_certificate_inline }}" + ssl_certificate: "{{ ssl_certificate_from_file }}" + + # DONE - do not change anything else (unless you really want to) + + # this will be used to authenticate using SSL certificate + cert_login: &cert_login + hostname: "{{ ontap_admin_ip }}" + cert_filepath: "{{ cert_filepath }}" + key_filepath: "{{ key_filepath }}" + https: true + validate_certs: "{{ validate_certs_for_ssl_auth }}" + + login: &login + hostname: "{{ ontap_admin_ip }}" + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: "{{ ontap_use_https }}" + validate_certs: "{{ ontap_validate_certs }}" + + 
tasks: + - name: run ontap info module to check connectivity + na_ontap_info: + <<: *login + gather_subset: ontap_system_version + register: ontap + - debug: var=ontap.ontap_info.ontap_version + + - name: install certificate + na_ontap_security_certificates: + <<: *login + common_name: "{{ ontap_cert_user }}" + name: "{{ ontap_cert_name }}" + public_certificate: "{{ ssl_certificate }}" + type: client_ca + svm: "{{ svm }}" + register: result + - debug: var=result + - assert: {that: result.changed, quiet: true} + + - name: install certificate (idempotency test) + # use -t all,testpb when testing the playbook + tags: never,testpb + na_ontap_security_certificates: + <<: *login + common_name: "{{ ontap_cert_user }}" + name: "{{ ontap_cert_name }}" + public_certificate: "{{ ssl_certificate }}" + type: client_ca + svm: "{{ svm }}" + register: result + - debug: var=result + tags: never,testpb + - assert: {that: not result.changed, quiet: true} + tags: never,testpb + + - name: set up cert authentication for ontapi (ZAPI) and http (REST) + na_ontap_user: + <<: *login + applications: ontapi,http + authentication_method: cert + name: "{{ ontap_cert_user }}" + role_name: "{{ ontap_cert_role }}" + svm: "{{ svm }}" + use_rest: Always + register: result + - debug: var=result + tags: never,testpb + - assert: {that: result.changed, quiet: true} + tags: never,testpb + + - name: validate cert authentication is working for REST + na_ontap_rest_info: + <<: *cert_login + gather_subset: vserver_info + register: result + - debug: var=result + + - name: remove cert authentication for ontapi (ZAPI) and http (REST) when testing + tags: never,testpb + na_ontap_user: + <<: *login + state: absent + applications: ontapi,http + authentication_method: cert + name: "{{ ontap_cert_user }}" + role_name: "{{ ontap_cert_role }}" + svm: "{{ svm }}" + use_rest: Always + register: result + - debug: var=result + tags: never,testpb + - assert: {that: result.changed, quiet: true} + tags: never,testpb + + - 
name: delete certificate when testing + # use -t all,never when testing the playbook + tags: never,testpb,delete + na_ontap_security_certificates: + <<: *login + common_name: "{{ ontap_cert_user }}" + name: "{{ ontap_cert_name }}" + svm: "{{ svm }}" + state: absent + register: result + - debug: var=result + tags: never,testpb,delete + - assert: {that: result.changed, quiet: true} + tags: never,testpb,delete + + - name: delete certificate when testing (idempotemcy) + # use -t all,never when testing the playbook + tags: never,testpb,delete + na_ontap_security_certificates: + <<: *login + common_name: "{{ ontap_cert_user }}" + name: "{{ ontap_cert_name }}" + svm: "{{ svm }}" + state: absent + register: result + - debug: var=result + tags: never,testpb,delete + - assert: {that: not result.changed, quiet: true} + tags: never,testpb,delete diff --git a/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml new file mode 100644 index 000000000..c6d7ed5da --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware.yml @@ -0,0 +1,46 @@ +- + name: Ontap Upgrade Firmware + hosts: localhost + gather_facts: false + collections: + - netapp.ontap + vars: + # TODO: change these variable values + ontap_firmware_url: TBD + ontap_admin_ip: TBD + ontap_admin_username: admin + ontap_admin_password: TBD + # we recommend to use https, but it requires a valid SSL certificate + ontap_use_https: true + ontap_validate_certs: false + # DONE - do not change anything else + + login: &login + hostname: "{{ ontap_admin_ip }}" + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: "{{ ontap_use_https }}" + validate_certs: "{{ ontap_validate_certs }}" + + tasks: + - name: run ontap info module to check connectivity + na_ontap_info: + <<: *login + gather_subset: ontap_system_version + register: ontap + - 
debug: var=ontap + + - name: run ontap command module to validate access permissions + na_ontap_command: + <<: *login + command: version + return_dict: false + register: ontap + - debug: var=ontap + + - name: run ontap firmware download module + na_ontap_firmware_upgrade: + <<: *login + package_url: "{{ ontap_firmware_url }}" + register: ontap + - debug: var=ontap diff --git a/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml new file mode 100644 index 000000000..c5ebf0c38 --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml @@ -0,0 +1,47 @@ +- + name: Ontap Upgrade Firmware + hosts: localhost + gather_facts: false + collections: + - netapp.ontap + vars: + # TODO: use --extra_vars to provide values for these variables + # ontap_firmware_url: TBD + # ontap_admin_ip: TBD + # ontap_admin_username: admin + # ontap_admin_password: TBD + # we recommend to use https, but it requires a valid SSL certificate + # if these variables are defined in --extra_vars, the following values are ignored + ontap_use_https: true + ontap_validate_certs: false + # do not change anything else + + login: &login + hostname: "{{ ontap_admin_ip }}" + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: "{{ ontap_use_https }}" + validate_certs: "{{ ontap_validate_certs }}" + + tasks: + - name: run ontap info module to check connectivity + na_ontap_info: + <<: *login + gather_subset: ontap_system_version + register: ontap + - debug: var=ontap + + - name: run ontap command module to validate access permissions + na_ontap_command: + <<: *login + command: version + return_dict: false + register: ontap + - debug: var=ontap + + - name: run ontap firmware download module + na_ontap_firmware_upgrade: + <<: *login + package_url: "{{ ontap_firmware_url 
}}" + register: ontap + - debug: var=ontap diff --git a/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml new file mode 100644 index 000000000..30a73cf12 --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_vars_file.yml @@ -0,0 +1,45 @@ +- + name: Ontap Upgrade Firmware + hosts: localhost + gather_facts: false + collections: + - netapp.ontap + vars_files: + # TODO change this path as needed + - /tmp/ansible/ontap_vars_file.yml + vars: + # we recommend to use https, but it requires a valid SSL certificate + # if these variables are defined in the vars file, the following values are ignored + ontap_use_https: true + ontap_validate_certs: false + # DONE - do not change anything else + + login: &login + hostname: "{{ ontap_admin_ip }}" + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: "{{ ontap_use_https }}" + validate_certs: "{{ ontap_validate_certs }}" + + tasks: + - name: run ontap info module to check connectivity + na_ontap_info: + <<: *login + gather_subset: ontap_system_version + register: ontap + - debug: var=ontap + + - name: run ontap command module to validate access permissions + na_ontap_command: + <<: *login + command: version + return_dict: false + register: ontap + - debug: var=ontap + + - name: run ontap firmware download module + na_ontap_firmware_upgrade: + <<: *login + package_url: "{{ ontap_firmware_url }}" + register: ontap + - debug: var=ontap diff --git a/ansible_collections/netapp/ontap/playbooks/examples/ontap_vars_file.yml b/ansible_collections/netapp/ontap/playbooks/examples/ontap_vars_file.yml new file mode 100644 index 000000000..7675e295f --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/ontap_vars_file.yml @@ -0,0 +1,27 @@ +# TODO: change these variable values 
+ontap_admin_ip: TBD +# either username/passord credentials +ontap_admin_username: admin +ontap_admin_password: TBD +# or SSL certificate authentication +ontap_cert_filepath: "/home/TBD/test.pem" +ontap_key_filepath: "/home/TBD/test.key" +# we recommend to use https, but it requires a valid SSL certificate +ontap_use_https: true +ontap_validate_certs: false +# Optionally, SVM credentials +ontap_svm_admin_ip: TBD +ontap_svm_admin_username: vsadmin +ontap_svm_admin_password: TBD +# Optionally, to upgrade disk, shelf, acp firmware +ontap_firmware_url: TBD +# DONE - do not change anything else +# +# To use this file: +# option 1: use ansible-playbook command line argument --extra-vars=@ +# for instance: +# ansible-playbook ansible_collections/netapp/ontap/playbooks/examples/na_ontap_pb_upgrade_firmware_with_extra_vars.yml --extra-vars=@/tmp/ansible/ontap_vars_file.yml +# option 2: include this file in your playbook using vars_files: +# for instance: +# vars_files: +# - diff --git a/ansible_collections/netapp/ontap/playbooks/examples/rest_apis/clusters.yaml b/ansible_collections/netapp/ontap/playbooks/examples/rest_apis/clusters.yaml new file mode 100644 index 000000000..4048eb5c7 --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/rest_apis/clusters.yaml @@ -0,0 +1,19 @@ +admin_username: admin +admin_password: password +admin_ips: + - 10.10.10.11 + - 10.10.10.12 + - 10.10.10.21 +clusters: + cluster1: + admin_username: "{{ admin_username }}" + admin_password: "{{ admin_password }}" + admin_ip: "{{ admin_ips[0] }}" + cluster1_node1: + admin_username: "{{ admin_username }}" + admin_password: "{{ admin_password }}" + admin_ip: "{{ admin_ips[1] }}" + cluster2: + admin_username: "{{ admin_username }}" + admin_password: "{{ admin_password }}" + admin_ip: "{{ admin_ips[2] }}" diff --git a/ansible_collections/netapp/ontap/playbooks/examples/rest_apis/list_aggregates.yaml b/ansible_collections/netapp/ontap/playbooks/examples/rest_apis/list_aggregates.yaml new 
file mode 100644 index 000000000..a901760e3 --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/rest_apis/list_aggregates.yaml @@ -0,0 +1,42 @@ +- + name: ONTAP list aggregates + gather_facts: false + hosts: localhost + collections: + - netapp.ontap + + module_defaults: + group/netapp.ontap.netapp_ontap: + hostname: '{{ clusters.cluster1.admin_ip }}' + username: '{{ clusters.cluster1.admin_username }}' + password: '{{ clusters.cluster1.admin_password }}' + https: true + validate_certs: false + + tasks: + - name: list aggregates + netapp.ontap.na_ontap_rest_info: + gather_subset: + - storage/aggregates + fields: 'block_storage,space' + # store the results to use them in another task + register: aggregates + +# call this play as: +# +# ansible-playbook -v list_aggregates.yaml -e@clusters.yaml +# +# with clusters.yaml providing credentials and IP addresses to connect to a cluster, eg: +# +# clusters: +# cluster1: +# admin_username: "{{ admin_username }}" +# admin_password: "{{ admin_password }}" +# admin_ip: "{{ admin_ips[0] }}" +# cluster2: +# admin_username: "{{ admin_username }}" +# admin_password: "{{ admin_password }}" +# admin_ip: "{{ admin_ips[1] }}" +# +# NOTE: module_defaults requires Ansible 2.12 as a minimum. +# With earlier versions of Ansible, move the values from lines 10 to 14 under the tasks, or use an alias. 
diff --git a/ansible_collections/netapp/ontap/playbooks/examples/rest_apis/volumes.yml b/ansible_collections/netapp/ontap/playbooks/examples/rest_apis/volumes.yml new file mode 100644 index 000000000..89e518dff --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/rest_apis/volumes.yml @@ -0,0 +1,160 @@ +- + name: Ontap REST API + hosts: localhost + gather_facts: false + collections: + - netapp.ontap + vars: + admin_ip: XXX.XXX.XXX.XXX + admin_username: XXXXXXXX + admin_password: XXXXXXXX + svm_name: ansibleSVM + + login: &login + hostname: "{{ admin_ip }}" + username: "{{ admin_username }}" + password: "{{ admin_password }}" + https: true + validate_certs: false + feature_flags: + trace_apis: true + tasks: + - name: run ontap REST API command as cluster admin - get version + na_ontap_restit: + <<: *login + api: cluster/software + query: + fields: version + register: result + - assert: {that: result.status_code==200, quiet: true} + + - name: run ontap REST API command as cluster admin - get list of SVMs + na_ontap_restit: + <<: *login + api: svm/svms + register: result + - assert: {that: result.status_code==200, quiet: true} + + - name: run ontap REST API command as cluster admin - get list of aggregates for this SVM + na_ontap_restit: + <<: *login + api: svm/svms + query: + fields: aggregates,cifs,nfs,uuid + query_fields: name + query: "{{ svm_name }}" + hal_linking: true + register: result + + - name: run ontap REST API command as cluster admin - delete volume + tags: create + na_ontap_restit: + <<: *login + api: storage/volumes + query: # query based DELETE does not require a UUID + name: deleteme_ln1 + svm.name: "{{ svm_name }}" + method: DELETE + wait_for_completion: true + register: result + + - name: run ontap REST API command as cluster admin - create volume + tags: create + na_ontap_restit: + <<: *login + api: storage/volumes + body: + name: deleteme_ln1 + aggregates.name: + - aggr1 + svm.name: "{{ svm_name }}" + method: POST + 
wait_for_completion: true + register: result + - assert: {that: result.response.job_response=='success', quiet: true} + + - name: run ontap REST API command as cluster admin - create volume - already exists! + tags: create + na_ontap_restit: + <<: *login + api: storage/volumes + body: + name: deleteme_ln1 + aggregates.name: + - aggr1 + svm.name: "{{ svm_name }}" + method: POST + wait_for_completion: true + ignore_errors: true + register: result + - assert: + that: msg in result.error_message + quiet: true + vars: + msg: 'Duplicate volume name' + + - name: run ontap REST API command as cluster admin - patch volume (rename) + tags: create + na_ontap_restit: + <<: *login + api: storage/volumes + query: # query based DELETE does not require a UUID + name: deleteme_ln1 + svm.name: "{{ svm_name }}" + body: + name: deleteme_ln2 + method: PATCH + wait_for_completion: true + register: result + + - name: run ontap REST API command as cluster admin - delete volume + tags: create + na_ontap_restit: + <<: *login + api: storage/volumes + query: # query based DELETE does not require a UUID + name: deleteme_ln2 + svm.name: "{{ svm_name }}" + method: DELETE + wait_for_completion: true + register: result + + - name: run ontap REST API command as cluster admin - create volume (vserver tunneling) + tags: create + na_ontap_restit: + <<: *login + api: storage/volumes + body: + name: deleteme_ln1 + aggregates.name: + - aggr1 + vserver_name: "{{ svm_name }}" + method: POST + wait_for_completion: true + register: result + + - name: run ontap REST API command as cluster admin - patch volume (rename) (vserver tunneling) + tags: create + na_ontap_restit: + <<: *login + api: storage/volumes + query: # query based DELETE does not require a UUID + name: deleteme_* + vserver_name: "{{ svm_name }}" + body: + name: deleteme_ln2 + method: PATCH + wait_for_completion: true + register: result + + - name: run ontap REST API command as cluster admin - delete volume (vserver tunneling) + tags: create + 
na_ontap_restit: + <<: *login + api: storage/volumes + query: # query based DELETE does not require a UUID + name: deleteme_ln2 + vserver_name: "{{ svm_name }}" + method: DELETE + wait_for_completion: true + register: result diff --git a/ansible_collections/netapp/ontap/playbooks/examples/support/debug_connectivity.yaml b/ansible_collections/netapp/ontap/playbooks/examples/support/debug_connectivity.yaml new file mode 100644 index 000000000..3c664c058 --- /dev/null +++ b/ansible_collections/netapp/ontap/playbooks/examples/support/debug_connectivity.yaml @@ -0,0 +1,42 @@ +- + name: ONTAP connect + hosts: localhost + gather_facts: false + collections: + - netapp.ontap + + tasks: + - name: debug connectivity using admin management interface or vsadmin interface + # use this to validate ZAPI and REST connectivity + # - with admin management interface, use admin or a user with admin privileges + # - with vsadmin management interface, use vsadmin or a user with vsadmin privileges + # for better formatting, you may use: + # export ANSIBLE_STDOUT_CALLBACK=minimal + # run this as: + # ansible-playbook -v ansible_collections/netapp/ontap/playbooks/examples/support/debug_connectivity.yaml + # after updating the values for hostname, username, and password + tags: + - admin + - vsadmin + na_ontap_debug: + hostname: "ip address of management interface, or of vserver interface" + username: "xxxx" + password: "yyyy" + https: true + validate_certs: false + + - name: debug connectivity using admin interface, validate vserver configuration + # use this to validate ZAPI and REST connectivity, and check vserver is reachable + # with admin management interface, use admin or a user with admin privileges + # run this as + # ansible-playbook -v ansible_collections/netapp/ontap/playbooks/examples/support/debug_connectivity.yaml -t admin_and_vserver + tags: + - never + - admin_and_vserver + na_ontap_debug: + hostname: "ip_address_of_management_interface" + username: "xxxx" + password: "yyyy" 
+ vserver: "svm name" + https: true + validate_certs: false diff --git a/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py new file mode 100644 index 000000000..6acfea61d --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/doc_fragments/netapp.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018-2022, Sumit Kumar , chris Archibald +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + - See respective platform section for more details +requirements: + - See respective platform section for more details +notes: + - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire +''' + # Documentation fragment for ONTAP (na_ontap) that contains REST + NA_ONTAP = r''' +options: + hostname: + description: + - The hostname or IP address of the ONTAP instance. + type: str + required: true + username: + description: + - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required. + - For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/). + - Two authentication methods are supported + - 1. basic authentication, using username and password, + - 2. SSL certificate authentication, using a ssl client cert file, and optionally a private key file. + - To use a certificate, the certificate must have been installed in the ONTAP cluster, and cert authentication must have been enabled. + type: str + aliases: [ user ] + password: + description: + - Password for the specified user. + type: str + aliases: [ pass ] + cert_filepath: + description: + - path to SSL client cert file (.pem). + - not supported with python 2.6. 
+ type: str + version_added: 20.6.0 + key_filepath: + description: + - path to SSL client key file. + type: str + version_added: 20.6.0 + https: + description: + - Enable and disable https. + - Ignored when using REST as only https is supported. + - Ignored when using SSL certificate authentication as it requires SSL. + type: bool + default: no + validate_certs: + description: + - If set to C(no), the SSL certificates will not be validated. + - This should only set to C(False) used on personally controlled sites using self-signed certificates. + type: bool + default: yes + http_port: + description: + - Override the default port (80 or 443) with this port + type: int + ontapi: + description: + - The ontap api version to use + type: int + use_rest: + description: + - Whether to use REST or ZAPI. + - always -- will always use the REST API if the module supports REST. + A warning is issued if the module does not support REST. + An error is issued if a module option is not supported in REST. + - never -- will always use ZAPI if the module supports ZAPI. An error may be issued if a REST option is not supported in ZAPI. + - auto -- will try to use the REST API if the module supports REST and modules options are supported. Reverts to ZAPI otherwise. + default: auto + type: str + feature_flags: + description: + - Enable or disable a new feature. + - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility. + - Supported keys and values are subject to change without notice. Unknown keys are ignored. + type: dict + version_added: "20.5.0" + force_ontap_version: + description: + - Override the cluster ONTAP version when using REST. + - The behavior is undefined if the version does not match the target cluster. + - This is provided as a work-around when the cluster version cannot be read because of permission issues. + See https://github.com/ansible-collections/netapp.ontap/wiki/Known-issues. 
+ - This should be in the form 9.10 or 9.10.1 with each element being an integer number. + - When C(use_rest) is set to auto, this may force a switch to ZAPI based on the version and platform capabilities. + - Ignored with ZAPI. + type: str + version_added: "21.23.0" +requirements: + - Ansible 2.9 or later - 2.12 or later is recommended. + - Python3 - 3.9 or later is recommended. + - When using ZAPI, netapp-lib 2018.11.13 or later (install using 'pip install netapp-lib'), + netapp-lib 2020.3.12 is strongly recommended as it provides better error reporting for connection issues + - a physical or virtual clustered Data ONTAP system, the modules support Data ONTAP 9.1 and onward, + REST support requires ONTAP 9.6 or later + +notes: + - The modules prefixed with na_ontap are built to support the ONTAP storage platform. + - https is enabled by default and recommended. + To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;' + ''' + + # Documentation fragment for ONTAP (na_ontap) that are ZAPI ONLY + NA_ONTAP_ZAPI = r''' +options: + hostname: + description: + - The hostname or IP address of the ONTAP instance. + type: str + required: true + username: + description: + - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required. + - For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/). + - Two authentication methods are supported + - 1. basic authentication, using username and password, + - 2. SSL certificate authentication, using a ssl client cert file, and optionally a private key file. + - To use a certificate, the certificate must have been installed in the ONTAP cluster, and cert authentication must have been enabled. + type: str + aliases: [ user ] + password: + description: + - Password for the specified user. 
+ type: str + aliases: [ pass ] + cert_filepath: + description: + - path to SSL client cert file (.pem). + - not supported with python 2.6. + type: str + version_added: 20.6.0 + key_filepath: + description: + - path to SSL client key file. + type: str + version_added: 20.6.0 + https: + description: + - Enable and disable https. + - Ignored when using REST as only https is supported. + - Ignored when using SSL certificate authentication as it requires SSL. + type: bool + default: no + validate_certs: + description: + - If set to C(no), the SSL certificates will not be validated. + - This should only set to C(False) used on personally controlled sites using self-signed certificates. + type: bool + default: yes + http_port: + description: + - Override the default port (80 or 443) with this port + type: int + ontapi: + description: + - The ontap api version to use + type: int + use_rest: + description: + - This module only support ZAPI and will can not be swtich to REST + - never -- will always use ZAPI if the module supports ZAPI. An error may be issued if a REST option is not supported in ZAPI. + - auto -- will always use ZAPI. + default: never + type: str + feature_flags: + description: + - Enable or disable a new feature. + - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility. + - Supported keys and values are subject to change without notice. Unknown keys are ignored. + type: dict + version_added: "20.5.0" +requirements: + - Ansible 2.9 or later - 2.12 or later is recommended. + - Python3 - 3.9 or later is recommended. 
+ - When using ZAPI, netapp-lib 2018.11.13 or later (install using 'pip install netapp-lib'), + netapp-lib 2020.3.12 is strongly recommended as it provides better error reporting for connection issues + - a physical or virtual clustered Data ONTAP system, the modules support Data ONTAP 9.1 and onward, + REST support requires ONTAP 9.6 or later + +notes: + - The modules prefixed with na_ontap are built to support the ONTAP storage platform. + - https is enabled by default and recommended. + To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;' + ''' + + # Documentation fragment for ONTAP (na_ontap) peer options + NA_ONTAP_PEER = r''' +options: + peer_options: + version_added: 21.8.0 + description: + - IP address and connection options for the peer system. + - If any if these options is not specified, the corresponding source option is used. + type: dict + suboptions: + hostname: + description: + - The hostname or IP address of the ONTAP instance. + type: str + required: true + username: + description: + - Username when using basic authentication. + type: str + aliases: [ user ] + password: + description: + - Password for the specified user. + type: str + aliases: [ pass ] + cert_filepath: + description: + - path to SSL client cert file (.pem). + type: str + key_filepath: + description: + - path to SSL client key file. + type: str + https: + description: + - Enable and disable https. + type: bool + validate_certs: + description: + - If set to C(no), the SSL certificates will not be validated. + - This should only set to C(False) used on personally controlled sites using self-signed certificates. 
+ type: bool + http_port: + description: + - Override the default port (80 or 443) with this port + type: int + ontapi: + description: + - The ontap api version to use + type: int + use_rest: + description: + - REST API if supported by the target system for all the resources and attributes the module requires. Otherwise will revert to ZAPI. + - always -- will always use the REST API + - never -- will always use the ZAPI + - auto -- will try to use the REST Api + type: str + force_ontap_version: + description: + - Override the cluster ONTAP version when using REST. + - The behavior is undefined if the version does not match the target cluster. + - This is provided as a work-around when the cluster version cannot be read because of permission issues. + See https://github.com/ansible-collections/netapp.ontap/wiki/Known-issues. + - This should be in the form 9.10 or 9.10.1 with each element being an integer number. + - When C(use_rest) is set to auto, this may force a switch to ZAPI based on the version and platform capabilities. + - Ignored with ZAPI. + type: str + version_added: "21.23.0" +''' diff --git a/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_from_seconds.yml b/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_from_seconds.yml new file mode 100644 index 000000000..e605469ce --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_from_seconds.yml @@ -0,0 +1,33 @@ +DOCUMENTATION: + name: iso8601_duration_from_seconds + author: NetApp Ansible Team (@carchi8py) + version_added: 21.24.0 + short_description: Encode seconds as a ISO 8601 duration string + description: + - Encode seconds as a ISO 8601 duration string. + positional: _input + options: + _input: + description: A number of seconds to encode. + type: float + required: true + format: + description: An optional format string for isodate.duration_isoformat. Defaults to P%P. 
+ type: string + notes: + - requires isodate and datetime python modules. + - set filter_plugins path to /ansible_collections/netapp/ontap/plugins/filter in ansible.cfg. + - documentation can be generated locally using a version of ansible-doc (2.14) that supports '-t filter' + - ansible-doc -t filter netapp.ontap.iso8601_duration_to_seconds + +EXAMPLES: | + # Encode seconds + iso_duration: "{{ 59579864 | netapp.ontap.iso8601_duration_from_seconds }}" + + # Encode 'duration_in_seconds' variable + iso_duration: "{{ duration_in_seconds | netapp.ontap.iso8601_duration_from_seconds }}" + +RETURN: + _value: + description: A string representing the duration in ISO 8601 format. + type: string diff --git a/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_to_seconds.yml b/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_to_seconds.yml new file mode 100644 index 000000000..1fd796938 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/filter/iso8601_duration_to_seconds.yml @@ -0,0 +1,30 @@ +DOCUMENTATION: + name: iso8601_duration_to_seconds + author: NetApp Ansible Team (@carchi8py) + version_added: 21.24.0 + short_description: Decode a ISO 8601 duration string as seconds + description: + - Decode a ISO 8601 duration string as seconds + positional: _input + options: + _input: + description: A string to decode + type: string + required: true + notes: + - requires isodate and datetime python modules. + - set filter_plugins path to /ansible_collections/netapp/ontap/plugins/filter in ansible.cfg. 
+ - documentation can be generated locally using a version of ansible-doc (2.14) that supports '-t filter' + - ansible-doc -t filter netapp.ontap.iso8601_duration_to_seconds + +EXAMPLES: | + # Decode a string + duration_in_seconds: "{{ 'P689DT13H57M44S' | netapp.ontap.iso8601_duration_to_seconds }}" + + # Decode 'iso_duration' variable + duration_in_seconds: "{{ iso_duration | netapp.ontap.iso8601_duration_to_seconds }}" + +RETURN: + _value: + description: A float representing the number of seconds. The fractional part may represent milliseconds. + type: float diff --git a/ansible_collections/netapp/ontap/plugins/filter/na_filter_iso8601.py b/ansible_collections/netapp/ontap/plugins/filter/na_filter_iso8601.py new file mode 100644 index 000000000..7494e3878 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/filter/na_filter_iso8601.py @@ -0,0 +1,53 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +Filters for ISO 8601 durations +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.errors import AnsibleFilterError +from ansible.module_utils._text import to_native + +IMPORT_ERROR = None +try: + import isodate +except ImportError as exc: + IMPORT_ERROR = to_native(exc) + + +class FilterModule: + ''' Ansible jinja2 filters ''' + + def filters(self): + return { + 'iso8601_duration_to_seconds': iso8601_duration_to_seconds, + 'iso8601_duration_from_seconds': iso8601_duration_from_seconds, + } + + +def check_for_import(): + if IMPORT_ERROR: + raise AnsibleFilterError("isodate python package is required: %s" % IMPORT_ERROR) + + +def iso8601_duration_to_seconds(duration): + check_for_import() + try: + dt_duration = isodate.parse_duration(duration) + except Exception as exc: + raise AnsibleFilterError("iso8601_duration_to_seconds - error: %s - expecting PnnYnnMnnDTnnHnnMnnS, received: %s" % (to_native(exc), duration)) + return 
dt_duration.total_seconds() + + +def iso8601_duration_from_seconds(seconds, format=None): + check_for_import() + try: + duration = isodate.Duration(seconds=seconds) + iso8601_duration = isodate.duration_isoformat(duration, format=isodate.D_DEFAULT if format is None else format) + except Exception as exc: + raise AnsibleFilterError("iso8601_duration_from_seconds - error: %s - received: %s" % (to_native(exc), seconds)) + return iso8601_duration diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py b/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py new file mode 100644 index 000000000..28d9428a2 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/module_utils/netapp.py @@ -0,0 +1,1134 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2017, Sumit Kumar +# Copyright (c) 2017, Michael Price +# Copyright (c) 2017-2023, NetApp, Inc +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +''' +netapp.py +''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import base64 +import logging +import os +import ssl +import time +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils._text import to_native + +try: + from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION +except ImportError: + ANSIBLE_VERSION = 'unknown' + +COLLECTION_VERSION = "22.7.0" +CLIENT_APP_VERSION = "%s/%s" % ("%s", COLLECTION_VERSION) +IMPORT_EXCEPTION = None + +try: + from netapp_lib.api.zapi import zapi + HAS_NETAPP_LIB = True +except ImportError as exc: + HAS_NETAPP_LIB = False + IMPORT_EXCEPTION = exc + +try: + import requests + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False + +HAS_SF_SDK = False +SF_BYTE_MAP = dict( + # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000. 
+ bytes=1, + b=1, + kb=1000, + mb=1000 ** 2, + gb=1000 ** 3, + tb=1000 ** 4, + pb=1000 ** 5, + eb=1000 ** 6, + zb=1000 ** 7, + yb=1000 ** 8 +) + +POW2_BYTE_MAP = dict( + # Here, 1 kb = 1024 + bytes=1, + b=1, + k=1024, + m=1024 ** 2, + g=1024 ** 3, + t=1024 ** 4, + p=1024 ** 5, + e=1024 ** 6, + z=1024 ** 7, + y=1024 ** 8, + kb=1024, + mb=1024 ** 2, + gb=1024 ** 3, + tb=1024 ** 4, + pb=1024 ** 5, + eb=1024 ** 6, + zb=1024 ** 7, + yb=1024 ** 8, +) + +ERROR_MSG = dict( + no_cserver='This module is expected to run as cluster admin' +) + +LOG = logging.getLogger(__name__) +LOG_FILE = '/tmp/ontap_apis.log' +ZAPI_DEPRECATION_MESSAGE = "With version 22.0.0 ONTAPI (ZAPI) has been deprecated. The final ONTAP version to support ZAPI is ONTAP 9.13.1. "\ + "ZAPI calls in these modules will continue to work for ONTAP versions that supports ZAPI. "\ + "You can update your playbook to use REST by adding use_rest: always to your playbook. "\ + "More information can be found at: https://github.com/ansible-collections/netapp.ontap" + +try: + from solidfire.factory import ElementFactory + HAS_SF_SDK = True +except ImportError: + HAS_SF_SDK = False + + +def has_netapp_lib(): + return HAS_NETAPP_LIB + + +def netapp_lib_is_required(): + return "Error: the python NetApp-Lib module is required. Import error: %s" % str(IMPORT_EXCEPTION) + + +def has_sf_sdk(): + return HAS_SF_SDK + + +def na_ontap_zapi_only_spec(): + # This is used for Zapi only Modules. 
+ + return dict( + hostname=dict(required=True, type='str'), + username=dict(required=False, type='str', aliases=['user']), + password=dict(required=False, type='str', aliases=['pass'], no_log=True), + https=dict(required=False, type='bool', default=False), + validate_certs=dict(required=False, type='bool', default=True), + http_port=dict(required=False, type='int'), + ontapi=dict(required=False, type='int'), + use_rest=dict(required=False, type='str', default='never'), + feature_flags=dict(required=False, type='dict'), + cert_filepath=dict(required=False, type='str'), + key_filepath=dict(required=False, type='str', no_log=False), + ) + + +def na_ontap_host_argument_spec(): + # This is used for Zapi + REST, and REST only Modules. + + return dict( + hostname=dict(required=True, type='str'), + username=dict(required=False, type='str', aliases=['user']), + password=dict(required=False, type='str', aliases=['pass'], no_log=True), + https=dict(required=False, type='bool', default=False), + validate_certs=dict(required=False, type='bool', default=True), + http_port=dict(required=False, type='int'), + ontapi=dict(required=False, type='int'), + use_rest=dict(required=False, type='str', default='auto'), + feature_flags=dict(required=False, type='dict'), + cert_filepath=dict(required=False, type='str'), + key_filepath=dict(required=False, type='str', no_log=False), + force_ontap_version=dict(required=False, type='str') + ) + + +def na_ontap_host_argument_spec_peer(): + spec = na_ontap_host_argument_spec() + spec.pop('feature_flags') + # get rid of default values, as we'll use source values + for value in spec.values(): + if 'default' in value: + value.pop('default') + return spec + + +def has_feature(module, feature_name): + feature = get_feature(module, feature_name) + if isinstance(feature, bool): + return feature + module.fail_json(msg="Error: expected bool type for feature flag: %s" % feature_name) + + +def get_feature(module, feature_name): + ''' if the user has 
configured the feature, use it + otherwise, use our default + ''' + default_flags = dict( + strict_json_check=True, # when true, fail if response.content in not empty and is not valid json + trace_apis=False, # when true, append ZAPI and REST requests/responses to /tmp/ontap_zapi.txt + trace_headers=False, # when true, headers are not redacted in send requests + trace_auth_args=False, # when true, auth_args are not redacted in send requests + check_required_params_for_none=True, + classic_basic_authorization=False, # use ZAPI wrapper to send Authorization header + deprecation_warning=True, + sanitize_xml=True, + sanitize_code_points=[8], # unicode values, 8 is backspace + show_modified=True, + always_wrap_zapi=True, # for better error reporting + flexcache_delete_return_timeout=5, # ONTAP bug if too big? + # for SVM, whch protocols can be allowed + svm_allowable_protocols_rest=['cifs', 'fcp', 'iscsi', 'nvme', 'nfs', 'ndmp'], + svm_allowable_protocols_zapi=['cifs', 'fcp', 'iscsi', 'nvme', 'nfs', 'ndmp', 'http'], + max_files_change_threshold=1, # percentage of increase/decrease required to trigger a modify action + warn_or_fail_on_fabricpool_backend_change='fail', + no_cserver_ems=False # when True, don't attempt to find cserver and don't send cserver EMS + ) + + if module.params['feature_flags'] is not None and feature_name in module.params['feature_flags']: + return module.params['feature_flags'][feature_name] + if feature_name in default_flags: + return default_flags[feature_name] + module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name) + + +def create_sf_connection(module, port=None, host_options=None): + if not HAS_SF_SDK: + module.fail_json(msg="the python SolidFire SDK module is required") + + if host_options is None: + host_options = module.params + msg, msg2 = None, None + missing_options = [option for option in ('hostname', 'username', 'password') if not host_options.get(option)] + if missing_options: + verb = 'are' if 
len(missing_options) > 1 else 'is' + msg = "%s %s required for ElementSW connection." % (', '.join(missing_options), verb) + extra_options = [option for option in ('cert_filepath', 'key_filepath') if host_options.get(option)] + if extra_options: + verb = 'are' if len(extra_options) > 1 else 'is' + msg2 = "%s %s not supported for ElementSW connection." % (', '.join(extra_options), verb) + msg = "%s %s" % (msg, msg2) if msg and msg2 else msg or msg2 + if msg: + module.fail_json(msg=msg) + hostname = host_options.get('hostname') + username = host_options.get('username') + password = host_options.get('password') + + try: + return ElementFactory.create(hostname, username, password, port=port) + except Exception as exc: + raise Exception("Unable to create SF connection: %s" % exc) + + +def set_auth_method(module, username, password, cert_filepath, key_filepath): + error = None + if password is None and username is None: + if cert_filepath is None: + error = ('Error: cannot have a key file without a cert file' if key_filepath is not None + else 'Error: ONTAP module requires username/password or SSL certificate file(s)') + else: + auth_method = 'single_cert' if key_filepath is None else 'cert_key' + elif password is not None and username is not None: + if cert_filepath is not None or key_filepath is not None: + error = 'Error: cannot have both basic authentication (username/password) ' +\ + 'and certificate authentication (cert/key files)' + else: + auth_method = 'basic_auth' if has_feature(module, 'classic_basic_authorization') else 'speedy_basic_auth' + else: + error = 'Error: username and password have to be provided together' + if cert_filepath is not None or key_filepath is not None: + error += ' and cannot be used with cert or key files' + if error: + module.fail_json(msg=error) + return auth_method + + +def setup_host_options_from_module_params(host_options, module, keys): + '''if an option is not set, use primary value. 
+ but don't mix up basic and certificate authentication methods + + host_options is updated in place + option values are read from module.params + keys is a list of keys that need to be added/updated/left alone in host_options + ''' + password_keys = ['username', 'password'] + certificate_keys = ['cert_filepath', 'key_filepath'] + use_password = any(host_options.get(x) is not None for x in password_keys) + use_certificate = any(host_options.get(x) is not None for x in certificate_keys) + if use_password and use_certificate: + module.fail_json( + msg='Error: host cannot have both basic authentication (username/password) and certificate authentication (cert/key files).') + if use_password: + exclude_keys = certificate_keys + elif use_certificate: + exclude_keys = password_keys + else: + exclude_keys = [] + for key in keys: + if host_options.get(key) is None and key not in exclude_keys: + # use same value as source if no value is given for dest + host_options[key] = module.params[key] + + +def set_zapi_port_and_transport(server, https, port, validate_certs): + # default is HTTP + if https: + if port is None: + port = 443 + transport_type = 'HTTPS' + # HACK to bypass certificate verification + if validate_certs is False and not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None): + ssl._create_default_https_context = ssl._create_unverified_context + else: + if port is None: + port = 80 + transport_type = 'HTTP' + server.set_transport_type(transport_type) + server.set_port(port) + + +def setup_na_ontap_zapi(module, vserver=None, wrap_zapi=False, host_options=None): + module.warn(ZAPI_DEPRECATION_MESSAGE) + if host_options is None: + host_options = module.params + hostname = host_options.get('hostname') + username = host_options.get('username') + password = host_options.get('password') + cert_filepath = host_options.get('cert_filepath') + key_filepath = host_options.get('key_filepath') + https = host_options.get('https') + 
validate_certs = host_options.get('validate_certs') + port = host_options.get('http_port') + version = host_options.get('ontapi') + trace = has_feature(module, 'trace_apis') + if trace: + logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s') + wrap_zapi |= has_feature(module, 'always_wrap_zapi') + auth_method = set_auth_method(module, username, password, cert_filepath, key_filepath) + + if not HAS_NETAPP_LIB: + module.fail_json(msg=netapp_lib_is_required()) + + # set up zapi + if auth_method in ('single_cert', 'cert_key'): + # override NaServer in netapp-lib to enable certificate authentication + server = OntapZAPICx(hostname, module=module, username=username, password=password, + validate_certs=validate_certs, cert_filepath=cert_filepath, + key_filepath=key_filepath, style=zapi.NaServer.STYLE_CERTIFICATE, + auth_method=auth_method, trace=trace) + # SSL certificate authentication requires SSL + https = True + elif auth_method == 'speedy_basic_auth' or wrap_zapi: + # override NaServer in netapp-lib to add Authorization header preemptively + # use wrapper to handle parse error (mostly for na_ontap_command) + server = OntapZAPICx(hostname, module=module, username=username, password=password, + validate_certs=validate_certs, auth_method=auth_method, trace=trace) + else: + # legacy netapp-lib + server = zapi.NaServer(hostname, username=username, password=password, trace=trace) + if vserver: + server.set_vserver(vserver) + if host_options.get('use_rest') == 'always': + note = '' if https else ' Note: https is set to false.' 
+ module.warn("Using ZAPI for %s, ignoring 'use_rest: always'.%s" % (module._name, note)) + + set_zapi_port_and_transport(server, https, port, validate_certs) + server.set_api_version(major=1, minor=(version or 110)) + server.set_server_type('FILER') + return server + + +def is_zapi_connection_error(message): + ''' return True if it is a connection issue ''' + # netapp-lib message may contain a tuple or a str! + try: + if isinstance(message, tuple) and isinstance(message[0], ConnectionError): + return True + except NameError: + # python 2.7 does not know about ConnectionError + pass + return isinstance(message, str) and message.startswith(('URLError', 'Unauthorized')) + + +def is_zapi_write_access_error(message): + ''' return True if it is a write access error ''' + # netapp-lib message may contain a tuple or a str! + if isinstance(message, str) and message.startswith('Insufficient privileges:'): + return 'does not have write access' in message + return False + + +def is_zapi_missing_vserver_error(message): + ''' return True if it is a missing vserver error ''' + # netapp-lib message may contain a tuple or a str! + return isinstance(message, str) and message in ('Vserver API missing vserver parameter.', 'Specified vserver not found') + + +def get_cserver_zapi(server): + ''' returns None if not run on the management or cluster IP ''' + vserver_info = zapi.NaElement('vserver-get-iter') + query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'}) + query = zapi.NaElement('query') + query.add_child_elem(query_details) + vserver_info.add_child_elem(query) + try: + result = server.invoke_successfully(vserver_info, + enable_tunneling=False) + except zapi.NaApiError as exc: + # Do not fail if we can't connect to the server. + # The module will report a better error when trying to get some data from ONTAP. 
+ if is_zapi_connection_error(exc.message): + return None + # raise on other errors, as it may be a bug in calling the ZAPI + raise exc + attribute_list = result.get_child_by_name('attributes-list') + if attribute_list is not None: + vserver_list = attribute_list.get_child_by_name('vserver-info') + if vserver_list is not None: + return vserver_list.get_child_content('vserver-name') + return None + + +def classify_zapi_exception(error): + ''' return type of error ''' + try: + # very unlikely to fail, but don't take any chance + err_code = int(error.code) + except (AttributeError, ValueError): + err_code = 0 + try: + # very unlikely to fail, but don't take any chance + err_msg = error.message + except AttributeError: + err_msg = "" + if err_code == 13005 and err_msg.startswith('Unable to find API:') and 'data vserver' in err_msg: + return 'missing_vserver_api_error', 'Most likely running a cluster level API as vserver: %s' % to_native(error) + if err_code == 13001 and err_msg.startswith("RPC: Couldn't make connection"): + return 'rpc_error', to_native(error) + return "other_error", to_native(error) + + +def get_cserver(connection, is_rest=False): + if not is_rest: + return get_cserver_zapi(connection) + + params = {'fields': 'type'} + api = "private/cli/vserver" + json, error = connection.get(api, params) + if json is None or error is not None: + # exit if there is an error or no data + return None + vservers = json.get('records') + if vservers is not None: + for vserver in vservers: + if vserver['type'] == 'admin': # cluster admin + return vserver['vserver'] + if len(vservers) == 1: # assume vserver admin + return vservers[0]['vserver'] + + return None + + +def generate_result(changed, actions=None, modify=None, response=None, extra_responses=None): + result = dict(changed=changed) + if response is not None: + result['response'] = response + if modify: + result['modify'] = modify + if actions: + result['actions'] = actions + if extra_responses: + 
result.update(extra_responses) + return result + + +if HAS_NETAPP_LIB: + class OntapZAPICx(zapi.NaServer): + ''' override zapi NaServer class to: + - enable SSL certificate authentication + - ignore invalid XML characters in ONTAP output (when using CLI module) + - add Authorization header when using basic authentication + ''' + def __init__(self, hostname=None, server_type=zapi.NaServer.SERVER_TYPE_FILER, + transport_type=zapi.NaServer.TRANSPORT_TYPE_HTTP, + style=zapi.NaServer.STYLE_LOGIN_PASSWORD, username=None, + password=None, port=None, trace=False, module=None, + cert_filepath=None, key_filepath=None, validate_certs=None, + auth_method=None): + # python 2.x syntax, but works for python 3 as well + super(OntapZAPICx, self).__init__(hostname, server_type=server_type, + transport_type=transport_type, + style=style, username=username, + password=password, port=port, trace=trace) + self.cert_filepath = cert_filepath + self.key_filepath = key_filepath + self.validate_certs = validate_certs + self.module = module + self.base64_creds = None + if auth_method == 'speedy_basic_auth': + auth = '%s:%s' % (username, password) + self.base64_creds = base64.b64encode(auth.encode()).decode() + + def _create_certificate_auth_handler(self): + try: + context = ssl.create_default_context() + except AttributeError as exc: + self._fail_with_exc_info('SSL certificate authentication requires python 2.7 or later.', exc) + + if not self.validate_certs: + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + try: + context.load_cert_chain(self.cert_filepath, keyfile=self.key_filepath) + except IOError as exc: + self._fail_with_exc_info('Cannot load SSL certificate, check files exist.', exc) + + return zapi.urllib.request.HTTPSHandler(context=context) + + def _fail_with_exc_info(self, arg0, exc): + msg = arg0 + msg += ' More info: %s' % repr(exc) + self.module.fail_json(msg=msg) + + def sanitize_xml(self, response): + # some ONTAP CLI commands return BEL on error + 
new_response = response.replace(b'\x07\n', b'') + # And 9.1 uses \r\n rather than \n ! + new_response = new_response.replace(b'\x07\r\n', b'') + # And 9.7 may send backspaces + for code_point in get_feature(self.module, 'sanitize_code_points'): + if bytes([8]) == b'\x08': # python 3 + byte = bytes([code_point]) + elif chr(8) == b'\x08': # python 2 + byte = chr(code_point) + else: # very unlikely, noop + byte = b'.' + new_response = new_response.replace(byte, b'.') + return new_response + + def _parse_response(self, response): + ''' handling XML parsing exception ''' + try: + return super(OntapZAPICx, self)._parse_response(response) + except zapi.etree.XMLSyntaxError as exc: + if has_feature(self.module, 'sanitize_xml'): + try: + return super(OntapZAPICx, self)._parse_response(self.sanitize_xml(response)) + except Exception: + # ignore a second exception, we'll report the first one + pass + try: + # report first exception, but include full response + exc.msg += ". Received: %s" % response + except Exception: + # in case the response is very badly formatted, ignore it + pass + raise exc + + def _create_request(self, na_element, enable_tunneling=False): + ''' intercept newly created request to add Authorization header ''' + request, netapp_element = super(OntapZAPICx, self)._create_request(na_element, enable_tunneling=enable_tunneling) + request.add_header('X-Dot-Client-App', CLIENT_APP_VERSION % self.module._name) + if self.base64_creds is not None: + request.add_header('Authorization', 'Basic %s' % self.base64_creds) + return request, netapp_element + + # as is from latest version of netapp-lib + def invoke_elem(self, na_element, enable_tunneling=False): + """Invoke the API on the server.""" + if not na_element or not isinstance(na_element, zapi.NaElement): + raise ValueError('NaElement must be supplied to invoke API') + + request, request_element = self._create_request(na_element, + enable_tunneling) + + if self._trace: + zapi.LOG.debug("Request: %s", 
request_element.to_string(pretty=True)) + + if not hasattr(self, '_opener') or not self._opener \ + or self._refresh_conn: + self._build_opener() + try: + if hasattr(self, '_timeout'): + response = self._opener.open(request, timeout=self._timeout) + else: + response = self._opener.open(request) + except zapi.urllib.error.HTTPError as exc: + raise zapi.NaApiError(exc.code, exc.reason) + except zapi.urllib.error.URLError as exc: + msg = 'URL error' + error = repr(exc) + try: + # ConnectionRefusedError is not defined in python 2.7 + if isinstance(exc.reason, ConnectionRefusedError): + msg = 'Unable to connect' + error = exc.args + except Exception: + pass + raise zapi.NaApiError(msg, error) + except Exception as exc: + raise zapi.NaApiError('Unexpected error', repr(exc)) + + response_xml = response.read() + response_element = self._get_result(response_xml) + + if self._trace: + zapi.LOG.debug("Response: %s", response_element.to_string(pretty=True)) + + return response_element + + +class OntapRestAPI(object): + ''' wrapper to send requests to ONTAP REST APIs ''' + def __init__(self, module, timeout=60, host_options=None): + self.host_options = module.params if host_options is None else host_options + self.module = module + # either username/password or a certifcate with/without a key are used for authentication + self.username = self.host_options.get('username') + self.password = self.host_options.get('password') + self.hostname = self.host_options['hostname'] + self.use_rest = self.host_options['use_rest'].lower() + self.cert_filepath = self.host_options.get('cert_filepath') + self.key_filepath = self.host_options.get('key_filepath') + self.verify = self.host_options['validate_certs'] + self.timeout = timeout + port = self.host_options['http_port'] + self.force_ontap_version = self.host_options.get('force_ontap_version') + if port is None: + self.url = 'https://%s/api/' % self.hostname + else: + self.url = 'https://%s:%d/api/' % (self.hostname, port) + 
self.is_rest_error = None + self.fallback_to_zapi_reason = None + self.ontap_version = dict( + full='unknown', + generation=-1, + major=-1, + minor=-1, + valid=False + ) + self.errors = [] + self.debug_logs = [] + self.auth_method = set_auth_method(self.module, self.username, self.password, self.cert_filepath, self.key_filepath) + self.check_required_library() + if has_feature(module, 'trace_apis'): + logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s') + self.log_headers = has_feature(module, 'trace_headers') + self.log_auth_args = has_feature(module, 'trace_auth_args') + + def requires_ontap_9_6(self, module_name): + return self.requires_ontap_version(module_name) + + def requires_ontap_version(self, module_name, version='9.6'): + suffix = " - %s" % self.is_rest_error if self.is_rest_error is not None else "" + return "%s only supports REST, and requires ONTAP %s or later.%s" % (module_name, version, suffix) + + def options_require_ontap_version(self, options, version='9.6', use_rest=None): + current_version = self.get_ontap_version() + suffix = " - %s" % self.is_rest_error if self.is_rest_error is not None else "" + if current_version != (-1, -1, -1): + suffix += " - ONTAP version: %s.%s.%s" % current_version + if use_rest is not None: + suffix += " - using %s" % ('REST' if use_rest else 'ZAPI') + if isinstance(options, list) and len(options) > 1: + tag = "any of %s" % options + elif isinstance(options, list) and len(options) == 1: + tag = str(options[0]) + else: + tag = str(options) + return 'using %s requires ONTAP %s or later and REST must be enabled%s.' 
% (tag, version, suffix) + + def meets_rest_minimum_version(self, use_rest, minimum_generation, minimum_major, minimum_minor=0): + return use_rest and self.get_ontap_version() >= (minimum_generation, minimum_major, minimum_minor) + + def fail_if_not_rest_minimum_version(self, module_name, minimum_generation, minimum_major, minimum_minor=0): + status_code = self.get_ontap_version_using_rest() + msgs = [] + if self.use_rest == 'never': + msgs.append('Error: REST is required for this module, found: "use_rest: %s".' % self.use_rest) + # The module only supports REST, so make it required + self.use_rest = 'always' + if self.is_rest_error: + msgs.append('Error using REST for version, error: %s.' % self.is_rest_error) + if status_code != 200: + msgs.append('Error using REST for version, status_code: %s.' % status_code) + if msgs: + self.module.fail_json(msg=' '.join(msgs)) + version = self.get_ontap_version() + if version < (minimum_generation, minimum_major, minimum_minor): + msg = 'Error: ' + self.requires_ontap_version(module_name, '%d.%d.%d' % (minimum_generation, minimum_major, minimum_minor)) + msg += ' Found: %s.%s.%s.' 
    def check_required_library(self):
        """Fail the module if the `requests` package is not installed."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'))

    def build_headers(self, accept=None, vserver_name=None, vserver_uuid=None):
        """Build the default HTTP headers for a REST call.

        :param accept: optional Accept header value (used to turn HAL linking on/off).
        :param vserver_name: optional SVM name for vserver tunneling.
        :param vserver_uuid: optional SVM UUID for vserver tunneling.
        :return: dict of headers, always including X-Dot-Client-App.
        """
        headers = {'X-Dot-Client-App': CLIENT_APP_VERSION % self.module._name}
        # accept is used to turn on/off HAL linking
        if accept is not None:
            headers['accept'] = accept
        # vserver tunneling using vserver name and/or UUID
        if vserver_name is not None:
            headers['X-Dot-SVM-Name'] = vserver_name
        if vserver_uuid is not None:
            headers['X-Dot-SVM-UUID'] = vserver_uuid
        return headers

    def send_request(self, method, api, params, json=None, headers=None, files=None):
        ''' send http request and process response, including error conditions '''
        url = self.url + api

        def get_auth_args():
            # translate the negotiated auth method into requests keyword args
            if self.auth_method == 'single_cert':
                kwargs = dict(cert=self.cert_filepath)
            elif self.auth_method == 'cert_key':
                kwargs = dict(cert=(self.cert_filepath, self.key_filepath))
            elif self.auth_method in ('basic_auth', 'speedy_basic_auth'):
                # with requests, there is no challenge, eg no 401.
                kwargs = dict(auth=(self.username, self.password))
            else:
                raise KeyError(self.auth_method)
            return kwargs

        status_code, json_dict, error_details = self._send_request(method, url, params, json, headers, files, get_auth_args())

        return status_code, json_dict, error_details

    def _send_request(self, method, url, params, json, headers, files, auth_args):
        """Low-level HTTP transport: returns (status_code, json_dict, error_details).

        error_details is None on success, a string for transport/HTTP errors,
        or the decoded 'error' dict when the REST payload reports one.
        """
        status_code = None
        json_dict = None
        json_error = None
        error_details = None
        if headers is None:
            headers = self.build_headers()

        def fail_on_non_empty_value(response):
            '''json() may fail on an empty value, but it's OK if no response is expected.
            To avoid false positives, only report an issue when we expect to read a value.
            The first get will see it.
            '''
            if method == 'GET' and has_feature(self.module, 'strict_json_check'):
                contents = response.content
                if len(contents) > 0:
                    raise ValueError("Expecting json, got: %s" % contents)

        def get_json(response):
            ''' extract json, and error message if present '''
            try:
                json = response.json()
            except ValueError:
                fail_on_non_empty_value(response)
                return None, None
            return json, json.get('error')

        # headers and auth args may contain credentials; only log them when
        # the corresponding trace feature flags are enabled
        self.log_debug('sending', repr(dict(method=method, url=url, verify=self.verify, params=params,
                                            timeout=self.timeout, json=json,
                                            headers=headers if self.log_headers else 'redacted',
                                            auth_args=auth_args if self.log_auth_args else 'redacted')))
        try:
            response = requests.request(method, url, verify=self.verify, params=params,
                                        timeout=self.timeout, json=json, headers=headers, files=files, **auth_args)
            status_code = response.status_code
            self.log_debug(status_code, response.content)
            # If the response was successful, no Exception will be raised
            response.raise_for_status()
            json_dict, json_error = get_json(response)
        except requests.exceptions.HTTPError as err:
            # the payload may still carry a structured REST error
            try:
                __, json_error = get_json(response)
            except (AttributeError, ValueError):
                json_error = None
            if json_error is None:
                self.log_error(status_code, 'HTTP error: %s' % err)
                error_details = str(err)
            # If an error was reported in the json payload, it is handled below
        except requests.exceptions.ConnectionError as err:
            self.log_error(status_code, 'Connection error: %s' % err)
            error_details = str(err)
        except Exception as err:
            self.log_error(status_code, 'Other error: %s' % err)
            error_details = str(err)
        if json_error is not None:
            self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
            error_details = json_error
        if not error_details and not json_dict:
            if json_dict is None:
                json_dict = {}
            if method == 'OPTIONS':
                # OPTIONS provides the list of supported verbs
                json_dict['Allow'] = response.headers.get('Allow')
            if response.headers.get('Content-Type', '').startswith("multipart/form-data"):
                json_dict['text'] = response.text
        return status_code, json_dict, error_details

    def _is_job_done(self, job_json, job_state, job_error, timed_out):
        """ return (done, message, error)
        done is True to indicate that the job is complete, or failed, or timed out
        done is False when the job is still running
        """
        # a job looks like this
        # {
        #   "uuid": "cca3d070-58c6-11ea-8c0c-005056826c14",
        #   "description": "POST /api/cluster/metrocluster",
        #   "state": "failure",
        #   "message": "There are not enough disks in Pool1.", **OPTIONAL**
        #   "code": 2432836,
        #   "start_time": "2020-02-26T10:35:44-08:00",
        #   "end_time": "2020-02-26T10:47:38-08:00",
        #   "_links": {
        #       "self": {
        #           "href": "/api/cluster/jobs/cca3d070-58c6-11ea-8c0c-005056826c14"
        #       }
        #   }
        # }
        done, error = False, None
        message = job_json.get('message', '') if job_json else None
        if job_state == 'failure':
            # if the job has failed, return message as error
            error = message
            message = None
            done = True
        elif job_state not in ('queued', 'running', None):
            # any other terminal state: surface the transport error, if any
            error = job_error
            done = True
        elif timed_out:
            # Would like to post a message to user (not sure how)
            self.log_error(0, 'Timeout error: Process still running')
            error = 'Timeout error: Process still running'
            if job_error is not None:
                error += ' - %s' % job_error
            done = True
        return done, message, error
None + runtime = 0 + retries = 0 + max_retries = 3 + done = False + while not done: + # Will run every seconds for seconds + job_json, job_error = self.get(url, None) + job_state = job_json.get('state', None) if job_json else None + # ignore error if status is provided in the job + if job_error and job_state is None: + errors.append(str(job_error)) + retries += 1 + if retries > max_retries: + error = " - ".join(errors) + self.log_error(0, 'Job error: Reached max retries.') + done = True + else: + retries = 0 + done, message, error = self._is_job_done(job_json, job_state, job_error, runtime >= timeout) + if not done: + time.sleep(increment) + runtime += increment + return message, error + + def get(self, api, params=None, headers=None): + method = 'GET' + dummy, message, error = self.send_request(method, api, params, json=None, headers=headers) + return message, error + + def post(self, api, body, params=None, headers=None, files=None): + method = 'POST' + retry = 3 + while retry > 0: + dummy, message, error = self.send_request(method, api, params, json=body, headers=headers, files=files) + if error and type(error) is dict and 'temporarily locked' in error.get('message', ''): + time.sleep(30) + retry = retry - 1 + continue + break + return message, error + + def patch(self, api, body, params=None, headers=None, files=None): + method = 'PATCH' + retry = 3 + while retry > 0: + dummy, message, error = self.send_request(method, api, params, json=body, headers=headers, files=files) + if error and type(error) is dict and 'temporarily locked' in error.get('message', ''): + time.sleep(30) + retry = retry - 1 + continue + break + return message, error + + def delete(self, api, body=None, params=None, headers=None): + method = 'DELETE' + dummy, message, error = self.send_request(method, api, params, json=body, headers=headers) + return message, error + + def options(self, api, params=None, headers=None): + method = 'OPTIONS' + dummy, message, error = self.send_request(method, 
api, params, json=None, headers=headers) + return message, error + + def set_version(self, message): + try: + version = message.get('version', 'not found') + except AttributeError: + self.ontap_version['valid'] = False + self.ontap_version['full'] = 'unreadable message' + return + for key in self.ontap_version: + try: + self.ontap_version[key] = version.get(key, -1) + except AttributeError: + self.ontap_version[key] = -1 + self.ontap_version['valid'] = all( + self.ontap_version[key] != -1 for key in self.ontap_version if key != 'valid' + ) + + def get_ontap_version(self): + if self.ontap_version['valid']: + return self.ontap_version['generation'], self.ontap_version['major'], self.ontap_version['minor'] + return -1, -1, -1 + + def get_node_version_using_rest(self): + # using GET rather than HEAD because the error messages are different, + # and we need the version as some REST options are not available in earlier versions + method = 'GET' + api = 'cluster/nodes' + params = {'fields': ['version']} + status_code, message, error = self.send_request(method, api, params=params) + if message and 'records' in message and len(message['records']) > 0: + message = message['records'][0] + return status_code, message, error + + def get_ontap_version_from_params(self): + """ Provide a way to override the current version + This is required when running a custom vsadmin role as ONTAP does not currently allow access to /api/cluster. + This may also be interesting for testing :) + Report a warning if API call failed to report version. + Report a warning if current version could be fetched and is different. 
+ """ + try: + version = [int(x) for x in self.force_ontap_version.split('.')] + if len(version) == 2: + version.append(0) + gen, major, minor = version + except (TypeError, ValueError) as exc: + self.module.fail_json( + msg='Error: unexpected format in force_ontap_version, expecting G.M.m or G.M, as in 9.10.1, got: %s, error: %s' + % (self.force_ontap_version, exc)) + + warning = '' + read_version = self.get_ontap_version() + if read_version == (-1, -1, -1): + warning = ', unable to read current version:' + elif read_version != (gen, major, minor): + warning = ' but current version is %s' % self.ontap_version['full'] + if warning: + warning = 'Forcing ONTAP version to %s%s' % (self.force_ontap_version, warning) + self.set_version({'version': { + 'generation': gen, + 'major': major, + 'minor': minor, + 'full': 'set by user to %s' % self.force_ontap_version, + }}) + return warning + + def get_ontap_version_using_rest(self): + # using GET rather than HEAD because the error messages are different, + # and we need the version as some REST options are not available in earlier versions + method = 'GET' + api = 'cluster' + params = {'fields': ['version']} + status_code, message, error = self.send_request(method, api, params=params) + try: + if error and 'are available in precluster.' 
in error.get('message', ''): + # in precluster mode, version is not available :( + status_code, message, error = self.get_node_version_using_rest() + except AttributeError: + pass + self.set_version(message) + if error: + self.log_error(status_code, str(error)) + if self.force_ontap_version: + warning = self.get_ontap_version_from_params() + if error: + warning += ' error: %s, status_code: %s' % (error, status_code) + if warning: + self.module.warn(warning) + msg = 'Forcing ONTAP version to %s' % self.force_ontap_version + if error: + self.log_error('INFO', msg) + else: + self.log_debug('INFO', msg) + error = None + status_code = 200 + self.is_rest_error = str(error) if error else None + return status_code + + def convert_parameter_keys_to_dot_notation(self, parameters): + """ Get all variable set in a list and add them to a dict so that partially_supported_rest_properties works correctly """ + if isinstance(parameters, dict): + temp = {} + for parameter in parameters: + if isinstance(parameters[parameter], list): + if parameter not in temp: + temp[parameter] = {} + for adict in parameters[parameter]: + if isinstance(adict, dict): + for key in adict: + temp[parameter + '.' + key] = 0 + parameters.update(temp) + return parameters + + def _is_rest(self, used_unsupported_rest_properties=None, partially_supported_rest_properties=None, parameters=None): + if self.use_rest not in ['always', 'auto', 'never']: + error = "use_rest must be one of: never, always, auto. 
Got: '%s'" % self.use_rest + return False, error + if self.use_rest == "always" and used_unsupported_rest_properties: + error = "REST API currently does not support '%s'" % ', '.join(used_unsupported_rest_properties) + return True, error + if self.use_rest == 'never': + # force ZAPI if requested + return False, None + # don't send a new request if we already know the version + status_code = self.get_ontap_version_using_rest() if self.get_ontap_version() == (-1, -1, -1) else 200 + if self.use_rest == "always" and partially_supported_rest_properties: + # If a variable is on a list we need to move it to a dict for this check to work correctly. + temp_parameters = parameters.copy() + temp_parameters = self.convert_parameter_keys_to_dot_notation(temp_parameters) + error = '\n'.join( + "Minimum version of ONTAP for %s is %s." % (property[0], str(property[1])) + for property in partially_supported_rest_properties + if self.get_ontap_version()[:3] < property[1] and property[0] in temp_parameters + ) + if error != '': + return True, 'Error: %s Current version: %s.' % (error, self.get_ontap_version()) + if self.use_rest == 'always': + # ignore error, it will show up later when calling another REST API + return True, None + # we're now using 'auto' + if used_unsupported_rest_properties: + # force ZAPI if some parameter requires it + if self.get_ontap_version()[:2] > (9, 5): + self.fallback_to_zapi_reason =\ + 'because of unsupported option(s) or option value(s) in REST: %s' % used_unsupported_rest_properties + self.module.warn('Falling back to ZAPI %s' % self.fallback_to_zapi_reason) + return False, None + if partially_supported_rest_properties: + # if ontap version is lower than partially_supported_rest_properties version, force ZAPI, only if the paramater is used + # If a variable is on a list we need to move it to a dict for this check to work correctly. 
+ temp_parameters = parameters.copy() + temp_parameters = self.convert_parameter_keys_to_dot_notation(temp_parameters) + for property in partially_supported_rest_properties: + if self.get_ontap_version()[:3] < property[1] and property[0] in temp_parameters: + self.fallback_to_zapi_reason =\ + 'because of unsupported option(s) or option value(s) "%s" in REST require %s' % (property[0], str(property[1])) + self.module.warn('Falling back to ZAPI %s' % self.fallback_to_zapi_reason) + return False, None + if self.get_ontap_version()[:2] in ((9, 4), (9, 5)): + # we can't trust REST support on 9.5, and not at all on 9.4 + return False, None + return (True, None) if status_code == 200 else (False, None) + + def is_rest_supported_properties(self, parameters, unsupported_rest_properties=None, partially_supported_rest_properties=None, report_error=False): + used_unsupported_rest_properties = None + if unsupported_rest_properties: + used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in parameters] + use_rest, error = self.is_rest(used_unsupported_rest_properties, partially_supported_rest_properties, parameters) + if report_error: + return use_rest, error + if error: + self.module.fail_json(msg=error) + return use_rest + + def is_rest(self, used_unsupported_rest_properties=None, partially_supported_rest_properties=None, parameters=None): + ''' only return error if there is a reason to ''' + use_rest, error = self._is_rest(used_unsupported_rest_properties, partially_supported_rest_properties, parameters) + if used_unsupported_rest_properties is None and partially_supported_rest_properties is None: + return use_rest + return use_rest, error + + def log_error(self, status_code, message): + LOG.error("%s: %s", status_code, message) + self.errors.append(message) + self.debug_logs.append((status_code, message)) + + def log_debug(self, status_code, content): + LOG.debug("%s: %s", status_code, content) + self.debug_logs.append((status_code, content)) + + 
def write_to_file(self, tag, data=None, filepath=None, append=True): + ''' + This function is only for debug purposes, all calls to write_to_file should be removed + before submitting. + If data is None, tag is considered as data + else tag is a label, and data is data. + ''' + if filepath is None: + filepath = '/tmp/ontap_log' + mode = 'a' if append else 'w' + with open(filepath, mode) as afile: + if data is not None: + afile.write("%s: %s\n" % (str(tag), str(data))) + else: + afile.write(str(tag)) + afile.write('\n') + + def write_errors_to_file(self, tag=None, filepath=None, append=True): + if tag is None: + tag = 'Error' + for error in self.errors: + self.write_to_file(tag, error, filepath, append) + if not append: + append = True + + def write_debug_log_to_file(self, tag=None, filepath=None, append=True): + if tag is None: + tag = 'Debug' + for status_code, message in self.debug_logs: + self.write_to_file(tag, status_code, filepath, append) + if not append: + append = True + self.write_to_file(tag, message, filepath, append) diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py b/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py new file mode 100644 index 000000000..d16c992ec --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/module_utils/netapp_elementsw_module.py @@ -0,0 +1,41 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. 
class NaElementSWModule(object):
    """Thin helper around a SolidFire/ElementSW connection object."""

    def __init__(self, elem):
        # live connection to the ElementSW cluster
        self.elem_connect = elem
        self.parameters = dict()

    def volume_id_exists(self, volume_id):
        """
        Return volume_id if volume exists for given volume_id

        :param volume_id: volume ID
        :type volume_id: int
        :return: Volume ID if found, None if not found
        :rtype: int
        """
        listing = self.elem_connect.list_volumes(volume_ids=[volume_id])
        for candidate in listing.volumes:
            # a volume with a non-empty delete_time is pending deletion
            if candidate.volume_id == volume_id and str(candidate.delete_time) == "":
                return candidate.volume_id
        return None
try:
    import ipaddress
    HAS_IPADDRESS_LIB = True
    IMPORT_ERROR = None
except ImportError as exc:
    HAS_IPADDRESS_LIB = False
    IMPORT_ERROR = to_native(exc)


def _check_ipaddress_is_present(module):
    '''
    report error at runtime rather than when attempting to load the module
    '''
    if HAS_IPADDRESS_LIB:
        return None
    module.fail_json(msg="Error: the python ipaddress package is required for this module. Import error: %s" % IMPORT_ERROR)


def _get_ipv4orv6_address(ip_address, module):
    '''
    return IPV4Adress or IPV6Address object, failing the module on bad input
    '''
    _check_ipaddress_is_present(module)
    # python 2.7 requires unicode format
    try:
        return ipaddress.ip_address(u'%s' % ip_address)
    except ValueError as exc:
        module.fail_json(msg='Error: Invalid IP address value %s - %s' % (ip_address, to_native(exc)))


def _get_ipv4orv6_network(ip_address, netmask, strict, module):
    '''
    return IPV4Network or IPV6Network object, failing the module on bad input
    '''
    _check_ipaddress_is_present(module)
    # python 2.7 requires unicode format
    if netmask is None:
        ip_addr = u'%s' % ip_address
    else:
        ip_addr = u'%s/%s' % (ip_address, netmask)
    try:
        return ipaddress.ip_network(ip_addr, strict)
    except ValueError as exc:
        error = 'Error: Invalid IP network value %s' % ip_addr
        if 'has host bits set' in to_native(exc):
            error += '. Please specify a network address without host bits set'
        elif netmask is not None:
            error += '. Check address and netmask values'
        error += ': %s.' % to_native(exc)
        module.fail_json(msg=error)


def _check_ipv6_has_prefix_length(ip_address, netmask, module):
    '''fail unless an IPv6 netmask is given as a prefix length (int or digits)'''
    addr = _get_ipv4orv6_address(ip_address, module)
    if not isinstance(addr, ipaddress.IPv6Address) or isinstance(netmask, int):
        return
    if ':' in netmask:
        module.fail_json(msg='Error: only prefix_len is supported for IPv6 addresses, got %s' % netmask)


def validate_ip_address_is_network_address(ip_address, module):
    '''
    Validate if the given IP address is a network address (i.e. it's host bits are set to 0)
    ONTAP doesn't validate if the host bits are set,
    and hence doesn't add a new address unless the IP is from a different network.
    So this validation allows the module to be idempotent.
    :return: None
    '''
    _get_ipv4orv6_network(ip_address, None, True, module)


def validate_and_compress_ip_address(ip_address, module):
    '''
    0's in IPv6 addresses can be compressed to save space
    This will be a noop for IPv4 address
    In addition, it makes sure the address is in a valid format
    '''
    # return compressed value for IPv6 and value in . notation for IPv4
    return str(_get_ipv4orv6_address(ip_address, module))


def netmask_length_to_netmask(ip_address, length, module):
    '''
    input: ip_address and netmask length
    output: netmask in dot notation
    '''
    return str(_get_ipv4orv6_network(ip_address, length, False, module).netmask)


def netmask_to_netmask_length(ip_address, netmask, module):
    '''
    input: ip_address and netmask in dot notation for IPv4, expanded netmask is not supported for IPv6
           netmask as int or a str representation of int is also accepted
    output: netmask length as int
    '''
    _check_ipv6_has_prefix_length(ip_address, netmask, module)
    return _get_ipv4orv6_network(ip_address, netmask, False, module).prefixlen
def cmp(obj1, obj2):
    """
    Python 3 does not have a cmp function, this will do the cmp.

    Comparison is case-insensitive for strings, and both case- and
    order-insensitive for lists of strings.  None always sorts first.

    :param obj1: first object to check
    :param obj2: second object to check
    :return: -1, 0 or 1 (python 2 style three-way comparison)
    """
    if obj1 is None:
        return -1
    # convert to lower case for string comparison.
    if isinstance(obj1, str) and isinstance(obj2, str):
        obj1, obj2 = obj1.lower(), obj2.lower()
    # if list has string element, convert string to lower case.
    elif isinstance(obj1, list) and isinstance(obj2, list):
        obj1 = sorted(x.lower() if isinstance(x, str) else x for x in obj1)
        obj2 = sorted(x.lower() if isinstance(x, str) else x for x in obj2)
    return (obj1 > obj2) - (obj1 < obj2)
# NetAppModule methods (continuation of the class defined just above).
# NOTE(review): the leading '% new_module)' fragment of module_replaces() that
# the paste cut here is completed in the preceding block and not repeated.
    def set_parameters(self, ansible_params):
        """Record and return only the parameters that are not None."""
        self.parameters = {}
        for param in ansible_params:
            if ansible_params[param] is not None:
                self.parameters[param] = ansible_params[param]
        return self.parameters

    def fall_back_to_zapi(self, module, msg, parameters):
        """Fail if use_rest is 'always'; warn and return False if 'auto'."""
        if parameters['use_rest'].lower() == 'always':
            module.fail_json(msg='Error: %s' % msg)
        if parameters['use_rest'].lower() == 'auto':
            module.warn('Falling back to ZAPI: %s' % msg)
            return False

    def check_and_set_parameters(self, module):
        """Record non-None parameters; optionally fail when a required parameter is None."""
        self.parameters = {}
        check_for_none = netapp_utils.has_feature(module, 'check_required_params_for_none')
        if check_for_none:
            required_keys = [key for key, value in module.argument_spec.items() if value.get('required')]
        for param in module.params:
            if module.params[param] is not None:
                self.parameters[param] = module.params[param]
            elif check_for_none and param in required_keys:
                module.fail_json(msg="%s requires a value, got: None" % param)
        return self.parameters

    @staticmethod
    def type_error_message(type_str, key, value):
        """Build a consistent error message for type mismatches."""
        return "expecting '%s' type for %s: %s, got: %s" % (type_str, repr(key), repr(value), type(value))

    def get_value_for_bool(self, from_zapi, value, key=None):
        """
        Convert boolean values to string or vice-versa
        If from_zapi = True, value is converted from string (as it appears in ZAPI) to boolean
        If from_zapi = False, value is converted from boolean to string
        For get() method, from_zapi = True
        For modify(), create(), from_zapi = False
        :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
        :param value: value of the boolean attribute
        :param key: if present, force error checking to validate type, and accepted values
        :return: string or boolean
        """
        if value is None:
            return None
        if from_zapi:
            if key is not None and not isinstance(value, str):
                raise TypeError(self.type_error_message('str', key, value))
            if key is not None and value not in ('true', 'false'):
                raise ValueError('Unexpected value: %s received from ZAPI for boolean attribute: %s' % (repr(value), repr(key)))
            return value == 'true'
        if key is not None and not isinstance(value, bool):
            raise TypeError(self.type_error_message('bool', key, value))
        return 'true' if value else 'false'

    def get_value_for_int(self, from_zapi, value, key=None):
        """
        Convert integer values to string or vice-versa
        If from_zapi = True, value is converted from string (as it appears in ZAPI) to integer
        If from_zapi = False, value is converted from integer to string
        For get() method, from_zapi = True
        For modify(), create(), from_zapi = False
        :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
        :param value: value of the integer attribute
        :param key: if present, force error checking to validate type
        :return: string or integer
        """
        if value is None:
            return None
        if from_zapi:
            if key is not None and not isinstance(value, str):
                raise TypeError(self.type_error_message('str', key, value))
            return int(value)
        if key is not None and not isinstance(value, int):
            raise TypeError(self.type_error_message('int', key, value))
        return str(value)

    def get_value_for_list(self, from_zapi, zapi_parent, zapi_child=None, data=None):
        """
        Convert a python list() to NaElement or vice-versa
        If from_zapi = True, value is converted from NaElement (parent-children structure) to list()
        If from_zapi = False, value is converted from list() to NaElement
        :param zapi_parent: ZAPI parent key or the ZAPI parent NaElement
        :param zapi_child: ZAPI child key
        :param data: list() to be converted to NaElement parent-children object
        :param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
        :return: list() or NaElement
        """
        if from_zapi:
            if zapi_parent is None:
                return []
            return [zapi_child.get_content() for zapi_child in zapi_parent.get_children()]

        zapi_parent = netapp_utils.zapi.NaElement(zapi_parent)
        for item in data:
            zapi_parent.add_new_child(zapi_child, item)
        return zapi_parent

    def get_cd_action(self, current, desired):
        ''' takes a desired state and a current state, and return an action:
            create, delete, None
            eg:
            is_present = 'absent'
            some_object = self.get_object(source)
            if some_object is not None:
                is_present = 'present'
            action = cd_action(current=is_present, desired = self.desired.state())
        '''
        desired_state = desired['state'] if 'state' in desired else 'present'
        if current is None and desired_state == 'absent':
            return None
        if current is not None and desired_state == 'present':
            return None
        # change in state
        self.changed = True
        return 'create' if current is None else 'delete'

    @staticmethod
    def check_keys(current, desired):
        ''' TODO: raise an error if keys do not match
            with the exception of:
            new_name, state in desired
        '''

    @staticmethod
    def compare_lists(current, desired, get_list_diff):
        ''' compares two lists and return a list of elements that are either the desired elements or elements that are
            modified from the current state depending on the get_list_diff flag
            :param: current: current item attribute in ONTAP
            :param: desired: attributes from playbook
            :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
            :return: list of attributes to be modified
            :rtype: list
        '''
        # work on copies: items are removed as they are matched
        current_copy = deepcopy(current)
        desired_copy = deepcopy(desired)

        # get what is in desired and not in current
        desired_diff_list = []
        for item in desired:
            if item in current_copy:
                current_copy.remove(item)
            else:
                desired_diff_list.append(item)

        # get what is in current but not in desired
        current_diff_list = []
        for item in current:
            if item in desired_copy:
                desired_copy.remove(item)
            else:
                current_diff_list.append(item)

        if desired_diff_list or current_diff_list:
            # there are changes
            return desired_diff_list if get_list_diff else desired
        else:
            return None

    def get_modified_attributes(self, current, desired, get_list_diff=False):
        ''' takes two dicts of attributes and return a dict of attributes that are
            not in the current state
            It is expected that all attributes of interest are listed in current and
            desired.
            :param: current: current attributes in ONTAP
            :param: desired: attributes from playbook
            :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
            :return: dict of attributes to be modified
            :rtype: dict

            NOTE: depending on the attribute, the caller may need to do a modify or a
            different operation (eg move volume if the modified attribute is an
            aggregate name)
        '''
        # if the object does not exist, we can't modify it
        modified = {}
        if current is None:
            return modified

        if not isinstance(desired, dict):
            raise TypeError("Expecting dict, got: %s with current: %s" % (desired, current))
        # error out if keys do not match
        self.check_keys(current, desired)

        # collect changed attributes
        for key, value in current.items():
            # if self.netapp_module:
            #     self.netapp_module.rest_api.log_debug('KDV', "%s:%s:%s" % (key, desired.get(key), value))
            if desired.get(key) is not None:
                modified_value = None
                if isinstance(value, list):
                    modified_value = self.compare_lists(value, desired[key], get_list_diff)  # get modified list from current and desired
                elif isinstance(value, dict):
                    modified_value = self.get_modified_attributes(value, desired[key]) or None
                else:
                    try:
                        result = cmp(value, desired[key])
                    except TypeError as exc:
                        raise TypeError("%s, key: %s, value: %s, desired: %s" % (repr(exc), key, repr(value), repr(desired[key])))
                    # if self.netapp_module:
                    #     self.netapp_module.rest_api.log_debug('RESULT', result)
                    if result != 0:
                        modified_value = desired[key]
                if modified_value is not None:
                    modified[key] = modified_value

        if modified:
            self.changed = True
        return modified

    def is_rename_action(self, source, target):
        ''' takes a source and target object, and returns True
            if a rename is required
            eg:
            source = self.get_object(source_name)
            target = self.get_object(target_name)
            action = is_rename_action(source, target)
            :return: None for error, True for rename action, False otherwise

            I'm not sure we need this function any more.
            I think a better way to do it is to:
            1. look if a create is required (eg the target resource does not exist and state==present)
            2. consider that a create can be fulfilled by different actions: rename, create from scratch, move, ...
            So for rename:
                cd_action = self.na_helper.get_cd_action(current, self.parameters)
                if cd_action == 'create' and self.parameters.get('from_name'):
                    # creating new subnet by renaming
                    current = self.get_subnet(self.parameters['from_name'])
                    if current is None:
                        self.module.fail_json(msg="Error renaming: subnet %s does not exist" %
                                              self.parameters['from_name'])
                    rename = True
                    cd_action = None
        '''
        if source is None and target is None:
            # error, do nothing
            # cannot rename a non existent resource
            return None
        if target is None:
            # source is not None and target is None:
            # rename is in order
            self.changed = True
            return True
        # target is not None, so do nothing as the destination exists
        # if source is None, maybe we already renamed
        # if source is not None, maybe a new resource was created after being renamed
        return False

    @staticmethod
    def sanitize_wwn(initiator):
        ''' igroup initiator may or may not be using WWN format: eg 20:00:00:25:B5:00:20:01
            if format is matched, convert initiator to lowercase, as this is what ONTAP is using '''
        # NOTE(review): re.match only anchors at the start, so strings with a
        # valid 8-pair prefix but trailing characters are also lowercased --
        # presumably harmless for igroup initiators; confirm if this matters.
        wwn_format = r'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){7}'
        initiator = initiator.strip()
        if re.match(wwn_format, initiator):
            initiator = initiator.lower()
        return initiator

    def safe_get(self, an_object, key_list, allow_sparse_dict=True):
        ''' recursively traverse a dictionary or any object supporting get_item or indexing
            (in our case, python dicts and NAElement responses, and lists)
            It is expected that some keys can be missing, this is controlled with allow_sparse_dict

            return value if the key chain is exhausted
            return None if a key is not found and allow_sparse_dict is True
            raise KeyError if a key is not found and allow_sparse_dict is False (looking for exact match)
            raise TypeError if an intermediate element cannot be indexed,
              unless the element is None and allow_sparse_dict is True
        '''
        if not key_list:
            # we've exhausted the keys, good!
            return an_object
        key_list = list(key_list)  # preserve original values
        key = key_list.pop(0)
        try:
            return self.safe_get(an_object[key], key_list, allow_sparse_dict=allow_sparse_dict)
        except (KeyError, IndexError) as exc:
            # error, key or index not found
            if allow_sparse_dict:
                return None
            raise exc
        except TypeError as exc:
            # error, we were expecting a dict or NAElement
            if allow_sparse_dict and an_object is None:
                return None
            raise exc

    def convert_value(self, value, convert_to):
        """Convert a ZAPI string value to convert_to; return (value, error) tuple."""
        if convert_to is None:
            return value, None
        if not isinstance(value, str):
            return None, ('Unexpected type: %s for %s' % (type(value), str(value)))
        if convert_to == str:
            return value, None
        if convert_to == int:
            try:
                return int(value), None
            except ValueError as exc:
                return None, ('Unexpected value for int: %s, %s' % (str(value), str(exc)))
        if convert_to == bool:
            if value not in ('true', 'false'):
                return None, 'Unexpected value: %s received from ZAPI for boolean attribute' % value
            return value == 'true', None
        if convert_to == 'bool_online':
            return value == 'online', None
        self.ansible_module.fail_json(msg='Error: Unexpected value for convert_to: %s' % convert_to)

    def zapi_get_value(self, na_element, key_list, required=False, default=None, convert_to=None):
        """ read a value from na_element using key_list

            If required is True, an error is reported if a key in key_list is not found.
            If required is False and the value is not found, uses default as the value.
            If convert_to is set to str, bool, int, the ZAPI value is converted from str to the desired type.
              supported values: None, the python types int, str, bool, special 'bool_online'

        Errors: fail_json is called for:
            - a key is not found and required=True,
            - a format conversion error
        """

        # keep a copy, as the list is mutated
        saved_key_list = list(key_list)
        try:
            value = self.safe_get(na_element, key_list, allow_sparse_dict=not required)
        except (KeyError, TypeError) as exc:
            error = exc
        else:
            value, error = self.convert_value(value, convert_to) if value is not None else (default, None)
        if error:
            # fail_json does not return, so the unbound 'value' on the
            # exception path is never read
            self.ansible_module.fail_json(msg='Error reading %s from %s: %s' % (saved_key_list, na_element.to_string(), error))
        return value

    def zapi_get_attrs(self, na_element, attr_dict, result):
        """ Retrieve a list of attributes from na_elements
        see na_ontap_volume for an example.
        na_element: xml element as returned by ZAPI.
        attr_dict:
            A dict of dict, with format:
                key: dict(key_list, required=False, default=None, convert_to=None, omitnone=False)
            The keys are used to index a result dictionary, values are read from a ZAPI object indexed by key_list.
            If required is True, an error is reported if a key in key_list is not found.
            If required is False and the value is not found, uses default as the value.
            If convert_to is set to str, bool, int, the ZAPI value is converted from str to the desired type.
            I'm not sure there is much value in omitnone, but it preserves backward compatibility.
            When the value is None, if omitnone is False, a None value is recorded, if True, the key is not set.
        result: an existing dictionary.  keys are added or updated based on attrs.

        Errors: fail_json is called for:
            - a key is not found and required=True,
            - a format conversion error
        """
        for key, kwargs in attr_dict.items():
            omitnone = kwargs.pop('omitnone', False)
            value = self.zapi_get_value(na_element, **kwargs)
            if value is not None or not omitnone:
                result[key] = value

    def _filter_out_none_entries_from_dict(self, adict, allow_empty_list_or_dict):
        """take a dict as input and return a dict without keys whose values are None
           return empty dicts or lists if allow_empty_list_or_dict otherwise skip empty dicts or lists.
        """
        result = {}
        for key, value in adict.items():
            if isinstance(value, (list, dict)):
                sub = self.filter_out_none_entries(value, allow_empty_list_or_dict)
                if sub or allow_empty_list_or_dict:
                    # allow empty dict or list if allow_empty_list_or_dict is set.
                    # skip empty dict or list otherwise
                    result[key] = sub
            elif value is not None:
                # skip None value
                result[key] = value
        return result

    def _filter_out_none_entries_from_list(self, alist, allow_empty_list_or_dict):
        """take a list as input and return a list without elements whose values are None
           return empty dicts or lists if allow_empty_list_or_dict otherwise skip empty dicts or lists.
        """
        result = []
        for item in alist:
            if isinstance(item, (list, dict)):
                sub = self.filter_out_none_entries(item, allow_empty_list_or_dict)
                if sub or allow_empty_list_or_dict:
                    # allow empty dict or list if allow_empty_list_or_dict is set.
                    # skip empty dict or list otherwise
                    result.append(sub)
            elif item is not None:
                # skip None value
                result.append(item)
        return result

    def filter_out_none_entries(self, list_or_dict, allow_empty_list_or_dict=False):
        """take a dict or list as input and return a dict/list without keys/elements whose values are None
           return empty dicts or lists if allow_empty_list_or_dict otherwise skip empty dicts or lists.
        """

        if isinstance(list_or_dict, dict):
            return self._filter_out_none_entries_from_dict(list_or_dict, allow_empty_list_or_dict)

        if isinstance(list_or_dict, list):
            return self._filter_out_none_entries_from_list(list_or_dict, allow_empty_list_or_dict)

        raise TypeError('unexpected type %s' % type(list_or_dict))

    @staticmethod
    def get_caller(depth):
        '''return the name of:
             our caller if depth is 1
             the caller of our caller if depth is 2
             the caller of the caller of our caller if depth is 3
             ...
        '''
        # one more caller in the stack
        depth += 1
        frames = traceback.extract_stack(limit=depth)
        try:
            function_name = frames[0].name
        except AttributeError:
            # python 2.7 does not have named attributes for frames
            try:
                function_name = frames[0][2]
            except Exception as exc:  # pylint: disable=broad-except
                function_name = 'Error retrieving function name: %s - %s' % (str(exc), repr(frames))
        return function_name

    def fail_on_error(self, error, api=None, stack=False, depth=1, previous_errors=None):
        '''depth identifies how far is the caller in the call stack'''
        if error is None:
            return
        # one more caller to account for this function
        depth += 1
        if api is not None:
            error = 'calling api: %s: %s' % (api, error)
        results = dict(msg='Error in %s: %s' % (self.get_caller(depth), error))
        if stack:
            results['stack'] = traceback.format_stack()
        if previous_errors:
            results['previous_errors'] = ' - '.join(previous_errors)
        if getattr(self, 'ansible_module', None) is not None:
            self.ansible_module.fail_json(**results)
        raise AttributeError('Expecting self.ansible_module to be set when reporting %s' % repr(results))

    def compare_chmod_value(self, current_permissions, desired_permissions):
        """
        compare current unix_permissions to desired unix_permissions.
        :return: True if the same, False if not the same or desired unix_permissions is not valid.
        """
        if current_permissions is None:
            return False
        if desired_permissions.isdigit():
            return int(current_permissions) == int(desired_permissions)
        # ONTAP will throw error as invalid field if the length is not 9 or 12.
        if len(desired_permissions) not in [12, 9]:
            return False
        desired_octal_value = ''
        # if the length is 12, first three character sets userid('s'), groupid('s') and sticky('t') attributes
        if len(desired_permissions) == 12:
            if desired_permissions[0] not in ['s', '-'] or desired_permissions[1] not in ['s', '-']\
                    or desired_permissions[2] not in ['t', '-']:
                return False
            desired_octal_value += str(self.char_to_octal(desired_permissions[:3]))
        # if the len is 9, start from 0 else start from 3.
        start_range = len(desired_permissions) - 9
        for i in range(start_range, len(desired_permissions), 3):
            if desired_permissions[i] not in ['r', '-'] or desired_permissions[i + 1] not in ['w', '-']\
                    or desired_permissions[i + 2] not in ['x', '-']:
                return False
            group_permission = self.char_to_octal(desired_permissions[i:i + 3])
            desired_octal_value += str(group_permission)
        return int(current_permissions) == int(desired_octal_value)

    def char_to_octal(self, chars):
        """
        :param chars: Characters to be converted into octal values.
        :return: octal value of the individual group permission.
        """
        total = 0
        if chars[0] in ['r', 's']:
            total += 4
        if chars[1] in ['w', 's']:
            total += 2
        if chars[2] in ['x', 't']:
            total += 1
        return total

    def ignore_missing_vserver_on_delete(self, error, vserver_name=None):
        """ When a resource is expected to be absent, it's OK if the containing vserver is also absent.
            This function expects self.parameters('vserver') to be set or the vserver_name argument to be passed.
            error is an error returned by rest_generic.get_xxxx.
        """
        if self.parameters.get('state') != 'absent':
            return False
        if vserver_name is None:
            if self.parameters.get('vserver') is None:
                self.ansible_module.fail_json(
                    msg='Internal error, vserver name is required, when processing error: %s' % error, exception=traceback.format_exc())
            vserver_name = self.parameters['vserver']
        if isinstance(error, str):
            pass
        elif isinstance(error, dict):
            if 'message' in error:
                error = error['message']
            else:
                self.ansible_module.fail_json(
                    msg='Internal error, error should contain "message" key, found: %s' % error, exception=traceback.format_exc())
        else:
            self.ansible_module.fail_json(
                msg='Internal error, error should be str or dict, found: %s, %s' % (type(error), error), exception=traceback.format_exc())
        return 'SVM "%s" does not exist.' % vserver_name in error

    def remove_hal_links(self, records):
        """ Remove all _links entries """
        if isinstance(records, dict):
            records.pop('_links', None)
            for record in records.values():
                self.remove_hal_links(record)
        if isinstance(records, list):
            for record in records:
                self.remove_hal_links(record)

# --- patch metadata preserved from the collapsed paste ---
# diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py
# new file mode 100644
# index 000000000..d96f23031
# --- /dev/null
# +++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_application.py
# @@ -0,0 +1,180 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2020-2022, Laurent Nicolas <laurentn@netapp.com>
# All rights reserved.
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +""" Support class for NetApp ansible modules + + Provides accesss to application resources using REST calls +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class RestApplication(): + """Helper methods to manage application and application components""" + def __init__(self, rest_api, svm_name, app_name): + self.svm_name = svm_name + self.app_name = app_name + self.app_uuid = None + self.rest_api = rest_api + + def _set_application_uuid(self): + """Use REST application/applications to get application uuid""" + api = 'application/applications' + query = {'svm.name': self.svm_name, 'name': self.app_name} + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error is None and record is not None: + self.app_uuid = record['uuid'] + return None, error + + def get_application_uuid(self): + """Use REST application/applications to get application uuid""" + error = None + if self.app_uuid is None: + dummy, error = self._set_application_uuid() + return self.app_uuid, error + + def get_application_details(self, template=None): + """Use REST application/applications to get application details""" + uuid, error = self.get_application_uuid() + if error: + return uuid, error + if uuid is None: # not found + return None, None + query = dict(fields='name,%s,statistics' % template) if template else None + api = 'application/applications/%s' % uuid + return rest_generic.get_one_record(self.rest_api, api, query) + + def create_application(self, body): + """Use REST application/applications san template to create one or more LUNs""" + dummy, error = self.fail_if_uuid('create_application') + if error is not None: + return dummy, error + api = 'application/applications' + query = {'return_records': 'true'} + response, error = rest_generic.post_async(self.rest_api, api, body, query) + if error and 'Unexpected argument' in error and 
'exclude_aggregates' in error: + error += ' "exclude_aggregates" requires ONTAP 9.9.1 GA or later.' + return response, error + + def patch_application(self, body): + """Use REST application/applications san template to add one or more LUNs""" + dummy, error = self.fail_if_no_uuid() + if error is not None: + return dummy, error + api = 'application/applications' + query = {'return_records': 'true'} + return rest_generic.patch_async(self.rest_api, api, self.app_uuid, body, query) + + def create_application_body(self, template_name, template_body, smart_container=True): + if not isinstance(smart_container, bool): + error = "expecting bool value for smart_container, got: %s" % smart_container + return None, error + body = { + 'name': self.app_name, + 'svm': {'name': self.svm_name}, + 'smart_container': smart_container, + template_name: template_body + } + return body, None + + def delete_application(self): + """Use REST application/applications to delete app""" + dummy, error = self.fail_if_no_uuid() + if error is not None: + return dummy, error + api = 'application/applications' + response, error = rest_generic.delete_async(self.rest_api, api, self.app_uuid) + self.app_uuid = None + return response, error + + def get_application_components(self): + """Use REST application/applications to get application components""" + dummy, error = self.fail_if_no_uuid() + if error is not None: + return dummy, error + api = 'application/applications/%s/components' % self.app_uuid + return rest_generic.get_0_or_more_records(self.rest_api, api) + + def get_application_component_uuid(self): + """Use REST application/applications to get component uuid + Assume a single component per application + """ + dummy, error = self.fail_if_no_uuid() + if error is not None: + return dummy, error + api = 'application/applications/%s/components' % self.app_uuid + record, error = rest_generic.get_one_record(self.rest_api, api, fields='uuid') + if error is None and record is not None: + return 
record['uuid'], None + return None, error + + def get_application_component_details(self, comp_uuid=None): + """Use REST application/applications to get application components""" + dummy, error = self.fail_if_no_uuid() + if error is not None: + return dummy, error + if comp_uuid is None: + # assume a single component + comp_uuid, error = self.get_application_component_uuid() + if error: + return comp_uuid, error + if comp_uuid is None: + error = 'no component for application %s' % self.app_name + return None, error + api = 'application/applications/%s/components/%s' % (self.app_uuid, comp_uuid) + return rest_generic.get_one_record(self.rest_api, api) + + def get_application_component_backing_storage(self): + """Use REST application/applications to get component uuid. + + Assume a single component per application + """ + dummy, error = self.fail_if_no_uuid() + if error is not None: + return dummy, error + response, error = self.get_application_component_details() + if error or response is None: + return response, error + return response['backing_storage'], None + + def fail_if_no_uuid(self): + """Prevent a logic error.""" + if self.app_uuid is None: + msg = 'function should not be called before application uuid is set.' + return None, msg + return None, None + + def fail_if_uuid(self, fname): + """Prevent a logic error.""" + if self.app_uuid is not None: + msg = 'function %s should not be called when application uuid is set: %s.' % (fname, self.app_uuid) + return None, msg + return None, None diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_generic.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_generic.py new file mode 100644 index 000000000..4c570f8d8 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_generic.py @@ -0,0 +1,101 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. 
+# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2021, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +""" Support functions for NetApp ansible modules + + Provides common processing for responses and errors from REST calls +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +def build_query_with_fields(query, fields): + ''' for GET requests''' + if fields is not None and query is None: + query = {} + if fields is not None: + query['fields'] = fields + return query + + +def build_query_with_timeout(query, timeout): + ''' for POST, PATCH, DELETE requests''' + params = {} if query else None + if timeout > 0: + # without return_timeout, REST returns immediately with a 202 and a job link + # but the job status is 'running' + # with return_timeout, REST returns quickly with a 200 and a job link + # and the job status is 'success' + params = dict(return_timeout=timeout) + if query is not None: + params.update(query) + return params + + +def get_one_record(rest_api, api, query=None, fields=None): + query = build_query_with_fields(query, fields) + response, error = rest_api.get(api, query) + record, error = rrh.check_for_0_or_1_records(api, response, error, query) + return record, error + + +def get_0_or_more_records(rest_api, api, query=None, fields=None): + query = build_query_with_fields(query, fields) + response, error = rest_api.get(api, query) + records, error = rrh.check_for_0_or_more_records(api, response, error) + return records, error + + +def post_async(rest_api, api, body, query=None, timeout=30, job_timeout=30, headers=None, raw_error=False, files=None): + # see delete_async for async and sync operations and status codes + response, error = rest_api.post(api, body=body, params=build_query_with_timeout(query, timeout), headers=headers, files=files) + # limit the polling interval to something between 5 seconds and 60 seconds + increment = min(max(job_timeout / 6, 5), 60) + response, error = 
rrh.check_for_error_and_job_results(api, response, error, rest_api, increment=increment, timeout=job_timeout, raw_error=raw_error) + return response, error + + +def patch_async(rest_api, api, uuid_or_name, body, query=None, timeout=30, job_timeout=30, headers=None, raw_error=False, files=None): + # cluster does not use uuid or name, and query based PATCH does not use UUID (for restit) + api = '%s/%s' % (api, uuid_or_name) if uuid_or_name is not None else api + response, error = rest_api.patch(api, body=body, params=build_query_with_timeout(query, timeout), headers=headers, files=files) + increment = min(max(job_timeout / 6, 5), 60) + response, error = rrh.check_for_error_and_job_results(api, response, error, rest_api, increment=increment, timeout=job_timeout, raw_error=raw_error) + return response, error + + +def delete_async(rest_api, api, uuid, query=None, body=None, timeout=30, job_timeout=30, headers=None, raw_error=False): + # query based DELETE does not use UUID (for restit) + api = '%s/%s' % (api, uuid) if uuid is not None else api + response, error = rest_api.delete(api, body=body, params=build_query_with_timeout(query, timeout), headers=headers) + increment = min(max(job_timeout / 6, 5), 60) + response, error = rrh.check_for_error_and_job_results(api, response, error, rest_api, increment=increment, timeout=job_timeout, raw_error=raw_error) + return response, error diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_owning_resource.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_owning_resource.py new file mode 100644 index 000000000..597c02d50 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_owning_resource.py @@ -0,0 +1,26 @@ +""" Support functions for NetApp ansible modules + Provides common processing for responses and errors from REST calls +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from 
ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +def get_export_policy_id(rest_api, policy_name, svm_name, module): + api = 'protocols/nfs/export-policies' + query = {'name': policy_name, 'svm.name': svm_name} + record, error = rest_generic.get_one_record(rest_api, api, query) + if error: + module.fail_json(msg='Could not find export policy %s on SVM %s' % (policy_name, svm_name)) + return record['id'] if record else None + + +def get_volume_uuid(rest_api, volume_name, svm_name, module): + api = 'storage/volumes' + query = {'name': volume_name, 'svm.name': svm_name} + record, error = rest_generic.get_one_record(rest_api, api, query) + if error: + module.fail_json(msg='Could not find volume %s on SVM %s' % (volume_name, svm_name)) + return record['uuid'] if record else None diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py new file mode 100644 index 000000000..59e04b3b3 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_response_helpers.py @@ -0,0 +1,137 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2020, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2020, Laurent Nicolas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

""" Support functions for NetApp ansible modules

    Provides common processing for responses and errors from REST calls
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


def api_error(api, error):
    """format error message for api error, if error is present"""
    if error is None:
        return None
    return "calling: %s: got %s." % (api, error)


def no_response_error(api, response):
    """format error message for empty response"""
    return "calling: %s: no response %s." % (api, repr(response))


def job_error(response, error):
    """format error message for job error"""
    return "job reported error: %s, received %s." % (error, repr(response))


def unexpected_response_error(api, response, query=None):
    """format error message for response not matching expectations"""
    msg = "calling: %s: unexpected response %s." % (api, repr(response))
    if query:
        msg += " for query: %s" % repr(query)
    return response, msg


def get_num_records(response):
    """ num_records is not always present
        if absent, count the records or assume 1
    """
    try:
        return response['num_records']
    except KeyError:
        pass
    if 'records' in response:
        return len(response['records'])
    return 1


def check_for_0_or_1_records(api, response, error, query=None):
    """return None if no record was returned by the API
       return record if one record was returned by the API
       return error otherwise (error, no response, more than 1 record)
    """
    # guard clauses: transport error, then empty response
    if error:
        return None, (api_error(api, error) if api else error)
    if not response:
        return None, no_response_error(api, response)
    num_records = get_num_records(response)
    if num_records == 0:
        # not found
        return None, None
    if num_records != 1:
        return unexpected_response_error(api, response, query)
    if 'records' in response:
        return response['records'][0], None
    return response, None


def check_for_0_or_more_records(api, response, error):
    """return None if no record was returned by the API
       return records if one or more records was returned by the API
       return error otherwise (error, no response)
    """
    if error:
        return None, (api_error(api, error) if api else error)
    if not response:
        return None, no_response_error(api, response)
    if get_num_records(response) == 0:
        # not found
        return None, None
    if 'records' in response:
        return response['records'], None
    missing = 'No "records" key in %s' % response
    return None, (api_error(api, missing) if api else missing)


def check_for_error_and_job_results(api, response, error, rest_api, **kwargs):
    """report first error if present
       otherwise call wait_on_job and retrieve job response or error
    """
    format_error = not kwargs.pop('raw_error', False)
    if error:
        return response, (api_error(api, error) if format_error else error)
    # we expect two types of response:
    #   a plain response, for synchronous calls
    #   or a job response, for asynchronous calls
    # and it's possible to expect both when 'return_timeout' > 0
    #
    # when using a query instead of UUID, REST returns jobs (a list of jobs)
    # rather than a single job; only restit can send a query, all other calls
    # are using a UUID.
    if not isinstance(response, dict):
        return response, error
    if 'job' in response:
        job = response['job']
    elif 'jobs' not in response:
        job = None
    elif response['num_records'] > 1:
        job = None
        error = "multiple jobs in progress, can't check status"
    else:
        job = response['jobs'][0]
    if job:
        job_response, error = rest_api.wait_on_job(job, **kwargs)
        if error:
            if format_error:
                error = job_error(response, error)
        else:
            response['job_response'] = job_response
    return response, error
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" Support functions for NetApp ansible modules + + Provides common processing for responses and errors from REST calls +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +def get_users(rest_api, parameters, fields=None): + api = 'security/accounts' + query = dict() + for field in parameters: + query[field] = parameters[field] + if fields is not None: + query['fields'] = fields + response, error = rest_api.get(api, query) + users, error = rrh.check_for_0_or_more_records(api, response, error) + return users, error diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_volume.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_volume.py new file mode 100644 index 000000000..10af36754 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_volume.py @@ -0,0 +1,61 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. 
+# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2020, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +""" Support functions for NetApp ansible modules + + Provides common processing for responses and errors from REST calls +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +def get_volumes(rest_api, vserver=None, name=None): + api = 'storage/volumes' + query = {} + if vserver is not None: + query['svm.name'] = vserver + if name is not None: + query['name'] = name + if not query: + query = None + return rest_generic.get_0_or_more_records(rest_api, api, query) + + +def get_volume(rest_api, vserver, name, fields=None): + api = 'storage/volumes' + query = dict(name=name) + query['svm.name'] = vserver + return rest_generic.get_one_record(rest_api, api, query, fields=fields) + + +def patch_volume(rest_api, uuid, body, query=None, job_timeout=120): + api = 'storage/volumes' + return rest_generic.patch_async(rest_api, api, uuid, body, query=query, job_timeout=job_timeout) diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/rest_vserver.py b/ansible_collections/netapp/ontap/plugins/module_utils/rest_vserver.py new file mode 100644 index 000000000..cbdfdaef9 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/module_utils/rest_vserver.py @@ -0,0 +1,61 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2021, Laurent Nicolas +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +""" Support functions for NetApp ansible modules + + Provides common processing for responses and errors from REST calls +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.netapp.ontap.plugins.module_utils.rest_generic import get_one_record + + +def get_vserver(rest_api, name, fields=None): + api = 'svm/svms' + query = {'name': name} + if fields is not None: + query['fields'] = fields + vserver, error = get_one_record(rest_api, api, query) + return vserver, error + + +def get_vserver_uuid(rest_api, name, module=None, error_on_none=False): + """ returns a tuple (uuid, error) + when module is set and an error is found, fails the module and exit + when error_on_none IS SET, force an error if vserver is not found + """ + record, error = get_vserver(rest_api, name, 'uuid') + if error and module: + module.fail_json(msg="Error fetching vserver %s: %s" % (name, error)) + if not error and record is None and error_on_none: + error = "vserver %s does not exist or is not a data vserver." % name + if module: + module.fail_json(msg="Error %s" % error) + return record['uuid'] if not error and record else None, error diff --git a/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py b/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py new file mode 100644 index 000000000..71a3f2496 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/module_utils/zapis_svm.py @@ -0,0 +1,133 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2020, Laurent Nicolas +# All rights reserved. 
''' Support class for NetApp ansible modules

    Provides access to SVM (vserver) resources using ZAPI calls
'''

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import traceback

from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils


def get_vserver(svm_cx, vserver_name):
    """
    Return vserver information.

    :param svm_cx: ZAPI connection object used to invoke the request.
    :param vserver_name: name of the vserver to look up.
    :return:
        vserver object if vserver found
        None if vserver is not found
    :rtype: object/None
    """
    # build a vserver-get-iter request querying on the vserver name only
    vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
    query_details = netapp_utils.zapi.NaElement.create_node_with_children(
        'vserver-info', **{'vserver-name': vserver_name})

    query = netapp_utils.zapi.NaElement('query')
    query.add_child_elem(query_details)
    vserver_info.add_child_elem(query)

    # NOTE: raises NaApiError on failure; callers are expected to handle it
    result = svm_cx.invoke_successfully(vserver_info, enable_tunneling=False)
    vserver_details = None
    if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
        # at least one match: flatten the first vserver-info element into a dict
        attributes_list = result.get_child_by_name('attributes-list')
        vserver_info = attributes_list.get_child_by_name('vserver-info')
        aggr_list = []
        # vserver aggr-list can be empty by default
        get_list = vserver_info.get_child_by_name('aggr-list')
        if get_list is not None:
            aggregates = get_list.get_children()
            aggr_list.extend(aggr.get_content() for aggr in aggregates)
        protocols = []
        # allowed-protocols is not empty for data SVM, but is for node SVM
        allowed_protocols = vserver_info.get_child_by_name('allowed-protocols')
        if allowed_protocols is not None:
            get_protocols = allowed_protocols.get_children()
            protocols.extend(protocol.get_content() for protocol in get_protocols)
        # flatten the ZAPI XML response into a plain dict for the caller
        vserver_details = {'name': vserver_info.get_child_content('vserver-name'),
                           'root_volume': vserver_info.get_child_content('root-volume'),
                           'root_volume_aggregate': vserver_info.get_child_content('root-volume-aggregate'),
                           'root_volume_security_style': vserver_info.get_child_content('root-volume-security-style'),
                           'subtype': vserver_info.get_child_content('vserver-subtype'),
                           'aggr_list': aggr_list,
                           'language': vserver_info.get_child_content('language'),
                           'quota_policy': vserver_info.get_child_content('quota-policy'),
                           'snapshot_policy': vserver_info.get_child_content('snapshot-policy'),
                           'allowed_protocols': protocols,
                           'ipspace': vserver_info.get_child_content('ipspace'),
                           'comment': vserver_info.get_child_content('comment'),
                           'max_volumes': vserver_info.get_child_content('max-volumes')}

    return vserver_details


def modify_vserver(svm_cx, module, name, modify, parameters=None):
    '''
    Modify vserver.
    :param svm_cx: ZAPI connection object used to invoke the request.
    :param module: AnsibleModule object, used to fail on ZAPI error.
    :param name: vserver name
    :param modify: list of modify attributes
    :param parameters: customer original inputs
        modify only contains the difference between the customer inputs and current
        for some attributes, it may be safer to apply the original inputs
    '''
    if parameters is None:
        parameters = modify

    vserver_modify = netapp_utils.zapi.NaElement('vserver-modify')
    vserver_modify.add_new_child('vserver-name', name)
    # map each requested attribute to its ZAPI element; list-valued
    # attributes (allowed_protocols, aggr_list) need nested elements
    for attribute in modify:
        if attribute == 'comment':
            vserver_modify.add_new_child('comment', parameters['comment'])
        if attribute == 'language':
            vserver_modify.add_new_child('language', parameters['language'])
        if attribute == 'quota_policy':
            vserver_modify.add_new_child('quota-policy', parameters['quota_policy'])
        if attribute == 'snapshot_policy':
            vserver_modify.add_new_child('snapshot-policy', parameters['snapshot_policy'])
        if attribute == 'max_volumes':
            vserver_modify.add_new_child('max-volumes', parameters['max_volumes'])
        if attribute == 'allowed_protocols':
            allowed_protocols = netapp_utils.zapi.NaElement('allowed-protocols')
            for protocol in parameters['allowed_protocols']:
                allowed_protocols.add_new_child('protocol', protocol)
            vserver_modify.add_child_elem(allowed_protocols)
        if attribute == 'aggr_list':
            aggregates = netapp_utils.zapi.NaElement('aggr-list')
            for aggr in parameters['aggr_list']:
                aggregates.add_new_child('aggr-name', aggr)
            vserver_modify.add_child_elem(aggregates)
    try:
        svm_cx.invoke_successfully(vserver_modify, enable_tunneling=False)
    except netapp_utils.zapi.NaApiError as exc:
        module.fail_json(msg='Error modifying SVM %s: %s' % (name, to_native(exc)),
                         exception=traceback.format_exc())
--git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py new file mode 100644 index 000000000..d1ca57250 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_active_directory.py @@ -0,0 +1,328 @@ +#!/usr/bin/python + +# (c) 2020-2022, NetApp Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_active_directory +author: NetApp Ansible Team (@carchi8py) +short_description: NetApp ONTAP configure active directory +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 20.9.0 +description: + - Configure Active Directory. + - REST requires ONTAP 9.12.1 or later. +options: + state: + description: + - Whether the Active Directory should exist or not + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - The name of the vserver to use. + required: true + type: str + + account_name: + description: + - Active Directory account NetBIOS name. + required: true + type: str + + admin_password: + description: + - Administrator password required for Active Directory account creation. + required: true + type: str + + admin_username: + description: + - Administrator username required for Active Directory account creation. + required: true + type: str + + domain: + description: + - Fully qualified domain name. + type: str + aliases: ['fqdn'] + + force_account_overwrite: + description: + - If true and a machine account with the same name as specified in 'account-name' exists in Active Directory, it will be overwritten and reused. + type: bool + + organizational_unit: + description: + - Organizational unit under which the Active Directory account will be created. + type: str + +notes: + - Supports check_mode. 
+ - supports ZAPI and REST. REST requires ONTAP 9.12.1 or later. +''' +EXAMPLES = """ +- + name: Ontap test + hosts: localhost + collections: + - netapp.ontap + tasks: + - name: Create active directory account. + netapp.ontap.na_ontap_active_directory: + hostname: 10.193.78.219 + username: admin + password: netapp1! + https: True + validate_certs: False + vserver: laurentncluster-1 + state: present + account_name: carchi + admin_password: password + admin_username: carchi + domain: addomain.com + + - name: Modify domain name. + netapp.ontap.na_ontap_active_directory: + hostname: 10.193.78.219 + username: admin + password: netapp1! + https: True + validate_certs: False + vserver: laurentncluster-1 + state: present + account_name: carchi + admin_password: password + admin_username: carchi + domain: addomain_new.com + force_account_overwrite: True + + - name: Delete active directory account. + netapp.ontap.na_ontap_active_directory: + hostname: 10.193.78.219 + username: admin + password: netapp1! 
RETURN = """

"""

import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic


class NetAppOntapActiveDirectory:
    """Create, modify or delete an Active Directory machine account for a vserver.

    Uses REST when the cluster supports it (9.12.1+ for these APIs) and
    falls back to ZAPI otherwise.
    """

    def __init__(self):
        """Collect module parameters and set up the REST or ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            vserver=dict(required=True, type='str'),
            state=dict(choices=['present', 'absent'], default='present'),
            account_name=dict(required=True, type='str'),
            admin_password=dict(required=True, type='str', no_log=True),
            admin_username=dict(required=True, type='str'),
            domain=dict(type="str", default=None, aliases=['fqdn']),
            force_account_overwrite=dict(type="bool", default=None),
            organizational_unit=dict(type="str", default=None)
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # set by get_active_directory_rest(); needed for REST PATCH/DELETE by uuid
        self.svm_uuid = None

        # REST support for active directory requires 9.12.1; otherwise fall back to ZAPI
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 12, 1):
            msg = 'REST requires ONTAP 9.12.1 or later for active directory APIs'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_active_directory(self):
        """Return the current AD account config as a dict, or None if not found."""
        if self.use_rest:
            return self.get_active_directory_rest()
        # ZAPI: query active-directory-account-get-iter on account name and vserver
        active_directory_iter = netapp_utils.zapi.NaElement('active-directory-account-get-iter')
        active_directory_info = netapp_utils.zapi.NaElement('active-directory-account-config')
        active_directory_info.add_new_child('account-name', self.parameters['account_name'])
        active_directory_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(active_directory_info)
        active_directory_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(active_directory_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error searching for Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())
        record = {}
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            account_info = result.get_child_by_name('attributes-list').get_child_by_name('active-directory-account-config')
            # copy only the attributes this module manages, skipping absent ones
            for zapi_key, key in (('account-name', 'account_name'), ('domain', 'domain'), ('organizational-unit', 'organizational_unit')):
                value = account_info.get_child_content(zapi_key)
                if value is not None:
                    record[key] = value
        # normalize case, using user inputs
        for key, value in record.items():
            if key in self.parameters and self.parameters[key].lower() == value.lower():
                record[key] = self.parameters[key]
        return record or None

    def create_active_directory(self):
        """Create the AD account (ZAPI path; dispatches to REST when enabled)."""
        if self.use_rest:
            return self.create_active_directory_rest()
        active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-create')
        active_directory_obj.add_new_child('account-name', self.parameters['account_name'])
        active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
        active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
        # optional attributes are only sent when the user provided them
        if self.parameters.get('domain'):
            active_directory_obj.add_new_child('domain', self.parameters['domain'])
        if self.parameters.get('force_account_overwrite'):
            active_directory_obj.add_new_child('force-account-overwrite', str(self.parameters['force_account_overwrite']))
        if self.parameters.get('organizational_unit'):
            active_directory_obj.add_new_child('organizational-unit', self.parameters['organizational_unit'])
        try:
            self.server.invoke_successfully(active_directory_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating vserver Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)))

    def delete_active_directory(self):
        """Delete the AD account (ZAPI path; dispatches to REST when enabled)."""
        if self.use_rest:
            return self.delete_active_directory_rest()
        # NOTE(review): the ZAPI delete call does not pass account-name; the
        # account appears to be implied by the vserver-scoped connection — confirm
        active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-delete')
        active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
        active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
        try:
            self.server.invoke_successfully(active_directory_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting vserver Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)))

    def modify_active_directory(self):
        """Modify the AD account (ZAPI path; dispatches to REST when enabled)."""
        if self.use_rest:
            return self.modify_active_directory_rest()
        active_directory_obj = netapp_utils.zapi.NaElement('active-directory-account-modify')
        active_directory_obj.add_new_child('admin-password', self.parameters['admin_password'])
        active_directory_obj.add_new_child('admin-username', self.parameters['admin_username'])
        if self.parameters.get('domain'):
            active_directory_obj.add_new_child('domain', self.parameters['domain'])
        if self.parameters.get('force_account_overwrite'):
            active_directory_obj.add_new_child('force-account-overwrite', str(self.parameters['force_account_overwrite']))
        try:
            self.server.invoke_successfully(active_directory_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying vserver Active Directory %s: %s' %
                                  (self.parameters['account_name'], to_native(error)))

    def get_active_directory_rest(self):
        """REST GET: return the AD config dict (and cache svm uuid), or None."""
        api = 'protocols/active-directory'
        query = {
            'name': self.parameters['account_name'],
            'svm.name': self.parameters['vserver'],
            'fields': 'fqdn,name,organizational_unit'
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg='Error searching for Active Directory %s: %s' % (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())
        if record:
            # remember the owning SVM uuid for later PATCH/DELETE calls
            self.svm_uuid = record['svm']['uuid']
            # map REST field names back to this module's parameter names
            return {
                'account_name': record.get('name'),
                'domain': record.get('fqdn'),
                'organizational_unit': record.get('organizational_unit')
            }
        return None

    def create_active_directory_rest(self):
        """REST POST: create the AD account."""
        api = 'protocols/active-directory'
        body = {
            'svm.name': self.parameters['vserver'],
            'name': self.parameters['account_name'],
            'username': self.parameters['admin_username'],
            'password': self.parameters['admin_password']
        }
        if self.parameters.get('domain'):
            body['fqdn'] = self.parameters['domain']
        if self.parameters.get('force_account_overwrite'):
            body['force_account_overwrite'] = self.parameters['force_account_overwrite']
        if self.parameters.get('organizational_unit'):
            body['organizational_unit'] = self.parameters['organizational_unit']
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg='Error creating vserver Active Directory %s: %s' % (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_active_directory_rest(self):
        """REST PATCH: modify the AD account, keyed on the cached svm uuid."""
        api = 'protocols/active-directory'
        body = {'username': self.parameters['admin_username'], 'password': self.parameters['admin_password']}
        if self.parameters.get('domain'):
            body['fqdn'] = self.parameters['domain']
        if self.parameters.get('force_account_overwrite'):
            body['force_account_overwrite'] = self.parameters['force_account_overwrite']
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body)
        if error:
            self.module.fail_json(msg='Error modifying vserver Active Directory %s: %s' % (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_active_directory_rest(self):
        """REST DELETE: delete the AD account, keyed on the cached svm uuid."""
        dummy, error = rest_generic.delete_async(self.rest_api, 'protocols/active-directory', self.svm_uuid,
                                                 body={'username': self.parameters['admin_username'], 'password': self.parameters['admin_password']})
        if error:
            self.module.fail_json(msg='Error deleting vserver Active Directory %s: %s' % (self.parameters['account_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Idempotent entry point: compare current vs desired state and act."""
        current = self.get_active_directory()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = None
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            # neither ZAPI nor REST paths support changing the OU in place
            if modify and 'organizational_unit' in modify:
                self.module.fail_json(msg='Error: organizational_unit cannot be modified; found %s.' % modify)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_active_directory()
            elif cd_action == 'delete':
                self.delete_active_directory()
            elif modify:
                self.modify_active_directory()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)


def main():
    """
    Execute action from playbook
    """
    command = NetAppOntapActiveDirectory()
    command.apply()


if __name__ == '__main__':
    main()
The address can be either an IPv4 or an IPv6 address. + required: true + type: str + + skip_config_validation: + description: + - Skip the validation of the specified preferred DC configuration. + type: bool + +notes: + - This module requires ONTAP 9.12.1 or later for REST API. + - CLI support is available for other lower ONTAP versions. +''' +EXAMPLES = """ + - name: Create active directory preferred domain controllers + netapp.ontap.na_ontap_active_directory_domain_controllers: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + vserver: ansible + state: present + fqdn: test.com + server_ip: 10.10.10.10 + + - name: Delete active directory preferred domain controllers + netapp.ontap.na_ontap_active_directory_domain_controllers: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + vserver: ansible + state: absent + fqdn: test.com + server_ip: 10.10.10.10 +""" +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver + + +class NetAppOntapActiveDirectoryDC: + """ + Create or delete Active Directory preferred domain controllers + """ + def __init__(self): + """ + Initialize the Ontap ActiveDirectoryDC class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + fqdn=dict(required=True, type='str'), + server_ip=dict(required=True, type='str'), + skip_config_validation=dict(required=False, type='bool'), + )) + + self.module = AnsibleModule( + 
argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_active_directory_domain_controllers', 9, 6) + self.svm_uuid = None + + def get_active_directory_preferred_domain_controllers_rest(self): + """ + Retrieves the Active Directory preferred DC configuration of an SVM. + """ + if self.rest_api.meets_rest_minimum_version(True, 9, 12, 0): + api = "protocols/active-directory/%s/preferred-domain-controllers" % (self.svm_uuid) + query = { + 'svm.name': self.parameters['vserver'], + 'fqdn': self.parameters['fqdn'], + 'server_ip': self.parameters['server_ip'], + 'fields': 'server_ip,fqdn' + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error on fetching Active Directory preferred DC configuration of an SVM: %s" % error) + if record: + return record + else: + api = 'private/cli/vserver/active-directory/preferred-dc' + query = { + 'vserver': self.parameters['vserver'], + 'domain': self.parameters['fqdn'], + 'preferred_dc': self.parameters['server_ip'], + 'fields': 'domain,preferred-dc' + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error on fetching Active Directory preferred DC configuration of an SVM using cli: %s" % error) + if record: + return { + 'server_ip': self.na_helper.safe_get(record, ['preferred_dc']), + 'fqdn': self.na_helper.safe_get(record, ['domain']) + } + return None + + def create_active_directory_preferred_domain_controllers_rest(self): + """ + Adds the Active Directory preferred DC configuration to an SVM. 
+ """ + query = {} + if self.rest_api.meets_rest_minimum_version(True, 9, 12, 0): + api = "protocols/active-directory/%s/preferred-domain-controllers" % (self.svm_uuid) + body = { + 'fqdn': self.parameters['fqdn'], + 'server_ip': self.parameters['server_ip'] + } + if 'skip_config_validation' in self.parameters: + query['skip_config_validation'] = self.parameters['skip_config_validation'] + else: + api = "private/cli/vserver/active-directory/preferred-dc/add" + body = { + "vserver": self.parameters['vserver'], + "domain": self.parameters['fqdn'], + "preferred_dc": [self.parameters['server_ip']] + } + if 'skip_config_validation' in self.parameters: + query['skip_config_validation'] = self.parameters['skip_config_validation'] + dummy, error = rest_generic.post_async(self.rest_api, api, body, query) + if error: + self.module.fail_json(msg="Error on adding Active Directory preferred DC configuration to an SVM: %s" % error) + + def delete_active_directory_preferred_domain_controllers_rest(self): + """ + Removes the Active Directory preferred DC configuration from an SVM. 
+ """ + if self.rest_api.meets_rest_minimum_version(True, 9, 12, 0): + api = "protocols/active-directory/%s/preferred-domain-controllers/%s/%s" % (self.svm_uuid, self.parameters['fqdn'], self.parameters['server_ip']) + record, error = rest_generic.delete_async(self.rest_api, api, None) + else: + api = "private/cli/vserver/active-directory/preferred-dc/remove" + body = { + "vserver": self.parameters['vserver'], + "domain": self.parameters['fqdn'], + "preferred_dc": [self.parameters['server_ip']] + } + dummy, error = rest_generic.delete_async(self.rest_api, api, None, body) + if error: + self.module.fail_json(msg="Error on deleting Active Directory preferred DC configuration of an SVM: %s" % error) + + def apply(self): + self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + current = self.get_active_directory_preferred_domain_controllers_rest() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_active_directory_preferred_domain_controllers_rest() + elif cd_action == 'delete': + self.delete_active_directory_preferred_domain_controllers_rest() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap Cifs Local Group object and runs the correct play task + """ + obj = NetAppOntapActiveDirectoryDC() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py new file mode 100644 index 000000000..560e81069 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_aggregate.py @@ -0,0 +1,1121 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_aggregate
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_ontap_aggregate
+short_description: NetApp ONTAP manage aggregates.
+extends_documentation_fragment:
+    - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py)
+
+description:
+  - Create, delete, or manage aggregates on ONTAP.
+
+options:
+
+  state:
+    description:
+      - Whether the specified aggregate should exist or not.
+    choices: ['present', 'absent']
+    default: 'present'
+    type: str
+
+  service_state:
+    description:
+      - Whether the specified aggregate should be enabled or disabled. Creates the aggregate if it doesn't exist.
+      - Supported from 9.11.1 or later in REST.
+    choices: ['online', 'offline']
+    type: str
+
+  name:
+    description:
+      - The name of the aggregate to manage.
+    required: true
+    type: str
+
+  from_name:
+    description:
+      - Name of the aggregate to be renamed.
+    type: str
+    version_added: 2.7.0
+
+  nodes:
+    description:
+      - Node(s) for the aggregate to be created on. If no node specified, mgmt lif home will be used.
+      - ZAPI only - if multiple nodes specified an aggr stripe will be made.
+      - With REST, only one node can be specified. If disk_count is present, node name is required.
+    type: list
+    elements: str
+
+  disk_type:
+    description:
+      - Type of disk to use to build aggregate.
+      - Not supported with REST - see C(disk_class).
+      - SSD-NVM, SSD-CAP were added with ONTAP 9.6.
+      - VMLUN was added with ONTAP 9.9.
+    choices: ['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD-CAP', 'SSD-NVM', 'VMDISK', 'VMLUN', 'VMLUN-SSD']
+    type: str
+    version_added: 2.7.0
+
+  disk_class:
+    description:
+      - Class of disk to use to build aggregate.
+      - C(capacity_flash) is listed in swagger, but rejected as invalid by ONTAP.
+ choices: ['capacity', 'performance', 'archive', 'solid_state', 'array', 'virtual', 'data_center', 'capacity_flash'] + type: str + version_added: 21.16.0 + + disk_count: + description: + - Number of disks to place into the aggregate, including parity disks. + - The disks in this newly-created aggregate come from the spare disk pool. + - The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided. + - Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1]. + - Required when C(state=present). + - Modifiable only if specified disk_count is larger than current disk_count. + - Cannot create raidgroup with 1 disk when using raid type raid4. + - If the disk_count % raid_size == 1, only disk_count/raid_size * raid_size will be added. + - If disk_count is 6, raid_type is raid4, raid_size 4, all 6 disks will be added. + - If disk_count is 5, raid_type is raid4, raid_size 4, 5/4 * 4 = 4 will be added. 1 will not be added. + - With REST, C(nodes) is required if C(disk_count) is present. + type: int + + disk_size: + description: + - Disk size to use in 4K block size. Disks within 10% of specified size will be used. + - With REST, this is converted to bytes using 4096. Use C(disk_size_with_unit) to skip the conversion. + type: int + version_added: 2.7.0 + + disk_size_with_unit: + description: + - Disk size to use in the specified unit. + - It is a positive integer number followed by unit of T/G/M/K. For example, 72G, 1T and 32M. + - Or the unit can be omitted for bytes (REST also accepts B). + - This option is ignored if a specific list of disks is specified through the "disks" parameter. + - You must only use one of either "disk-size" or "disk-size-with-unit" parameters. + - With REST, this is converted to bytes, assuming K=1024. + type: str + + raid_size: + description: + - Sets the maximum number of drives per raid group. 
+ type: int + version_added: 2.7.0 + + raid_type: + description: + - Specifies the type of RAID groups to use in the new aggregate. + - raid_0 is only available on ONTAP Select. + choices: ['raid4', 'raid_dp', 'raid_tec', 'raid_0'] + type: str + version_added: 2.7.0 + + unmount_volumes: + description: + - If set to "true", this option specifies that all of the volumes hosted by the given aggregate are to be unmounted + before the offline operation is executed. + - By default, the system will reject any attempt to offline an aggregate that hosts one or more online volumes. + - Not supported with REST, by default REST unmount volumes when trying to offline aggregate. + type: bool + + disks: + description: + - Specific list of disks to use for the new aggregate. + - To create a "mirrored" aggregate with a specific list of disks, both 'disks' and 'mirror_disks' options must be supplied. + Additionally, the same number of disks must be supplied in both lists. + - Not supported with REST. + type: list + elements: str + version_added: 2.8.0 + + is_mirrored: + description: + - Specifies that the new aggregate be mirrored (have two plexes). + - If set to true, then the indicated disks will be split across the two plexes. By default, the new aggregate will not be mirrored. + - This option cannot be used when a specific list of disks is supplied with either the 'disks' or 'mirror_disks' options. + type: bool + version_added: 2.8.0 + + mirror_disks: + description: + - List of mirror disks to use. It must contain the same number of disks specified in 'disks'. + - Not supported with REST. + type: list + elements: str + version_added: 2.8.0 + + spare_pool: + description: + - Specifies the spare pool from which to select spare disks to use in creation of a new aggregate. + - Not supported with REST. 
+ choices: ['Pool0', 'Pool1'] + type: str + version_added: 2.8.0 + + wait_for_online: + description: + - Set this parameter to 'true' for synchronous execution during create (wait until aggregate status is online). + - Set this parameter to 'false' for asynchronous execution. + - For asynchronous, execution exits as soon as the request is sent, without checking aggregate status. + - Ignored with REST (always wait). + type: bool + default: false + version_added: 2.8.0 + + time_out: + description: + - time to wait for aggregate creation in seconds. + - default is set to 100 seconds. + type: int + default: 100 + version_added: 2.8.0 + + object_store_name: + description: + - Name of the object store configuration attached to the aggregate. + type: str + version_added: 2.9.0 + + allow_flexgroups: + description: + - This optional parameter allows attaching object store to an aggregate containing FlexGroup constituents. The default value is false. + - Mixing FabricPools and non-FabricPools within a FlexGroup is not recommended. + - All aggregates hosting constituents of a FlexGroup should be attached to the object store. + type: bool + version_added: 22.3.0 + + snaplock_type: + description: + - Type of snaplock for the aggregate being created. + choices: ['compliance', 'enterprise', 'non_snaplock'] + type: str + version_added: 20.1.0 + + ignore_pool_checks: + description: + - only valid when I(disks) option is used. + - disks in a plex should belong to the same spare pool, and mirror disks to another spare pool. + - when set to true, these checks are ignored. + - Ignored with REST as I(disks) is not supported. + type: bool + version_added: 20.8.0 + + encryption: + description: + - whether to enable software encryption. + - this is equivalent to -encrypt-with-aggr-key when using the CLI. + - requires a VE license. + type: bool + version_added: 21.14.0 + + tags: + description: + - Tags are an optional way to track the uses of a resource. 
+ - Tag values must be formatted as key:value strings, example ["team:csi", "environment:test"] + type: list + elements: str + version_added: 22.6.0 + +notes: + - supports check_mode. + - support ZAPI and REST. + +''' + +EXAMPLES = """ +- name: Create Aggregates and wait 5 minutes until aggregate is online in ZAPI. + netapp.ontap.na_ontap_aggregate: + state: present + service_state: online + name: ansibleAggr + disk_count: 10 + wait_for_online: True + time_out: 300 + snaplock_type: non_snaplock + use_rest: never + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Create Aggregates in REST. + netapp.ontap.na_ontap_aggregate: + state: present + service_state: online + name: ansibleAggr + disk_count: 10 + nodes: ontap-node + snaplock_type: non_snaplock + use_rest: always + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Manage Aggregates in ZAPI, modify service state. + netapp.ontap.na_ontap_aggregate: + state: present + service_state: offline + unmount_volumes: true + name: ansibleAggr + disk_count: 10 + use_rest: never + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Manage Aggregates in REST, increase disk count. 
+ netapp.ontap.na_ontap_aggregate: + state: present + name: ansibleAggr + disk_count: 20 + nodes: ontap-node + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Attach object store + netapp.ontap.na_ontap_aggregate: + state: present + name: aggr4 + object_store_name: sgws_305 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Rename Aggregates + netapp.ontap.na_ontap_aggregate: + state: present + service_state: online + from_name: ansibleAggr + name: ansibleAggr2 + disk_count: 20 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Delete Aggregates + netapp.ontap.na_ontap_aggregate: + state: absent + service_state: offline + unmount_volumes: true + name: ansibleAggr + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ + +""" +import re +import time +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +class NetAppOntapAggregate: + ''' object initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=True, type='str'), + disks=dict(required=False, type='list', elements='str'), + disk_count=dict(required=False, type='int', default=None), + disk_size=dict(required=False, 
type='int'), + disk_size_with_unit=dict(required=False, type='str'), + disk_class=dict(required=False, + choices=['capacity', 'performance', 'archive', 'solid_state', 'array', 'virtual', 'data_center', 'capacity_flash']), + disk_type=dict(required=False, + choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD-CAP', 'SSD-NVM', 'VMDISK', 'VMLUN', 'VMLUN-SSD']), + from_name=dict(required=False, type='str'), + mirror_disks=dict(required=False, type='list', elements='str'), + nodes=dict(required=False, type='list', elements='str'), + is_mirrored=dict(required=False, type='bool'), + raid_size=dict(required=False, type='int'), + raid_type=dict(required=False, choices=['raid4', 'raid_dp', 'raid_tec', 'raid_0']), + service_state=dict(required=False, choices=['online', 'offline']), + spare_pool=dict(required=False, choices=['Pool0', 'Pool1']), + state=dict(required=False, choices=['present', 'absent'], default='present'), + unmount_volumes=dict(required=False, type='bool'), + wait_for_online=dict(required=False, type='bool', default=False), + time_out=dict(required=False, type='int', default=100), + object_store_name=dict(required=False, type='str'), + allow_flexgroups=dict(required=False, type='bool'), + snaplock_type=dict(required=False, type='str', choices=['compliance', 'enterprise', 'non_snaplock']), + ignore_pool_checks=dict(required=False, type='bool'), + encryption=dict(required=False, type='bool'), + tags=dict(required=False, type='list', elements='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + mutually_exclusive=[ + ('is_mirrored', 'disks'), + ('is_mirrored', 'mirror_disks'), + ('is_mirrored', 'spare_pool'), + ('spare_pool', 'disks'), + ('disk_count', 'disks'), + ('disk_size', 'disk_size_with_unit'), + ('disk_class', 'disk_type'), + ], + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = 
OntapRestAPI(self.module) + self.uuid = None + # some attributes are not supported in earlier REST implementation + unsupported_rest_properties = ['disks', 'disk_type', 'mirror_disks', 'spare_pool', 'unmount_volumes'] + partially_supported_rest_properties = [['service_state', (9, 11, 1)], ['tags', (9, 13, 1)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + if 'tags' in self.parameters: + self.module.fail_json(msg="Error: tags only supported with REST.") + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + if self.parameters['state'] == 'present': + self.validate_options() + + def validate_options(self): + errors = [] + if self.use_rest: + if len(self.parameters.get('nodes', [])) > 1: + errors.append('only one node can be specified when using rest, found %s' % self.parameters['nodes']) + if 'disk_count' in self.parameters and 'nodes' not in self.parameters: + errors.append('nodes is required when disk_count is present') + else: + if self.parameters.get('mirror_disks') is not None and self.parameters.get('disks') is None: + errors.append('mirror_disks require disks options to be set') + if errors: + plural = 's' if len(errors) > 1 else '' + self.module.fail_json(msg='Error%s when validating options: %s.' 
% (plural, '; '.join(errors))) + + def aggr_get_iter(self, name): + """ + Return aggr-get-iter query results + :param name: Name of the aggregate + :return: NaElement if aggregate found, None otherwise + """ + + aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-attributes', **{'aggregate-name': name}) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + aggr_get_iter.add_child_elem(query) + result = None + try: + result = self.server.invoke_successfully(aggr_get_iter, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + if to_native(error.code) != '13040': + self.module.fail_json(msg='Error getting aggregate: %s' % to_native(error), exception=traceback.format_exc()) + return result + + def get_aggr(self, name=None): + """ + Fetch details if aggregate exists. + :param name: Name of the aggregate to be fetched + :return: + Dictionary of current details if aggregate found + None if aggregate is not found + """ + if name is None: + name = self.parameters.get('name') + if self.use_rest: + return self.get_aggr_rest(name) + aggr_get = self.aggr_get_iter(name) + if aggr_get and aggr_get.get_child_by_name('num-records') and int(aggr_get.get_child_content('num-records')) >= 1: + attr = aggr_get.get_child_by_name('attributes-list').get_child_by_name('aggr-attributes') + current_aggr = {'service_state': attr.get_child_by_name('aggr-raid-attributes').get_child_content('state')} + if attr.get_child_by_name('aggr-raid-attributes').get_child_content('disk-count'): + current_aggr['disk_count'] = int(attr.get_child_by_name('aggr-raid-attributes').get_child_content('disk-count')) + if attr.get_child_by_name('aggr-raid-attributes').get_child_content('encrypt-with-aggr-key'): + current_aggr['encryption'] = attr.get_child_by_name('aggr-raid-attributes').get_child_content('encrypt-with-aggr-key') == 'true' + snaplock_type = 
self.na_helper.safe_get(attr, ['aggr-snaplock-attributes', 'snaplock-type']) + if snaplock_type: + current_aggr['snaplock_type'] = snaplock_type + return current_aggr + return None + + def disk_get_iter(self, name): + """ + Return storage-disk-get-iter query results + Filter disk list by aggregate name, and only reports disk-name and plex-name + :param name: Name of the aggregate + :return: NaElement + """ + + disk_get_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter') + query_details = { + 'query': { + 'storage-disk-info': { + 'disk-raid-info': { + 'disk-aggregate-info': { + 'aggregate-name': name + } + } + } + } + } + disk_get_iter.translate_struct(query_details) + attributes = { + 'desired-attributes': { + 'storage-disk-info': { + 'disk-name': None, + 'disk-raid-info': { + 'disk_aggregate_info': { + 'plex-name': None + } + } + } + } + } + disk_get_iter.translate_struct(attributes) + + result = None + try: + result = self.server.invoke_successfully(disk_get_iter, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting disks: %s' % to_native(error), exception=traceback.format_exc()) + return result + + def get_aggr_disks(self, name): + """ + Fetch disks that are used for this aggregate. 
+ :param name: Name of the aggregate to be fetched + :return: + list of tuples (disk-name, plex-name) + empty list if aggregate is not found + """ + disks = [] + aggr_get = self.disk_get_iter(name) + if aggr_get and aggr_get.get_child_by_name('num-records') and int(aggr_get.get_child_content('num-records')) >= 1: + attr = aggr_get.get_child_by_name('attributes-list') + disks = [(disk_info.get_child_content('disk-name'), + disk_info.get_child_by_name('disk-raid-info').get_child_by_name('disk-aggregate-info').get_child_content('plex-name')) + for disk_info in attr.get_children()] + return disks + + def object_store_get_iter(self, name): + """ + Return aggr-object-store-get query results + :return: NaElement if object-store for given aggregate found, None otherwise + """ + + object_store_get_iter = netapp_utils.zapi.NaElement('aggr-object-store-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'object-store-information', **{'object-store-name': self.parameters.get('object_store_name'), + 'aggregate': name}) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + object_store_get_iter.add_child_elem(query) + result = None + try: + result = self.server.invoke_successfully(object_store_get_iter, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting object store: %s' % to_native(error), exception=traceback.format_exc()) + return result + + def get_object_store(self, name): + """ + Fetch details if object store attached to the given aggregate exists. 
+ :return: + Dictionary of current details if object store attached to the given aggregate is found + None if object store is not found + """ + if self.use_rest: + return self.get_object_store_rest() + object_store_get = self.object_store_get_iter(name) + if object_store_get and object_store_get.get_child_by_name('num-records') and int(object_store_get.get_child_content('num-records')) >= 1: + attr = object_store_get.get_child_by_name('attributes-list').get_child_by_name('object-store-information') + return {'object_store_name': attr.get_child_content('object-store-name')} + return None + + def aggregate_online(self): + """ + Set state of an offline aggregate to online + :return: None + """ + if self.use_rest: + return self.patch_aggr_rest('make service state online for', {'state': 'online'}) + online_aggr = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-online', **{'aggregate': self.parameters['name'], + 'force-online': 'true'}) + try: + self.server.invoke_successfully(online_aggr, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' % + (self.parameters['name'], self.parameters['service_state'], to_native(error)), + exception=traceback.format_exc()) + + def aggregate_offline(self): + """ + Set state of an online aggregate to offline + :return: None + """ + if self.use_rest: + return self.patch_aggr_rest('make service state offline for', {'state': 'offline'}) + offline_aggr = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-offline', **{'aggregate': self.parameters['name'], + 'force-offline': 'false', + 'unmount-volumes': str(self.parameters.get('unmount_volumes', False))}) + + # if disk add operation is in progress, cannot offline aggregate, retry few times. 
+ retry = 10 + while retry > 0: + try: + self.server.invoke_successfully(offline_aggr, enable_tunneling=True) + break + except netapp_utils.zapi.NaApiError as error: + if 'disk add operation is in progress' in to_native(error): + retry -= 1 + if retry > 0: + continue + self.module.fail_json(msg='Error changing the state of aggregate %s to %s: %s' % + (self.parameters['name'], self.parameters['service_state'], to_native(error)), + exception=traceback.format_exc()) + + @staticmethod + def get_disks_or_mirror_disks_object(name, disks): + ''' + create ZAPI object for disks or mirror_disks + ''' + disks_obj = netapp_utils.zapi.NaElement(name) + for disk in disks: + disk_info_obj = netapp_utils.zapi.NaElement('disk-info') + disk_info_obj.add_new_child('name', disk) + disks_obj.add_child_elem(disk_info_obj) + return disks_obj + + def create_aggr(self): + """ + Create aggregate + :return: None + """ + if self.use_rest: + return self.create_aggr_rest() + options = {'aggregate': self.parameters['name']} + if self.parameters.get('disk_class'): + options['disk-class'] = self.parameters['disk_class'] + if self.parameters.get('disk_type'): + options['disk-type'] = self.parameters['disk_type'] + if self.parameters.get('raid_type'): + options['raid-type'] = self.parameters['raid_type'] + if self.parameters.get('snaplock_type'): + options['snaplock-type'] = self.parameters['snaplock_type'] + if self.parameters.get('spare_pool'): + options['spare-pool'] = self.parameters['spare_pool'] + # int to str + if self.parameters.get('disk_count'): + options['disk-count'] = str(self.parameters['disk_count']) + if self.parameters.get('disk_size'): + options['disk-size'] = str(self.parameters['disk_size']) + if self.parameters.get('disk_size_with_unit'): + options['disk-size-with-unit'] = str(self.parameters['disk_size_with_unit']) + if self.parameters.get('raid_size'): + options['raid-size'] = str(self.parameters['raid_size']) + # boolean to str + if self.parameters.get('is_mirrored'): + 
options['is-mirrored'] = str(self.parameters['is_mirrored']).lower() + if self.parameters.get('ignore_pool_checks'): + options['ignore-pool-checks'] = str(self.parameters['ignore_pool_checks']).lower() + if self.parameters.get('encryption'): + options['encrypt-with-aggr-key'] = str(self.parameters['encryption']).lower() + aggr_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-create', **options) + if self.parameters.get('nodes'): + nodes_obj = netapp_utils.zapi.NaElement('nodes') + aggr_create.add_child_elem(nodes_obj) + for node in self.parameters['nodes']: + nodes_obj.add_new_child('node-name', node) + if self.parameters.get('disks'): + aggr_create.add_child_elem(self.get_disks_or_mirror_disks_object('disks', self.parameters.get('disks'))) + if self.parameters.get('mirror_disks'): + aggr_create.add_child_elem(self.get_disks_or_mirror_disks_object('mirror-disks', self.parameters.get('mirror_disks'))) + + try: + self.server.invoke_successfully(aggr_create, enable_tunneling=False) + if self.parameters.get('wait_for_online'): + # round off time_out + retries = (self.parameters['time_out'] + 5) / 10 + current = self.get_aggr() + status = None if current is None else current['service_state'] + while status != 'online' and retries > 0: + time.sleep(10) + retries = retries - 1 + current = self.get_aggr() + status = None if current is None else current['service_state'] + else: + current = self.get_aggr() + if current is not None and current.get('disk_count') != self.parameters.get('disk_count'): + self.module.warn("Aggregate created with mismatched disk_count: created %s not %s" + % (current.get('disk_count'), self.parameters.get('disk_count'))) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error provisioning aggregate %s: %s" + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_aggr(self): + """ + Delete aggregate. 
+ :return: None + """ + if self.use_rest: + return self.delete_aggr_rest() + aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-destroy', **{'aggregate': self.parameters['name']}) + + try: + self.server.invoke_successfully(aggr_destroy, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def rename_aggregate(self): + """ + Rename aggregate. + """ + if self.use_rest: + return self.rename_aggr_rest() + aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-rename', **{'aggregate': self.parameters['from_name'], + 'new-aggregate-name': self.parameters['name']}) + + try: + self.server.invoke_successfully(aggr_rename, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error renaming aggregate %s: %s" + % (self.parameters['from_name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_aggr(self, modify): + """ + Modify state of the aggregate + :param modify: dictionary of parameters to be modified + :return: None + """ + # online aggregate first, so disk can be added after online. + if modify.get('service_state') == 'online': + self.aggregate_online() + # modify tags + if modify.get('tags') is not None: + self.patch_aggr_rest('modify tags for', {'_tags': modify['tags']}) + # add disk before taking aggregate offline. + disk_size = self.parameters.get('disk_size', 0) + disk_size_with_unit = self.parameters.get('disk_size_with_unit') + if modify.get('disk_count'): + self.add_disks(modify['disk_count'], disk_size=disk_size, disk_size_with_unit=disk_size_with_unit) + if modify.get('disks_to_add') or modify.get('mirror_disks_to_add'): + self.add_disks(0, modify.get('disks_to_add'), modify.get('mirror_disks_to_add')) + # offline aggregate after adding additional disks. 
+ if modify.get('service_state') == 'offline': + self.aggregate_offline() + + def attach_object_store_to_aggr(self): + """ + Attach object store to aggregate. + :return: None + """ + if self.use_rest: + return self.attach_object_store_to_aggr_rest() + store_obj = {'aggregate': self.parameters['name'], 'object-store-name': self.parameters['object_store_name']} + if 'allow_flexgroups' in self.parameters: + store_obj['allow-flexgroup'] = self.na_helper.get_value_for_bool(False, self.parameters['allow_flexgroups']) + attach_object_store = netapp_utils.zapi.NaElement.create_node_with_children('aggr-object-store-attach', **store_obj) + + try: + self.server.invoke_successfully(attach_object_store, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error attaching object store %s to aggregate %s: %s" % + (self.parameters['object_store_name'], self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def add_disks(self, count=0, disks=None, mirror_disks=None, disk_size=0, disk_size_with_unit=None): + """ + Add additional disks to aggregate. 
+ :return: None + """ + if self.use_rest: + return self.add_disks_rest(count, disks, mirror_disks, disk_size, disk_size_with_unit) + options = {'aggregate': self.parameters['name']} + if count: + options['disk-count'] = str(count) + if disks and self.parameters.get('ignore_pool_checks'): + options['ignore-pool-checks'] = str(self.parameters['ignore_pool_checks']) + if disk_size: + options['disk-size'] = str(disk_size) + if disk_size_with_unit: + options['disk-size-with-unit'] = disk_size_with_unit + if self.parameters.get('disk_class'): + options['disk-class'] = self.parameters['disk_class'] + if self.parameters.get('disk_type'): + options['disk-type'] = self.parameters['disk_type'] + aggr_add = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-add', **options) + if disks: + aggr_add.add_child_elem(self.get_disks_or_mirror_disks_object('disks', disks)) + if mirror_disks: + aggr_add.add_child_elem(self.get_disks_or_mirror_disks_object('mirror-disks', mirror_disks)) + + try: + self.server.invoke_successfully(aggr_add, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error adding additional disks to aggregate %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def map_plex_to_primary_and_mirror(self, plex_disks, disks, mirror_disks): + ''' + we have N plexes, and disks, and maybe mirror_disks + we're trying to find which plex is used for disks, and which one, if applicable, for mirror_disks + :return: a tuple with the names of the two plexes (disks_plex, mirror_disks_plex) + the second one can be None + ''' + disks_plex = None + mirror_disks_plex = None + error = '' + for plex in plex_disks: + common = set(plex_disks[plex]).intersection(set(disks)) + if common: + if disks_plex is None: + disks_plex = plex + else: + error = 'found overlapping plexes: %s and %s' % (disks_plex, plex) + if mirror_disks is not None: + common = 
set(plex_disks[plex]).intersection(set(mirror_disks)) + if common: + if mirror_disks_plex is None: + mirror_disks_plex = plex + else: + error = 'found overlapping mirror plexes: %s and %s' % (mirror_disks_plex, plex) + if not error: + # make sure we found a match + if disks_plex is None: + error = 'cannot match disks with current aggregate disks' + if mirror_disks is not None and mirror_disks_plex is None: + if error: + error += ', and ' + error += 'cannot match mirror_disks with current aggregate disks' + if error: + self.module.fail_json(msg="Error mapping disks for aggregate %s: %s. Found: %s" % + (self.parameters['name'], error, str(plex_disks))) + return disks_plex, mirror_disks_plex + + def get_disks_to_add(self, aggr_name, disks, mirror_disks): + ''' + Get list of disks used by the aggregate, as primary and mirror. + Report error if: + the plexes in use cannot be matched with user inputs (we expect some overlap) + the user request requires some disks to be removed (not supported) + : return: a tuple of two lists of disks: disks_to_add, mirror_disks_to_add + ''' + # let's see if we need to add disks + disks_in_use = self.get_aggr_disks(aggr_name) + # we expect a list of tuples (disk_name, plex_name), if there is a mirror, we should have 2 plexes + # let's get a list of disks for each plex + plex_disks = {} + for disk_name, plex_name in disks_in_use: + plex_disks.setdefault(plex_name, []).append(disk_name) + # find who is who + disks_plex, mirror_disks_plex = self.map_plex_to_primary_and_mirror(plex_disks, disks, mirror_disks) + # Now that we know what is which, find what needs to be removed (error), and what needs to be added + disks_to_remove = [disk for disk in plex_disks[disks_plex] if disk not in disks] + if mirror_disks_plex: + disks_to_remove.extend([disk for disk in plex_disks[mirror_disks_plex] if disk not in mirror_disks]) + if disks_to_remove: + error = 'these disks cannot be removed: %s' % str(disks_to_remove) + self.module.fail_json(msg="Error 
removing disks is not supported. Aggregate %s: %s. In use: %s" % + (aggr_name, error, str(plex_disks))) + # finally, what's to be added + disks_to_add = [disk for disk in disks if disk not in plex_disks[disks_plex]] + mirror_disks_to_add = [] + if mirror_disks_plex: + mirror_disks_to_add = [disk for disk in mirror_disks if disk not in plex_disks[mirror_disks_plex]] + if mirror_disks_to_add and not disks_to_add: + self.module.fail_json(msg="Error cannot add mirror disks %s without adding disks for aggregate %s. In use: %s" % + (str(mirror_disks_to_add), aggr_name, str(plex_disks))) + if disks_to_add or mirror_disks_to_add: + self.na_helper.changed = True + + return disks_to_add, mirror_disks_to_add + + def set_disk_count(self, current, modify): + if modify.get('disk_count'): + if int(modify['disk_count']) < int(current['disk_count']): + self.module.fail_json(msg="Error: specified disk_count is less than current disk_count. Only adding disks is allowed.") + else: + modify['disk_count'] = modify['disk_count'] - current['disk_count'] + + def get_aggr_actions(self): + aggr_name = self.parameters.get('name') + rename, cd_action, modify = None, None, {} + current = self.get_aggr() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + # create by renaming existing aggregate + old_aggregate = self.get_aggr(self.parameters['from_name']) + rename = self.na_helper.is_rename_action(old_aggregate, current) + if rename is None: + self.module.fail_json(msg='Error renaming aggregate %s: no aggregate with from_name %s.' 
+ % (self.parameters['name'], self.parameters['from_name'])) + if rename: + current = old_aggregate + aggr_name = self.parameters['from_name'] + cd_action = None + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if 'encryption' in modify and not self.use_rest: + self.module.fail_json(msg='Error: modifying encryption is not supported with ZAPI.') + if 'snaplock_type' in modify: + self.module.fail_json(msg='Error: snaplock_type is not modifiable. Cannot change to: %s.' % modify['snaplock_type']) + if self.parameters.get('disks'): + modify['disks_to_add'], modify['mirror_disks_to_add'] = \ + self.get_disks_to_add(aggr_name, self.parameters['disks'], self.parameters.get('mirror_disks')) + self.set_disk_count(current, modify) + + return current, cd_action, rename, modify + + def get_object_store_action(self, current, rename): + object_store_cd_action = None + if self.parameters.get('object_store_name'): + aggr_name = self.parameters['from_name'] if rename else self.parameters['name'] + object_store_current = self.get_object_store(aggr_name) if current else None + object_store_cd_action = self.na_helper.get_cd_action(object_store_current, self.parameters.get('object_store_name')) + if object_store_cd_action is None and object_store_current is not None\ + and object_store_current['object_store_name'] != self.parameters.get('object_store_name'): + self.module.fail_json(msg='Error: object store %s is already associated with aggregate %s.' 
% + (object_store_current['object_store_name'], aggr_name)) + return object_store_cd_action + + def get_aggr_rest(self, name): + if not name: + return None + api = 'storage/aggregates' + query = {'name': name} + fields = 'uuid,state,block_storage.primary.disk_count,data_encryption,snaplock_type' + if 'tags' in self.parameters: + fields += ',_tags' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg='Error: failed to get aggregate %s: %s' % (name, error)) + if record: + return { + 'tags': record.get('_tags', []), + 'disk_count': self.na_helper.safe_get(record, ['block_storage', 'primary', 'disk_count']), + 'encryption': self.na_helper.safe_get(record, ['data_encryption', 'software_encryption_enabled']), + 'service_state': record['state'], + 'snaplock_type': record['snaplock_type'], + 'uuid': record['uuid'], + } + return None + + def get_multiplier(self, unit): + if not unit: + return 1 + try: + return netapp_utils.POW2_BYTE_MAP[unit[0].lower()] + except KeyError: + self.module.fail_json(msg='Error: unexpected unit in disk_size_with_unit: %s' % self.parameters['disk_size_with_unit']) + + def get_disk_size(self): + if 'disk_size' in self.parameters: + return self.parameters['disk_size'] * 4 * 1024 + if 'disk_size_with_unit' in self.parameters: + match = re.match(r'([\d.]+)(.*)', self.parameters['disk_size_with_unit']) + if match: + size, unit = match.groups() + mul = self.get_multiplier(unit) + return int(float(size) * mul) + self.module.fail_json(msg='Error: unexpected value in disk_size_with_unit: %s' % self.parameters['disk_size_with_unit']) + return None + + def create_aggr_rest(self): + api = 'storage/aggregates' + + disk_size = self.get_disk_size() + # Interestingly, REST expects True/False in body, but 'true'/'false' in query + # I guess it's because we're using json in the body + query = {'return_records': 'true'} # in order to capture UUID + if disk_size: + query['disk_size'] = disk_size + # 
query = {'disk_size': disk_size} if disk_size else None + + body = {'name': self.parameters['name']} if 'name' in self.parameters else {} + block_storage = {} + primary = {} + if self.parameters.get('nodes'): + body['node.name'] = self.parameters['nodes'][0] + if self.parameters.get('disk_class'): + primary['disk_class'] = self.parameters['disk_class'] + if self.parameters.get('disk_count'): + primary['disk_count'] = self.parameters['disk_count'] + if self.parameters.get('raid_size'): + primary['raid_size'] = self.parameters['raid_size'] + if self.parameters.get('raid_type'): + primary['raid_type'] = self.parameters['raid_type'] + if primary: + block_storage['primary'] = primary + mirror = {} + if self.parameters.get('is_mirrored'): + mirror['enabled'] = self.parameters['is_mirrored'] + if mirror: + block_storage['mirror'] = mirror + if block_storage: + body['block_storage'] = block_storage + if self.parameters.get('encryption'): + body['data_encryption'] = {'software_encryption_enabled': True} + if self.parameters.get('snaplock_type'): + body['snaplock_type'] = self.parameters['snaplock_type'] + if self.parameters.get('tags') is not None: + body['_tags'] = self.parameters['tags'] + response, error = rest_generic.post_async(self.rest_api, api, body or None, query, job_timeout=self.parameters['time_out']) + if error: + self.module.fail_json(msg='Error: failed to create aggregate: %s' % error) + if response: + record, error = rrh.check_for_0_or_1_records(api, response, error, query) + if not error and record and 'uuid' not in record: + error = 'uuid key not present in %s:' % record + if error: + self.module.fail_json(msg='Error: failed to parse create aggregate response: %s' % error) + if record: + self.uuid = record['uuid'] + + def delete_aggr_rest(self): + api = 'storage/aggregates' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid) + if error: + self.module.fail_json(msg='Error: failed to delete aggregate: %s' % error) + + def 
patch_aggr_rest(self, action, body, query=None): + api = 'storage/aggregates' + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body, query) + if error: + self.module.fail_json(msg='Error: failed to %s aggregate: %s' % (action, error)) + + def add_disks_rest(self, count=0, disks=None, mirror_disks=None, disk_size=0, disk_size_with_unit=None): + """ + Add additional disks to aggregate. + :return: None + """ + if disks or mirror_disks: + self.module.fail_json(msg='Error: disks or mirror disks are mot supported with rest: %s, %s.' % (disks, mirror_disks)) + if self.parameters.get('disk_class'): + self.module.warn('disk_class is ignored when adding disks to an exiting aggregate') + primary = {'disk_count': self.parameters['disk_count']} if count else None + body = {'block_storage': {'primary': primary}} if primary else None + if body: + disk_size = self.get_disk_size() + query = {'disk_size': disk_size} if disk_size else None + self.patch_aggr_rest('increase disk count for', body, query) + + def rename_aggr_rest(self): + body = {'name': self.parameters['name']} + self.patch_aggr_rest('rename', body) + + def get_object_store_rest(self): + '''TODO: support mirror in addition to primary''' + api = 'storage/aggregates/%s/cloud-stores' % self.uuid + record, error = rest_generic.get_one_record(self.rest_api, api, query={'primary': True}) + if error: + self.module.fail_json(msg='Error: failed to get cloud stores for aggregate: %s' % error) + return record + + def get_cloud_target_uuid_rest(self): + api = 'cloud/targets' + query = {'name': self.parameters['object_store_name']} + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error or not record: + self.module.fail_json(msg='Error: failed to find cloud store with name %s: %s' % (self.parameters['object_store_name'], error)) + return record['uuid'] + + def attach_object_store_to_aggr_rest(self): + '''TODO: support mirror in addition to primary''' + + if self.uuid is None: + 
error = 'aggregate UUID is not set.' + self.module.fail_json(msg='Error: cannot attach cloud store with name %s: %s' % (self.parameters['object_store_name'], error)) + body = {'target': {'uuid': self.get_cloud_target_uuid_rest()}} + api = 'storage/aggregates/%s/cloud-stores' % self.uuid + query = None + if 'allow_flexgroups' in self.parameters: + query = {'allow_flexgroups': 'true' if self.parameters['allow_flexgroups'] else 'false'} + record, error = rest_generic.post_async(self.rest_api, api, body, query) + if error: + self.module.fail_json(msg='Error: failed to attach cloud store with name %s: %s' % (self.parameters['object_store_name'], error)) + return record + + def validate_expensive_options(self, cd_action, modify): + if cd_action == 'create' or (modify and 'disk_count' in modify): + # report an error if disk_size_with_unit is not valid + self.get_disk_size() + + def apply(self): + """ + Apply action to the aggregate + :return: None + """ + current, cd_action, rename, modify = self.get_aggr_actions() + if current: + self.uuid = current.get('uuid') + object_store_cd_action = self.get_object_store_action(current, rename) + + if self.na_helper.changed and self.module.check_mode: + # additional validations that are done at runtime + self.validate_expensive_options(cd_action, modify) + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_aggr() + # offline aggregate after create. 
+ if self.parameters.get('service_state') == 'offline': + self.modify_aggr({'service_state': 'offline'}) + elif cd_action == 'delete': + self.delete_aggr() + else: + if rename: + self.rename_aggregate() + if modify: + self.modify_aggr(modify) + if object_store_cd_action == 'create': + self.attach_object_store_to_aggr() + if rename: + modify['name'] = self.parameters['name'] + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Create Aggregate class instance and invoke apply + :return: None + """ + obj_aggr = NetAppOntapAggregate() + obj_aggr.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py new file mode 100644 index 000000000..00d02e314 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport.py @@ -0,0 +1,449 @@ +#!/usr/bin/python +""" +create Autosupport module to enable, disable or modify +""" + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_autosupport +short_description: NetApp ONTAP autosupport +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.7.0 +description: + - Enable/Disable Autosupport +author: NetApp Ansible Team (@carchi8py) +options: + state: + description: + - Specifies whether the AutoSupport daemon is present or absent. + - When this setting is absent, delivery of all AutoSupport messages is turned off. + choices: ['present', 'absent'] + type: str + default: present + node_name: + description: + - The name of the filer that owns the AutoSupport Configuration. 
+ required: true + type: str + transport: + description: + - The name of the transport protocol used to deliver AutoSupport messages. + choices: ['http', 'https', 'smtp'] + type: str + noteto: + description: + - Specifies up to five recipients of short AutoSupport e-mail messages. + type: list + elements: str + post_url: + description: + - The URL used to deliver AutoSupport messages via HTTP POST. + type: str + mail_hosts: + description: + - List of mail server(s) used to deliver AutoSupport messages via SMTP. + - Both host names and IP addresses may be used as valid input. + type: list + elements: str + support: + description: + - Specifies whether AutoSupport notification to technical support is enabled. + type: bool + from_address: + description: + - specify the e-mail address from which the node sends AutoSupport messages. + version_added: 2.8.0 + type: str + partner_addresses: + description: + - Specifies up to five partner vendor recipients of full AutoSupport e-mail messages. + version_added: 2.8.0 + type: list + elements: str + to_addresses: + description: + - Specifies up to five recipients of full AutoSupport e-mail messages. + version_added: 2.8.0 + type: list + elements: str + proxy_url: + description: + - specify an HTTP or HTTPS proxy if the 'transport' parameter is set to HTTP or HTTPS and your organization uses a proxy. + - If authentication is required, use the format "username:password@host:port". + version_added: 2.8.0 + type: str + hostname_in_subject: + description: + - Specify whether the hostname of the node is included in the subject line of the AutoSupport message. + type: bool + version_added: 2.8.0 + nht_data_enabled: + description: + - Specify whether the disk health data is collected as part of the AutoSupport data. + type: bool + version_added: '21.5.0' + perf_data_enabled: + description: + - Specify whether the performance data is collected as part of the AutoSupport data. 
+ type: bool + version_added: '21.5.0' + retry_count: + description: + - Specify the maximum number of delivery attempts for an AutoSupport message. + type: int + version_added: '21.5.0' + reminder_enabled: + description: + - Specify whether AutoSupport reminders are enabled or disabled. + type: bool + version_added: '21.5.0' + max_http_size: + description: + - Specify delivery size limit for the HTTP transport protocol (in bytes). + type: int + version_added: '21.5.0' + max_smtp_size: + description: + - Specify delivery size limit for the SMTP transport protocol (in bytes). + type: int + version_added: '21.5.0' + private_data_removed: + description: + - Specify the removal of customer-supplied data. + type: bool + version_added: '21.5.0' + local_collection_enabled: + description: + - Specify whether collection of AutoSupport data is enabled when the AutoSupport daemon is disabled. + type: bool + version_added: '21.5.0' + ondemand_enabled: + description: + - Specify whether the AutoSupport OnDemand Download feature is enabled. + type: bool + version_added: '21.5.0' + validate_digital_certificate: + description: + - When set to true each node will validate the digital certificates that it receives. 
+ type: bool + version_added: '21.5.0' + """ + +EXAMPLES = """ + - name: Enable autosupport + netapp.ontap.na_ontap_autosupport: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + state: present + node_name: test + transport: https + noteto: abc@def.com,def@ghi.com + mail_hosts: 1.2.3.4,5.6.7.8 + support: False + post_url: url/1.0/post + - name: Modify autosupport proxy_url with password + netapp.ontap.na_ontap_autosupport: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + state: present + node_name: test + transport: https + proxy_url: username:password@host.com:8000 + - name: Modify autosupport proxy_url without password + netapp.ontap.na_ontap_autosupport: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + state: present + node_name: test + transport: https + proxy_url: username@host.com:8000 + - name: Disable autosupport + netapp.ontap.na_ontap_autosupport: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + state: absent + node_name: test +""" + +RETURN = """ +""" +import re +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPasup: + """Class with autosupport methods""" + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + node_name=dict(required=True, type='str'), + transport=dict(required=False, type='str', 
choices=['smtp', 'http', 'https']), + noteto=dict(required=False, type='list', elements='str'), + post_url=dict(required=False, type='str'), + support=dict(required=False, type='bool'), + mail_hosts=dict(required=False, type='list', elements='str'), + from_address=dict(required=False, type='str'), + partner_addresses=dict(required=False, type='list', elements='str'), + to_addresses=dict(required=False, type='list', elements='str'), + # proxy_url may contain a password: user:password@url + proxy_url=dict(required=False, type='str', no_log=True), + hostname_in_subject=dict(required=False, type='bool'), + nht_data_enabled=dict(required=False, type='bool'), + perf_data_enabled=dict(required=False, type='bool'), + retry_count=dict(required=False, type='int'), + reminder_enabled=dict(required=False, type='bool'), + max_http_size=dict(required=False, type='int'), + max_smtp_size=dict(required=False, type='int'), + private_data_removed=dict(required=False, type='bool'), + local_collection_enabled=dict(required=False, type='bool'), + ondemand_enabled=dict(required=False, type='bool'), + validate_digital_certificate=dict(required=False, type='bool') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # present or absent requires modifying state to enabled or disabled + self.parameters['service_state'] = 'started' if self.parameters['state'] == 'present' else 'stopped' + self.set_playbook_zapi_key_map() + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def set_playbook_zapi_key_map(self): + self.na_helper.zapi_string_keys = { + 'node_name': 'node-name', + 'transport': 
'transport', + 'post_url': 'post-url', + 'from_address': 'from', + 'proxy_url': 'proxy-url' + } + self.na_helper.zapi_int_keys = { + 'retry_count': 'retry-count', + 'max_http_size': 'max-http-size', + 'max_smtp_size': 'max-smtp-size' + } + self.na_helper.zapi_list_keys = { + 'noteto': ('noteto', 'mail-address'), + 'mail_hosts': ('mail-hosts', 'string'), + 'partner_addresses': ('partner-address', 'mail-address'), + 'to_addresses': ('to', 'mail-address') + } + self.na_helper.zapi_bool_keys = { + 'support': 'is-support-enabled', + 'hostname_in_subject': 'is-node-in-subject', + 'nht_data_enabled': 'is-nht-data-enabled', + 'perf_data_enabled': 'is-perf-data-enabled', + 'reminder_enabled': 'is-reminder-enabled', + 'private_data_removed': 'is-private-data-removed', + 'local_collection_enabled': 'is-local-collection-enabled', + 'ondemand_enabled': 'is-ondemand-enabled', + 'validate_digital_certificate': 'validate-digital-certificate' + } + + def get_autosupport_config(self): + """ + get current autosupport details + :return: dict() + """ + asup_info = {} + if self.use_rest: + api = "private/cli/system/node/autosupport" + query = { + 'node': self.parameters['node_name'], + 'fields': 'state,node,transport,noteto,url,support,mail-hosts,from,partner-address,to,proxy-url,hostname-subj,nht,perf,retry-count,\ +reminder,max-http-size,max-smtp-size,remove-private-data,ondemand-server-url,support,reminder,ondemand-state,local-collection,validate-digital-certificate' + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + + if error: + self.module.fail_json(msg='Error fetching info: %s' % error) + + for param in ('transport', 'mail_hosts', 'proxy_url', 'retry_count', + 'max_http_size', 'max_smtp_size', 'noteto', 'validate_digital_certificate'): + if param in record: + asup_info[param] = record[param] + + asup_info['support'] = record['support'] in ['enable', True] + asup_info['node_name'] = record['node'] if 'node' in record else "" + asup_info['post_url'] = 
record['url'] if 'url' in record else "" + asup_info['from_address'] = record['from'] if 'from' in record else "" + asup_info['to_addresses'] = record['to'] if 'to' in record else list() + asup_info['hostname_in_subject'] = record['hostname_subj'] if 'hostname_subj' in record else False + asup_info['nht_data_enabled'] = record['nht'] if 'nht' in record else False + asup_info['perf_data_enabled'] = record['perf'] if 'perf' in record else False + asup_info['reminder_enabled'] = record['reminder'] if 'reminder' in record else False + asup_info['private_data_removed'] = record['remove_private_data'] if 'remove_private_data' in record else False + asup_info['local_collection_enabled'] = record['local_collection'] if 'local_collection' in record else False + asup_info['ondemand_enabled'] = record['ondemand_state'] in ['enable', True] if 'ondemand_state' in record else False + asup_info['service_state'] = 'started' if record['state'] in ['enable', True] else 'stopped' + asup_info['partner_addresses'] = record['partner_address'] if 'partner_address' in record else list() + else: + asup_details = netapp_utils.zapi.NaElement('autosupport-config-get') + asup_details.add_new_child('node-name', self.parameters['node_name']) + try: + result = self.server.invoke_successfully(asup_details, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching info: %s' % to_native(error), exception=traceback.format_exc()) + # zapi invoke successful + asup_attr_info = result.get_child_by_name('attributes').get_child_by_name('autosupport-config-info') + asup_info['service_state'] = 'started' if asup_attr_info['is-enabled'] == 'true' else 'stopped' + for item_key, zapi_key in self.na_helper.zapi_string_keys.items(): + value = asup_attr_info.get_child_content(zapi_key) + asup_info[item_key] = value if value is not None else "" + for item_key, zapi_key in self.na_helper.zapi_int_keys.items(): + value = 
asup_attr_info.get_child_content(zapi_key) + if value is not None: + asup_info[item_key] = self.na_helper.get_value_for_int(from_zapi=True, value=value) + for item_key, zapi_key in self.na_helper.zapi_bool_keys.items(): + value = asup_attr_info.get_child_content(zapi_key) + if value is not None: + asup_info[item_key] = self.na_helper.get_value_for_bool(from_zapi=True, value=value) + for item_key, zapi_key in self.na_helper.zapi_list_keys.items(): + parent, dummy = zapi_key + asup_info[item_key] = self.na_helper.get_value_for_list(from_zapi=True, zapi_parent=asup_attr_info.get_child_by_name(parent)) + + return asup_info + + def modify_autosupport_config(self, modify): + """ + modify autosupport config + @return: modfied attributes / FAILURE with an error_message + """ + + if self.use_rest: + api = "private/cli/system/node/autosupport" + query = { + 'node': self.parameters['node_name'] + } + if 'service_state' in modify: + modify['state'] = modify['service_state'] == 'started' + del modify['service_state'] + + if 'post_url' in modify: + modify['url'] = modify.pop('post_url') + if 'from_address' in modify: + modify['from'] = modify.pop('from_address') + if 'to_addresses' in modify: + modify['to'] = modify.pop('to_addresses') + if 'hostname_in_subject' in modify: + modify['hostname_subj'] = modify.pop('hostname_in_subject') + if 'nht_data_enabled' in modify: + modify['nht'] = modify.pop('nht_data_enabled') + if 'perf_data_enabled' in modify: + modify['perf'] = modify.pop('perf_data_enabled') + if 'reminder_enabled' in modify: + modify['reminder'] = modify.pop('reminder_enabled') + if 'private_data_removed' in modify: + modify['remove_private_data'] = modify.pop('private_data_removed') + if 'local_collection_enabled' in modify: + modify['local_collection'] = modify.pop('local_collection_enabled') + if 'ondemand_enabled' in modify: + modify['ondemand_state'] = modify.pop('ondemand_enabled') + if 'partner_addresses' in modify: + modify['partner_address'] = 
modify.pop('partner_addresses') + + dummy, error = rest_generic.patch_async(self.rest_api, api, None, modify, query) + + if error: + self.module.fail_json(msg='Error modifying asup: %s' % error) + else: + asup_details = {'node-name': self.parameters['node_name']} + if modify.get('service_state'): + asup_details['is-enabled'] = 'true' if modify.get('service_state') == 'started' else 'false' + asup_config = netapp_utils.zapi.NaElement('autosupport-config-modify') + for item_key in modify: + if item_key in self.na_helper.zapi_string_keys: + zapi_key = self.na_helper.zapi_string_keys.get(item_key) + asup_details[zapi_key] = modify[item_key] + elif item_key in self.na_helper.zapi_int_keys: + zapi_key = self.na_helper.zapi_int_keys.get(item_key) + asup_details[zapi_key] = modify[item_key] + elif item_key in self.na_helper.zapi_bool_keys: + zapi_key = self.na_helper.zapi_bool_keys.get(item_key) + asup_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False, value=modify[item_key]) + elif item_key in self.na_helper.zapi_list_keys: + parent_key, child_key = self.na_helper.zapi_list_keys.get(item_key) + asup_config.add_child_elem(self.na_helper.get_value_for_list( + from_zapi=False, zapi_parent=parent_key, zapi_child=child_key, data=modify.get(item_key))) + + asup_config.translate_struct(asup_details) + try: + return self.server.invoke_successfully(asup_config, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying asup: %s' % to_native(error), exception=traceback.format_exc()) + + @staticmethod + def strip_password(url): + ''' if url matches user:password@address return user@address + otherwise return None + ''' + if url: + needle = r'(.*):(.*)@(.*)' + matched = re.match(needle, url) + if matched: + return matched.group(1, 3) + return None, None + + def idempotency_check(self, current, modify): + sanitized_modify = dict(modify) + if 'proxy_url' in modify: + user_url_m = 
self.strip_password(modify['proxy_url']) + user_url_c = self.strip_password(current.get('proxy_url')) + if user_url_m == user_url_c and user_url_m != (None, None): + # change in password, it can be a false positive as password is replaced with ********* by ONTAP + self.module.warn('na_ontap_autosupport is not idempotent because the password value in proxy_url cannot be compared.') + if user_url_m != (None, None): + # password was found in proxy_url, sanitize it, use something different than ZAPI ********* + sanitized_modify['proxy_url'] = "%s:XXXXXXXX@%s" % user_url_m + return sanitized_modify + + def apply(self): + """ + Apply action to autosupport + """ + current = self.get_autosupport_config() + modify = self.na_helper.get_modified_attributes(current, self.parameters) + sanitized_modify = self.idempotency_check(current, modify) + if self.na_helper.changed and not self.module.check_mode: + self.modify_autosupport_config(modify) + result = netapp_utils.generate_result(self.na_helper.changed, modify=sanitized_modify) + self.module.exit_json(**result) + + +def main(): + """Execute action""" + asup_obj = NetAppONTAPasup() + asup_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py new file mode 100644 index 000000000..1f1f109d3 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py @@ -0,0 +1,188 @@ +#!/usr/bin/python + +# (c) 2020-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_autosupport_invoke +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified' +} + +DOCUMENTATION = ''' + +module: na_ontap_autosupport_invoke +author: 
NetApp Ansible Team (@carchi8py) +short_description: NetApp ONTAP send AutoSupport message +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '20.4.0' +description: + - Send an AutoSupport message from a node + +options: + + name: + description: + - The name of the node to send the message to. + - Not specifying this option invokes AutoSupport on all nodes in the cluster. + type: str + + autosupport_message: + description: + - Text sent in the subject line of the AutoSupport message. + - message is deprecated and will be removed to avoid a conflict with an Ansible internal variable. + type: str + aliases: + - message + version_added: 20.8.0 + + type: + description: + - Type of AutoSupport Collection to Issue. + choices: ['test', 'performance', 'all'] + default: 'all' + type: str + + uri: + description: + - send the AutoSupport message to the destination you specify instead of the configured destination. + type: str + +''' + +EXAMPLES = ''' + - name: Send message + na_ontap_autosupport_invoke: + name: node1 + autosupport_message: invoked test autosupport rest + uri: http://1.2.3.4/delivery_uri + type: test + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppONTAPasupInvoke(object): + ''' send ASUP message ''' + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=False, type='str'), + 
autosupport_message=dict(required=False, type='str', aliases=["message"]), + type=dict(required=False, choices=[ + 'test', 'performance', 'all'], default='all'), + uri=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + if 'message' in self.parameters: + self.module.warn('Error: "message" option conflicts with Ansible internal variable - please use "autosupport_message".') + + # REST API should be used for ONTAP 9.6 or higher. + self.rest_api = OntapRestAPI(self.module) + if self.rest_api.is_rest(): + self.use_rest = True + else: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def get_nodes(self): + nodes = [] + node_obj = netapp_utils.zapi.NaElement('system-node-get-iter') + desired_attributes = netapp_utils.zapi.NaElement('desired-attributes') + node_details_info = netapp_utils.zapi.NaElement('node-details-info') + node_details_info.add_new_child('node', '') + desired_attributes.add_child_elem(node_details_info) + node_obj.add_child_elem(desired_attributes) + try: + result = self.server.invoke_successfully(node_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + node_info = result.get_child_by_name('attributes-list') + if node_info is not None: + nodes = [node_details.get_child_content('node') for node_details in node_info.get_children()] + return nodes + + def send_zapi_message(self, params, node_name): + params['node-name'] = node_name + send_message = netapp_utils.zapi.NaElement.create_node_with_children('autosupport-invoke', **params) + try: + 
self.server.invoke_successfully(send_message, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error on sending autosupport message to node %s: %s." + % (node_name, to_native(error)), + exception=traceback.format_exc()) + + def send_message(self): + params = {} + if self.parameters.get('autosupport_message'): + params['message'] = self.parameters['autosupport_message'] + if self.parameters.get('type'): + params['type'] = self.parameters['type'] + if self.parameters.get('uri'): + params['uri'] = self.parameters['uri'] + + if self.use_rest: + if self.parameters.get('name'): + params['node.name'] = self.parameters['name'] + node_name = params['node.name'] + else: + node_name = '*' + api = 'support/autosupport/messages' + dummy, error = self.rest_api.post(api, params) + if error is not None: + self.module.fail_json(msg="Error on sending autosupport message to node %s: %s." + % (node_name, error)) + else: + if self.parameters.get('name'): + node_names = [self.parameters['name']] + else: + # simulate REST behavior by sending to all nodes in the cluster + node_names = self.get_nodes() + for name in node_names: + self.send_zapi_message(params, name) + + def apply(self): + if not self.module.check_mode: + self.send_message() + self.module.exit_json(changed=True) + + +def main(): + message = NetAppONTAPasupInvoke() + message.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_bgp_peer_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_bgp_peer_group.py new file mode 100644 index 000000000..6c0fa63a0 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_bgp_peer_group.py @@ -0,0 +1,356 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + 
+DOCUMENTATION = """ +module: na_ontap_bgp_peer_group +short_description: NetApp ONTAP module to create, modify or delete bgp peer group. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '22.0.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Create, modify or delete bgp peer group. +options: + state: + description: + - Create or delete BGP peer group. + choices: ['present', 'absent'] + type: str + default: present + name: + description: + - Name of the BGP peer group. + type: str + required: true + from_name: + description: + - Name of the existing BGP peer group to be renamed to C(name). + type: str + ipspace: + description: + - IPSpace name, cannot be modified after creation. + type: str + local: + description: + - Information describing the local interface that is being used to peer with a router using BGP. + - When creating BGP peer group, an existing BGP interface is used by specifying the interface, or create a new one by specifying the port and IP address. + - Cannot be modified after creation. + type: dict + suboptions: + interface: + description: + - An existing BGP interface. + - If interface not found, module will try to create BGP interface using C(local.ip) and C(local.port). + type: dict + suboptions: + name: + description: + - BGP interface name. + type: str + ip: + description: + - IP information, requird to create a new interface. + type: dict + suboptions: + address: + description: + - IPv4 or IPv6 address, example 10.10.10.7. + type: str + netmask: + description: + - Input as netmask length (16) or IPv4 mask (255.255.0.0). + - For IPv6, the default value is 64 with a valid range of 1 to 127. + type: str + port: + description: + - Port and node information, required to create a new interface. + type: dict + suboptions: + name: + description: + - Port name. + type: str + node: + description: + - Name of node on which the port is located. 
+ type: dict + suboptions: + name: + description: + - Node name + type: str + peer: + description: + - Information describing the router to peer with + type: dict + suboptions: + address: + description: + - Peer router address. + type: str + asn: + description: + - Autonomous system number of peer. + - Cannot be modified after creation. + type: int +""" + +EXAMPLES = """ + - name: Create BGP peer group with existing bgp interface bgp_lif. + netapp.ontap.na_ontap_bgp_peer_group: + name: peer_group + ipspace: Default + local: + interface: + name: bgp_lif + peer: + address: 10.10.10.19 + asn: 65501 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Create new BGP interface new_bgp_lif and BGP peer group peer_group_1. + netapp.ontap.na_ontap_bgp_peer_group: + name: peer_group_1 + ipspace: Default + local: + interface: + name: new_bgp_lif + ip: + address: 10.10.10.20 + netmask: 24 + port: + name: e0a + node: + name: ontap98-01 + peer: + address: 10.10.10.20 + asn: 65500 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + # this will create bgp interface with random name. + - name: Create BGP interface without interface name and BGP peer group peer_group_2. + netapp.ontap.na_ontap_bgp_peer_group: + name: peer_group_2 + ipspace: Default + local: + ip: + address: 10.10.10.22 + netmask: 24 + port: + name: e0a + node: + name: ontap98-01 + peer: + address: 10.10.10.22 + asn: 65512 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Modify peer address. 
+ netapp.ontap.na_ontap_bgp_peer_group: + name: peer_group_2 + ipspace: Default + peer: + address: 10.10.55.22 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Rename BGP peer group name and modify peer address. + netapp.ontap.na_ontap_bgp_peer_group: + from_name: peer_group_2 + name: new_peer_group + ipspace: Default + peer: + address: 10.10.55.40 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Delete BGP peer group. + netapp.ontap.na_ontap_bgp_peer_group: + name: new_peer_group + ipspace: Default + state: absent + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, netapp_ipaddress + + +class NetAppOntapBgpPeerGroup: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + ipspace=dict(required=False, type='str'), + local=dict(required=False, type='dict', options=dict( + interface=dict(required=False, type='dict', options=dict( + name=dict(required=False, type='str'), + )), + ip=dict(required=False, type='dict', options=dict( + address=dict(required=False, 
type='str'), + netmask=dict(required=False, type='str') + )), + port=dict(required=False, type='dict', options=dict( + name=dict(required=False, type='str'), + node=dict(required=False, type='dict', options=dict( + name=dict(required=False, type='str') + )) + )) + )), + peer=dict(required=False, type='dict', options=dict( + address=dict(required=False, type='str'), + asn=dict(required=False, type='int') + )) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.uuid = None + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + if self.na_helper.safe_get(self.parameters, ['peer', 'address']): + self.parameters['peer']['address'] = netapp_ipaddress.validate_and_compress_ip_address(self.parameters['peer']['address'], self.module) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_bgp_peer_group', 9, 7) + self.parameters = self.na_helper.filter_out_none_entries(self.parameters) + + def get_bgp_peer_group(self, name=None): + """ + Get BGP peer group. + """ + if name is None: + name = self.parameters['name'] + api = 'network/ip/bgp/peer-groups' + query = { + 'name': name, + 'fields': 'name,uuid,peer' + } + if 'ipspace' in self.parameters: + query['ipspace.name'] = self.parameters['ipspace'] + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg='Error fetching BGP peer group %s: %s' % (name, to_native(error)), + exception=traceback.format_exc()) + if record: + self.uuid = record['uuid'] + return { + 'name': self.na_helper.safe_get(record, ['name']), + 'peer': self.na_helper.safe_get(record, ['peer']) + } + return None + + def create_bgp_peer_group(self): + """ + Create BGP peer group. 
+ """ + api = 'network/ip/bgp/peer-groups' + body = { + 'name': self.parameters['name'], + 'local': self.parameters['local'], + 'peer': self.parameters['peer'] + } + if 'ipspace' in self.parameters: + body['ipspace.name'] = self.parameters['ipspace'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating BGP peer group %s: %s.' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_bgp_peer_group(self, modify): + """ + Modify BGP peer group. + """ + api = 'network/ip/bgp/peer-groups' + body = {} + if 'name' in modify: + body['name'] = modify['name'] + if 'peer' in modify: + body['peer'] = modify['peer'] + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body) + if error: + name = self.parameters['from_name'] if 'name' in modify else self.parameters['name'] + self.module.fail_json(msg='Error modifying BGP peer group %s: %s.' % (name, to_native(error)), + exception=traceback.format_exc()) + + def delete_bgp_peer_group(self): + """ + Delete BGP peer group. + """ + api = 'network/ip/bgp/peer-groups' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid) + if error: + self.module.fail_json(msg='Error deleting BGP peer group %s: %s.' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + current = self.get_bgp_peer_group() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = None + if cd_action == 'create': + if self.parameters.get('from_name'): + current = self.get_bgp_peer_group(self.parameters['from_name']) + if not current: + self.module.fail_json(msg="Error renaming BGP peer group, %s does not exist." % self.parameters['from_name']) + cd_action = None + elif not self.parameters.get('local') or not self.parameters.get('peer'): + self.module.fail_json(msg="Error creating BGP peer group %s, local and peer are required in create." 
% self.parameters['name']) + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.safe_get(modify, ['peer', 'asn']): + self.module.fail_json(msg="Error: cannot modify peer asn.") + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_bgp_peer_group() + elif cd_action == 'delete': + self.delete_bgp_peer_group() + else: + self.modify_bgp_peer_group(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + bgp_obj = NetAppOntapBgpPeerGroup() + bgp_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py new file mode 100644 index 000000000..ef74d1705 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain.py @@ -0,0 +1,690 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_broadcast_domain +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: na_ontap_broadcast_domain +short_description: NetApp ONTAP manage broadcast domains. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Modify a ONTAP broadcast domain. +options: + state: + description: + - Whether the specified broadcast domain should exist or not. + choices: ['present', 'absent'] + type: str + default: present + name: + description: + - Specify the broadcast domain name. + required: true + aliases: + - broadcast_domain + type: str + from_name: + description: + - Specify the broadcast domain name to be split into new broadcast domain. 
+ version_added: 2.8.0 + type: str + mtu: + description: + - Specify the required mtu for the broadcast domain. + type: int + ipspace: + description: + - Specify the required ipspace for the broadcast domain. + - With ZAPI, a domain ipspace cannot be modified after the domain has been created. + - With REST, a domain ipspace can be modified. + type: str + from_ipspace: + description: + - if used with C(from_name), it will try to find broadcast domain C(from_name) in C(from_ipspace), split action either rename broadcast_domain and + ipspace or create a new broadcast domain. + - If not C(from_name) present, it will try to find C(name) broadcast domain in C(from_ipspace) and modify ipspace using C(ipspace). + - Only supported with REST. + version_added: 2.15.0 + type: str + ports: + description: + - Specify the ports associated with this broadcast domain. Should be comma separated. + - It represents the expected state of a list of ports at any time. + - Add a port if it is specified in expected state but not in current state. + - Delete a port if it is specified in current state but not in expected state. + - For split action, it represents the ports to be split from current broadcast domain and added to the new broadcast domain. + - If all ports are removed or split from a broadcast domain, the broadcast domain will be deleted automatically. + - With REST, if exact match of ports found with C(from_name), split action will rename the broadcast domain using C(name). + - With REST, if partial match of ports with C(from_name), split action will create a new broadcast domain using C(name) and + move partial matched ports from C(from_name) to C(name). + - With REST, if C(ports) not in C(from_name), split action will create a new broadcast domain using C(name) with C(ports). 
+ type: list + elements: str +''' + +EXAMPLES = """ + - name: create broadcast domain + netapp.ontap.na_ontap_broadcast_domain: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + name: ansible_domain + mtu: 1000 + ipspace: Default + ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"] + - name: modify broadcast domain + netapp.ontap.na_ontap_broadcast_domain: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + name: ansible_domain + mtu: 1100 + ipspace: Default + ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"] + - name: split broadcast domain + netapp.ontap.na_ontap_broadcast_domain: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + from_name: ansible_domain + name: new_ansible_domain + mtu: 1200 + ipspace: Default + ports: khutton-vsim1:e0d-12 + - name: delete broadcast domain + netapp.ontap.na_ontap_broadcast_domain: + state: absent + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + name: ansible_domain + ipspace: Default + - name: create broadcast domain REST + netapp.ontap.na_ontap_broadcast_domain: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + name: ansible_domain + mtu: 1200 + ipspace: Default + ports: ["khutton-vsim1:e0d-12","khutton-vsim1:e0d-13","khutton-vsim1:e0d-14"] + - name: rename broadcast domain if exact match of ports REST + netapp.ontap.na_ontap_broadcast_domain: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + from_name: ansible_domain + name: new_ansible_domain + mtu: 1200 + ipspace: Default + ports: ["khutton-vsim1:e0d-12","khutton-vsim1:e0d-13","khutton-vsim1:e0d-14"] + - name: if partial match, 
remove e0d-12 from new_ansible_domain & create new domain ansible_domain with port e0d-12 REST + netapp.ontap.na_ontap_broadcast_domain: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + from_name: new_ansible_domain + name: ansible_domain + mtu: 1200 + ipspace: Default + ports: ["khutton-vsim1:e0d-12"] + - name: Modify both broadcast domain and ipspace REST. + netapp.ontap.na_ontap_broadcast_domain: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + from_name: ansible_domain + from_ipspace: Default + name: ansible_domain_ip1 + ipspace: ipspace_1 + mtu: 1200 + ports: ["khutton-vsim1:e0d-12"] + - name: Modify ipspace only REST. + netapp.ontap.na_ontap_broadcast_domain: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + from_ipspace: ipspace_1 + name: ansible_domain_ip1 + ipspace: Default + mtu: 1200 + ports: ["khutton-vsim1:e0d-12"] + - name: delete broadcast domain new_ansible_domain. 
+ netapp.ontap.na_ontap_broadcast_domain: + state: absent + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + name: new_ansible_domain + mtu: 1200 + ipspace: Default + ports: ["khutton-vsim1:e0d-13","khutton-vsim1:e0d-14"] +""" + +RETURN = """ + + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapBroadcastDomain(object): + """ + Create, Modifies and Destroys a Broadcast domain + """ + def __init__(self): + """ + Initialize the ONTAP Broadcast Domain class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str', aliases=["broadcast_domain"]), + ipspace=dict(required=False, type='str'), + mtu=dict(required=False, type='int'), + ports=dict(required=False, type='list', elements='str'), + from_name=dict(required=False, type='str'), + from_ipspace=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.desired_ports = None + + if self.use_rest and 'ipspace' not in self.parameters: + error_msg = "Error: ipspace space is a required option with REST" + self.module.fail_json(msg=error_msg) + + if 'ports' in 
self.parameters: + self.parameters['ports'] = list(set([port.strip() for port in self.parameters['ports']])) + if self.use_rest: + self.desired_ports = self.get_ports_rest(self.parameters['ports']) + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + if 'from_ipspace' in self.parameters: + self.parameters.pop('from_ipspace') + self.module.warn("from_ipspace is ignored when ZAPI is used.") + + def get_broadcast_domain(self, broadcast_domain=None, ipspace=None): + """ + Return details about the broadcast domain + :param broadcast_domain: specific broadcast domain to get. + :return: Details about the broadcast domain. None if not found. + :rtype: dict + """ + if broadcast_domain is None: + broadcast_domain = self.parameters['name'] + if ipspace is None: + # unlike rest, ipspace is not mandatory field for zapi. + ipspace = self.parameters.get('ipspace') + if self.use_rest: + return self.get_broadcast_domain_rest(broadcast_domain, ipspace) + domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter') + broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info') + broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain) + if ipspace: + broadcast_domain_info.add_new_child('ipspace', ipspace) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(broadcast_domain_info) + domain_get_iter.add_child_elem(query) + result = self.server.invoke_successfully(domain_get_iter, True) + domain_exists = None + # check if broadcast_domain exists + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + domain_info = result.get_child_by_name('attributes-list').\ + get_child_by_name('net-port-broadcast-domain-info') + domain_name = domain_info.get_child_content('broadcast-domain') + domain_mtu = 
domain_info.get_child_content('mtu') + domain_ipspace = domain_info.get_child_content('ipspace') + domain_ports = domain_info.get_child_by_name('ports') + if domain_ports is not None: + ports = [port.get_child_content('port') for port in domain_ports.get_children()] + else: + ports = [] + domain_exists = { + 'domain-name': domain_name, + 'mtu': int(domain_mtu), + 'ipspace': domain_ipspace, + 'ports': ports + } + return domain_exists + + def get_broadcast_domain_rest(self, broadcast_domain, ipspace): + api = 'network/ethernet/broadcast-domains' + query = {'name': broadcast_domain, 'ipspace.name': ipspace} + fields = 'uuid,name,ipspace,ports,mtu' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg=error) + if record: + current = { + 'name': record['name'], + 'mtu': record['mtu'], + 'ipspace': record['ipspace']['name'], + 'uuid': record['uuid'], + 'ports': [] + } + if 'ports' in record: + current['ports'] = ['%s:%s' % (port['node']['name'], port['name']) for port in record['ports']] + return current + return None + + def create_broadcast_domain(self, ports=None): + """ + Creates a new broadcast domain + """ + if self.use_rest: + return self.create_broadcast_domain_rest(ports) + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-create') + domain_obj.add_new_child("broadcast-domain", self.parameters['name']) + if self.parameters.get('ipspace'): + domain_obj.add_new_child("ipspace", self.parameters['ipspace']) + if self.parameters.get('mtu'): + domain_obj.add_new_child("mtu", str(self.parameters['mtu'])) + if self.parameters.get('ports'): + ports_obj = netapp_utils.zapi.NaElement('ports') + domain_obj.add_child_elem(ports_obj) + for port in self.parameters['ports']: + ports_obj.add_new_child('net-qualified-port-name', port) + try: + self.server.invoke_successfully(domain_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating broadcast 
domain %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def create_broadcast_domain_rest(self, ports=None): + api = 'network/ethernet/broadcast-domains' + body = { + 'name': self.parameters['name'], + 'mtu': self.parameters['mtu'], + 'ipspace': self.parameters['ipspace'] + } + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg=error) + if ports: + self.add_or_move_broadcast_domain_ports_rest(ports) + + def delete_broadcast_domain(self, broadcast_domain=None, current=None): + """ + Deletes a broadcast domain + """ + if self.use_rest: + # all ports should be removed to delete broadcast domain in rest. + if 'ports' in current: + self.remove_broadcast_domain_ports_rest(current['ports'], current['ipspace']) + api = 'network/ethernet/broadcast-domains' + dummy, error = rest_generic.delete_async(self.rest_api, api, current['uuid']) + if error: + self.module.fail_json(msg=error) + else: + if broadcast_domain is None: + broadcast_domain = self.parameters['name'] + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-destroy') + domain_obj.add_new_child("broadcast-domain", broadcast_domain) + if self.parameters.get('ipspace'): + domain_obj.add_new_child("ipspace", self.parameters['ipspace']) + try: + self.server.invoke_successfully(domain_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting broadcast domain %s: %s' % + (broadcast_domain, to_native(error)), + exception=traceback.format_exc()) + + def modify_broadcast_domain(self): + """ + Modifies ipspace and mtu options of a broadcast domain + """ + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-modify') + domain_obj.add_new_child("broadcast-domain", self.parameters['name']) + if self.parameters.get('mtu'): + domain_obj.add_new_child("mtu", str(self.parameters['mtu'])) + if self.parameters.get('ipspace'): + 
domain_obj.add_new_child("ipspace", self.parameters['ipspace']) + try: + self.server.invoke_successfully(domain_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying broadcast domain %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def split_broadcast_domain(self): + """ + split broadcast domain + """ + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-split') + domain_obj.add_new_child("broadcast-domain", self.parameters['from_name']) + domain_obj.add_new_child("new-broadcast-domain", self.parameters['name']) + if self.parameters.get('ports'): + ports_obj = netapp_utils.zapi.NaElement('ports') + domain_obj.add_child_elem(ports_obj) + for port in self.parameters['ports']: + ports_obj.add_new_child('net-qualified-port-name', port) + if self.parameters.get('ipspace'): + domain_obj.add_new_child("ipspace", self.parameters['ipspace']) + try: + self.server.invoke_successfully(domain_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error splitting broadcast domain %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if len(self.get_broadcast_domain_ports(self.parameters['from_name'])) == 0: + self.delete_broadcast_domain(self.parameters['from_name']) + + def modify_broadcast_domain_or_ports(self, modify, current=None): + """ + :param modify: modify attributes. + """ + modify_keys = list(modify.keys()) + domain_modify_options = ['mtu', 'name', 'ipspace'] + if any(x in modify_keys for x in domain_modify_options): + if self.use_rest: + if modify.get('ports'): + del modify['ports'] + self.modify_broadcast_domain_rest(current['uuid'], modify) + # update current ipspace as it required in modifying ports later. 
+ if modify.get('ipspace'): + current['ipspace'] = modify['ipspace'] + else: + self.modify_broadcast_domain() + if 'ports' in modify_keys: + self.modify_broadcast_domain_ports(current) + + def get_modify_attributes(self, current, split): + """ + :param current: current state. + :param split: True or False of split action. + :return: list of modified attributes. + """ + modify = None + if self.parameters['state'] == 'present': + # split already handled ipspace and ports. + if self.parameters.get('from_name'): + if split: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if modify.get('ports'): + del modify['ports'] + else: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + return modify + + def modify_broadcast_domain_ports(self, current=None): + """ + compare current and desired ports. Call add or remove ports methods if needed. + :return: None. + """ + if self.use_rest: + current_ports = current['ports'] + else: + current_ports = self.get_broadcast_domain_ports() + expect_ports = self.parameters['ports'] + # if want to remove all ports, simply delete the broadcast domain. 
+ if len(expect_ports) == 0: + self.delete_broadcast_domain(current=current) + return + ports_to_remove = list(set(current_ports) - set(expect_ports)) + ports_to_add = list(set(expect_ports) - set(current_ports)) + + if len(ports_to_add) > 0: + if self.use_rest: + ports = self.get_ports_rest(ports_to_add) + if ports: + self.add_or_move_broadcast_domain_ports_rest(ports) + else: + self.add_broadcast_domain_ports(ports_to_add) + + if len(ports_to_remove) > 0: + if self.use_rest: + self.remove_broadcast_domain_ports_rest(ports_to_remove, current['ipspace']) + else: + self.delete_broadcast_domain_ports(ports_to_remove) + + def add_broadcast_domain_ports(self, ports): + """ + Creates new broadcast domain ports + """ + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports') + domain_obj.add_new_child("broadcast-domain", self.parameters['name']) + if self.parameters.get('ipspace'): + domain_obj.add_new_child("ipspace", self.parameters['ipspace']) + if ports: + ports_obj = netapp_utils.zapi.NaElement('ports') + domain_obj.add_child_elem(ports_obj) + for port in ports: + ports_obj.add_new_child('net-qualified-port-name', port) + try: + self.server.invoke_successfully(domain_obj, True) + return True + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating port for broadcast domain %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_broadcast_domain_ports(self, ports): + """ + Deletes broadcast domain ports + :param: ports to be deleted. 
+ """ + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports') + domain_obj.add_new_child("broadcast-domain", self.parameters['name']) + if self.parameters.get('ipspace'): + domain_obj.add_new_child("ipspace", self.parameters['ipspace']) + if ports: + ports_obj = netapp_utils.zapi.NaElement('ports') + domain_obj.add_child_elem(ports_obj) + for port in ports: + ports_obj.add_new_child('net-qualified-port-name', port) + try: + self.server.invoke_successfully(domain_obj, True) + return True + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting port for broadcast domain %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def get_broadcast_domain_ports(self, broadcast_domain=None): + """ + Return details about the broadcast domain ports. + :return: Details about the broadcast domain ports. None if not found. + :rtype: list + """ + if broadcast_domain is None: + broadcast_domain = self.parameters['name'] + domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter') + broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info') + broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain) + if self.parameters.get('ipspace'): + broadcast_domain_info.add_new_child('ipspace', self.parameters['ipspace']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(broadcast_domain_info) + domain_get_iter.add_child_elem(query) + result = self.server.invoke_successfully(domain_get_iter, True) + ports = [] + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info') + domain_ports = domain_info.get_child_by_name('ports') + if domain_ports is not None: + ports = [port.get_child_content('port') for port in domain_ports.get_children()] + return ports 
+ + def modify_broadcast_domain_rest(self, uuid, modify): + api = 'network/ethernet/broadcast-domains' + body = {} + # rename broadcast domain. + if 'name' in modify: + body['name'] = modify['name'] + if 'ipspace' in modify: + body['ipspace.name'] = modify['ipspace'] + if 'mtu' in modify: + body['mtu'] = modify['mtu'] + dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body) + if error: + self.module.fail_json(msg=error) + + def add_or_move_broadcast_domain_ports_rest(self, ports): + api = 'network/ethernet/ports' + body = { + 'broadcast_domain': { + 'name': self.parameters['name'], + 'ipspace': {'name': self.parameters['ipspace']} + } + } + for port in ports: + dummy, error = rest_generic.patch_async(self.rest_api, api, port['uuid'], body) + if error: + self.module.fail_json(msg=error) + + def remove_broadcast_domain_ports_rest(self, ports, ipspace): + body = {'ports': ports} + api = "private/cli/network/port/broadcast-domain/remove-ports" + query = {'broadcast-domain': self.parameters['name'], 'ipspace': ipspace} + response, error = rest_generic.patch_async(self.rest_api, api, None, body, query) + if error: + self.module.fail_json(msg='Error removing ports: %s' % error) + + def get_ports_rest(self, ports): + # if desired ports with uuid present then return only the ports to add or move. + if self.desired_ports: + return self.ports_to_add_move_from_desired(ports) + # list of desired ports not present in the node. + missing_ports = [] + # list of uuid information of each desired port should present in broadcast domain. + desired_ports = [] + for port in ports: + current = self.get_net_port_rest(port) + if current is None: + missing_ports.append(port) + else: + desired_ports.append(current) + # Error if any of provided ports are not found. 
+ if missing_ports and self.parameters['state'] == 'present': + self.module.fail_json(msg='Error: ports: %s not found' % ', '.join(missing_ports)) + return desired_ports + + def get_net_port_rest(self, port): + if ':' not in port: + error_msg = "Error: Invalid value specified for port: %s, provide port name as node_name:port_name" % port + self.module.fail_json(msg=error_msg) + port_name = port.split(':')[1] + node = port.split(':')[0] + api = 'network/ethernet/ports' + query = { + 'name': port_name, + 'node.name': node, + } + fields = 'name,uuid' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg=error) + if record: + current = {'uuid': record['uuid'], 'name': record['name']} + return current + return None + + def ports_to_add_move_from_desired(self, ports): + ports_to_add_move = [] + for port in ports: + port_name = port.split(':')[1] + for port_to_add_or_move in self.desired_ports: + if port_name == port_to_add_or_move['name']: + ports_to_add_move.append({'uuid': port_to_add_or_move['uuid']}) + return ports_to_add_move + + def apply(self): + """ + Run Module based on play book + """ + current = self.get_broadcast_domain() + cd_action, split = None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and any(self.parameters.get(attr) is not None for attr in ('from_name', 'from_ipspace')): + # either create new domain or split domain, also ipspace can be modified. 
+ from_name = self.parameters.get('from_name', self.parameters['name']) + from_ipspace = self.parameters.get('from_ipspace', self.parameters.get('ipspace')) + from_current = self.get_broadcast_domain(from_name, from_ipspace) + split = self.na_helper.is_rename_action(from_current, current) + if split is None: + self.module.fail_json(msg='A domain cannot be split if it does not exist.', + exception=traceback.format_exc()) + if split: + cd_action = None + current = from_current + if self.use_rest: + split = False + # check for exact match of ports only if from_name present. + if self.parameters.get('from_name'): + # rename with no change in ports. + if 'ports' not in self.parameters: + self.parameters['ports'] = from_current['ports'] + partial_match = set(from_current['ports']) - set(self.parameters['ports']) + # create new broadcast domain with desired ports (REST will move them over from the other domain if necessary) + if partial_match: + cd_action = 'create' + current = None + # rename with no change in ports. + else: + self.parameters.pop('from_name') + modify = self.get_modify_attributes(current, split) if cd_action is None else {} + if self.na_helper.changed and not self.module.check_mode: + if split: + self.split_broadcast_domain() + if cd_action == 'create': + self.create_broadcast_domain(self.desired_ports) + elif cd_action == 'delete': + self.delete_broadcast_domain(current=current) + elif modify: + self.modify_broadcast_domain_or_ports(modify, current) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp ONTAP Broadcast Domain Object that can be created, deleted and modified. 
+ """ + obj = NetAppOntapBroadcastDomain() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py new file mode 100644 index 000000000..baa949bd3 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_broadcast_domain_ports.py @@ -0,0 +1,224 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +module: na_ontap_broadcast_domain_ports +short_description: NetApp ONTAP manage broadcast domain ports +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Add or remove ONTAP broadcast domain ports. Existing ports that are not listed are kept. +options: + state: + description: + - Whether the specified broadcast domain should exist or not. + choices: ['present', 'absent'] + type: str + default: present + broadcast_domain: + description: + - Specify the broadcast_domain name + required: true + type: str + ipspace: + description: + - Specify the ipspace for the broadcast domain + type: str + ports: + description: + - Specify the list of ports to add to or remove from this broadcast domain. 
+ required: true + type: list + elements: str + +''' + +EXAMPLES = """ + - name: create broadcast domain ports + na_ontap_broadcast_domain_ports: + state=present + username={{ netapp_username }} + password={{ netapp_password }} + hostname={{ netapp_hostname }} + broadcast_domain=123kevin + ports=khutton-vsim1:e0d-13 + - name: delete broadcast domain ports + na_ontap_broadcast_domain_ports: + state=absent + username={{ netapp_username }} + password={{ netapp_password }} + hostname={{ netapp_hostname }} + broadcast_domain=123kevin + ports=khutton-vsim1:e0d-13 +""" + +RETURN = """ + + +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule + + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapBroadcastDomainPorts(object): + """ + Create and Destroys Broadcast Domain Ports + """ + def __init__(self): + """ + Initialize the Ontap Net Route class + """ + self.argument_spec = netapp_utils.na_ontap_zapi_only_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + broadcast_domain=dict(required=True, type='str'), + ipspace=dict(required=False, type='str', default=None), + ports=dict(required=True, type='list', elements='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + parameters = self.module.params + self.na_helper = NetAppModule(self.module) + self.na_helper.module_replaces('na_ontap_ports', self.module) + msg = 'The module only supports ZAPI and is deprecated; netapp.ontap.na_ontap_ports should be used instead.' 
+ self.na_helper.fall_back_to_zapi(self.module, msg, parameters) + + # set up state variables + self.state = parameters['state'] + self.broadcast_domain = parameters['broadcast_domain'] + self.ipspace = parameters['ipspace'] + self.ports = parameters['ports'] + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + return + + def get_broadcast_domain_ports(self): + """ + Return details about the broadcast domain ports + :param: + name : broadcast domain name + :return: Details about the broadcast domain. None if not found. + :rtype: dict + """ + domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter') + broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info') + broadcast_domain_info.add_new_child('broadcast-domain', self.broadcast_domain) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(broadcast_domain_info) + domain_get_iter.add_child_elem(query) + result = self.server.invoke_successfully(domain_get_iter, True) + domain_exists = None + # check if broadcast domain exists + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info') + domain_name = domain_info.get_child_content('broadcast-domain') + domain_ports = domain_info.get_child_by_name('ports') + if domain_ports is not None: + ports = [port.get_child_content('port') for port in domain_ports.get_children()] + else: + ports = [] + domain_exists = { + 'domain-name': domain_name, + 'ports': ports + } + return domain_exists + + def create_broadcast_domain_ports(self, ports): + """ + Creates new broadcast domain ports + """ + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports') + domain_obj.add_new_child("broadcast-domain", 
self.broadcast_domain) + if self.ipspace: + domain_obj.add_new_child("ipspace", self.ipspace) + if ports: + ports_obj = netapp_utils.zapi.NaElement('ports') + domain_obj.add_child_elem(ports_obj) + for port in ports: + ports_obj.add_new_child('net-qualified-port-name', port) + try: + self.server.invoke_successfully(domain_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating port for broadcast domain %s: %s' % + (self.broadcast_domain, to_native(error)), + exception=traceback.format_exc()) + + def delete_broadcast_domain_ports(self, ports): + """ + Deletes broadcast domain ports + """ + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports') + domain_obj.add_new_child("broadcast-domain", self.broadcast_domain) + if self.ipspace: + domain_obj.add_new_child("ipspace", self.ipspace) + if ports: + ports_obj = netapp_utils.zapi.NaElement('ports') + domain_obj.add_child_elem(ports_obj) + for port in ports: + ports_obj.add_new_child('net-qualified-port-name', port) + try: + self.server.invoke_successfully(domain_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting port for broadcast domain %s: %s' % + (self.broadcast_domain, to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + """ + Run Module based on play book + """ + changed = False + broadcast_domain_details = self.get_broadcast_domain_ports() + if broadcast_domain_details is None: + self.module.fail_json(msg='Error broadcast domain not found: %s' % self.broadcast_domain) + if self.state == 'present': # execute create + ports_to_add = [port for port in self.ports if port not in broadcast_domain_details['ports']] + if len(ports_to_add) > 0: + if not self.module.check_mode: + self.create_broadcast_domain_ports(ports_to_add) + changed = True + elif self.state == 'absent': # execute delete + ports_to_delete = [port for port in self.ports if port in 
broadcast_domain_details['ports']] + if len(ports_to_delete) > 0: + if not self.module.check_mode: + self.delete_broadcast_domain_ports(ports_to_delete) + changed = True + + self.module.exit_json(changed=changed) + + +def main(): + """ + Creates the NetApp Ontap Net Route object and runs the correct play task + """ + obj = NetAppOntapBroadcastDomainPorts() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py new file mode 100644 index 000000000..313bf223e --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cg_snapshot.py @@ -0,0 +1,229 @@ +#!/usr/bin/python + +# (c) 2018-2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +short_description: NetApp ONTAP manage consistency group snapshot +author: NetApp Ansible Team (@carchi8py) +description: + - Create consistency group snapshot for ONTAP volumes. + - This module only supports ZAPI and is deprecated. + - The final version of ONTAP to support ZAPI is 9.12.1. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +module: na_ontap_cg_snapshot +options: + state: + description: + - If you want to create a snapshot. + default: present + type: str + vserver: + required: true + type: str + description: + - Name of the vserver. + volumes: + required: true + type: list + elements: str + description: + - A list of volumes in this filer that is part of this CG operation. + snapshot: + required: true + type: str + description: + - The provided name of the snapshot that is created in each volume. + timeout: + description: + - Timeout selector. 
+ choices: ['urgent', 'medium', 'relaxed'] + type: str + default: medium + snapmirror_label: + description: + - A human readable SnapMirror label to be attached with the consistency group snapshot copies. + type: str +version_added: 2.7.0 + +''' + +EXAMPLES = """ + - name: + na_ontap_cg_snapshot: + state: present + vserver: vserver_name + snapshot: snapshot name + volumes: vol_name + username: "{{ netapp username }}" + password: "{{ netapp password }}" + hostname: "{{ netapp hostname }}" +""" + +RETURN = """ +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppONTAPCGSnapshot(object): + """ + Methods to create CG snapshots + """ + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_zapi_only_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', default='present'), + vserver=dict(required=True, type='str'), + volumes=dict(required=True, type='list', elements='str'), + snapshot=dict(required=True, type='str'), + timeout=dict(required=False, type='str', choices=[ + 'urgent', 'medium', 'relaxed'], default='medium'), + snapmirror_label=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=False + ) + + parameters = self.module.params + + # set up variables + self.state = parameters['state'] + self.vserver = parameters['vserver'] + self.volumes = parameters['volumes'] + self.snapshot = parameters['snapshot'] + self.timeout = parameters['timeout'] + self.snapmirror_label = parameters['snapmirror_label'] + self.cgid = None + NetAppModule().module_deprecated(self.module) + if HAS_NETAPP_LIB is False: + self.module.fail_json( + msg="the 
python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi( + module=self.module, vserver=self.vserver) + + def does_snapshot_exist(self, volume): + """ + This is duplicated from na_ontap_snapshot + Checks to see if a snapshot exists or not + :return: Return True if a snapshot exists, false if it dosn't + """ + # TODO: Remove this method and import snapshot module and + # call get after re-factoring __init__ across all the modules + # we aren't importing now, since __init__ does a lot of Ansible setup + snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter") + desired_attr = netapp_utils.zapi.NaElement("desired-attributes") + snapshot_info = netapp_utils.zapi.NaElement('snapshot-info') + comment = netapp_utils.zapi.NaElement('comment') + # add more desired attributes that are allowed to be modified + snapshot_info.add_child_elem(comment) + desired_attr.add_child_elem(snapshot_info) + snapshot_obj.add_child_elem(desired_attr) + # compose query + query = netapp_utils.zapi.NaElement("query") + snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info") + snapshot_info_obj.add_new_child("name", self.snapshot) + snapshot_info_obj.add_new_child("volume", volume) + snapshot_info_obj.add_new_child("vserver", self.vserver) + query.add_child_elem(snapshot_info_obj) + snapshot_obj.add_child_elem(query) + result = self.server.invoke_successfully(snapshot_obj, True) + return_value = None + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + attributes_list = result.get_child_by_name('attributes-list') + snap_info = attributes_list.get_child_by_name('snapshot-info') + return_value = {'comment': snap_info.get_child_content('comment')} + return return_value + + def cgcreate(self): + """ + Calls cg-start and cg-commit (when cg-start succeeds) + """ + started = self.cg_start() + if started: + if self.cgid is not None: + self.cg_commit() + else: + self.module.fail_json(msg="Error 
fetching CG ID for CG commit %s" % self.snapshot, + exception=traceback.format_exc()) + return started + + def cg_start(self): + """ + For the given list of volumes, creates cg-snapshot + """ + snapshot_started = False + cgstart = netapp_utils.zapi.NaElement("cg-start") + cgstart.add_new_child("snapshot", self.snapshot) + cgstart.add_new_child("timeout", self.timeout) + volume_list = netapp_utils.zapi.NaElement("volumes") + cgstart.add_child_elem(volume_list) + for vol in self.volumes: + snapshot_exists = self.does_snapshot_exist(vol) + if snapshot_exists is None: + snapshot_started = True + volume_list.add_new_child("volume-name", vol) + if snapshot_started: + if self.snapmirror_label: + cgstart.add_new_child("snapmirror-label", + self.snapmirror_label) + try: + cgresult = self.server.invoke_successfully( + cgstart, enable_tunneling=True) + if cgresult.get_child_by_name('cg-id'): + self.cgid = cgresult['cg-id'] + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error creating CG snapshot %s: %s" % + (self.snapshot, to_native(error)), + exception=traceback.format_exc()) + return snapshot_started + + def cg_commit(self): + """ + When cg-start is successful, performs a cg-commit with the cg-id + """ + cgcommit = netapp_utils.zapi.NaElement.create_node_with_children( + 'cg-commit', **{'cg-id': self.cgid}) + try: + self.server.invoke_successfully(cgcommit, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error committing CG snapshot %s: %s" % + (self.snapshot, to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + '''Applies action from playbook''' + if not self.module.check_mode: + changed = self.cgcreate() + self.module.exit_json(changed=changed) + + +def main(): + '''Execute action from playbook''' + cg_obj = NetAppONTAPCGSnapshot() + cg_obj.apply() + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py new file mode 100644 index 000000000..b04a37110 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs.py @@ -0,0 +1,563 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# import untangle + +''' +na_ontap_cifs +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Create or destroy or modify(path) cifs-share on ONTAP. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_cifs + +options: + + comment: + description: + - The CIFS share description. + type: str + version_added: 21.7.0 + + path: + description: + - The file system path that is shared through this CIFS share. The path is the full, user visible path relative + to the vserver root, and it might be crossing junction mount points. The path is in UTF8 and uses forward + slash as directory separator. + type: str + + vserver: + description: + - Vserver containing the CIFS share. + required: true + type: str + + name: + description: + - The name of the CIFS share. The CIFS share name is a UTF-8 string with the following characters being + illegal; control characters from 0x00 to 0x1F, both inclusive, 0x22 (double quotes) + required: true + aliases: ['share_name'] + type: str + + share_properties: + description: + - The list of properties for the CIFS share. + - Not supported with REST. + - share-properties are separate fields in the REST API. + - You can achieve this functionality by setting C(access_based_enumeration), C(change_notify), C(encryption), + C(home_directory), C(oplocks), C(show_snapshot), C(continuously_available) and C(namespace_caching). 
+ type: list + elements: str + version_added: 2.8.0 + + symlink_properties: + description: + - The list of symlink properties for this CIFS share. + - Not supported with REST, this option is replaced with C(unix_symlink) in REST. + type: list + elements: str + version_added: 2.8.0 + + state: + choices: ['present', 'absent'] + description: + - Whether the specified CIFS share should exist or not. + type: str + default: present + + vscan_fileop_profile: + choices: ['no_scan', 'standard', 'strict', 'writes_only'] + description: + - Profile_set of file_ops to which vscan on access scanning is applicable. + - Not supported with REST. + type: str + version_added: 2.9.0 + + unix_symlink: + choices: ['local', 'widelink', 'disable'] + description: + - The list of unix_symlink properties for this CIFS share + - This option only supported with REST. + type: str + version_added: 21.19.0 + + access_based_enumeration: + description: + - If enabled, all folders inside this share are visible to a user based on that individual user access right; + prevents the display of folders or other shared resources that the user does not have access to. + - This option only supported with REST. + type: bool + version_added: 22.3.0 + + allow_unencrypted_access: + description: + - Specifies whether or not the SMB2 clients are allowed to access the encrypted share. + - This option requires REST and ONTAP 9.11.0 or later. + type: bool + version_added: 22.3.0 + + change_notify: + description: + - Specifies whether CIFS clients can request for change notifications for directories on this share. + - This option only supported with REST. + type: bool + version_added: 22.3.0 + + encryption: + description: + - Specifies that SMB encryption must be used when accessing this share. Clients that do not support encryption are not + able to access this share. + - This option only supported with REST. 
+ type: bool + version_added: 22.3.0 + + home_directory: + description: + - Specifies whether or not the share is a home directory share, where the share and path names are dynamic. + - ONTAP home directory functionality automatically offer each user a dynamic share to their home directory without creating an + individual SMB share for each user. + - This feature enable us to configure a share that maps to different directories based on the user that connects to it + - Instead of creating a separate shares for each user, a single share with a home directory parameters can be created. + - In a home directory share, ONTAP dynamically generates the share-name and share-path by substituting + %w, %u, and %d variables with the corresponding Windows user name, UNIX user name, and domain name, respectively. + - This option only supported with REST and cannot modify. + type: bool + version_added: 22.3.0 + + namespace_caching: + description: + - Specifies whether or not the SMB clients connecting to this share can cache the directory enumeration + results returned by the CIFS servers. + - This option requires REST and ONTAP 9.10.1 or later. + type: bool + version_added: 22.3.0 + + oplocks: + description: + - Specify whether opportunistic locks are enabled on this share. "Oplocks" allow clients to lock files and cache content locally, + which can increase performance for file operations. + - Only supported with REST. + type: bool + version_added: 22.3.0 + + show_snapshot: + description: + - Specifies whether or not the Snapshot copies can be viewed and traversed by clients. + - This option requires REST and ONTAP 9.10.1 or later. + type: bool + version_added: 22.3.0 + + continuously_available : + description: + - Specifies whether or not the clients connecting to this share can open files in a persistent manner. + - Files opened in this way are protected from disruptive events, such as, failover and giveback. + - This option requires REST and ONTAP 9.10.1 or later. 
+ type: bool + version_added: 22.3.0 + + browsable: + description: + - Specifies whether or not the Windows clients can browse the share. + - This option requires REST and ONTAP 9.13.1 or later. + type: bool + version_added: 22.5.0 + + show_previous_versions: + description: + - Specifies that the previous version can be viewed and restored from the client. + - This option requires REST and ONTAP 9.13.1 or later. + type: bool + version_added: 22.5.0 + +short_description: NetApp ONTAP Manage cifs-share +version_added: 2.6.0 + +''' + +EXAMPLES = """ + - name: Create CIFS share - ZAPI + netapp.ontap.na_ontap_cifs: + state: present + name: cifsShareName + path: / + vserver: vserverName + share_properties: browsable,oplocks + symlink_properties: read_only,enable + comment: CIFS share description + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete CIFS share - ZAPI + netapp.ontap.na_ontap_cifs: + state: absent + name: cifsShareName + vserver: vserverName + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify path CIFS share - ZAPI + netapp.ontap.na_ontap_cifs: + state: present + name: pb_test + vserver: vserverName + path: / + share_properties: show_previous_versions + symlink_properties: disable + vscan_fileop_profile: no_scan + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create CIFS share - REST + netapp.ontap.na_ontap_cifs: + state: present + name: cifsShareName + path: / + vserver: vserverName + oplocks: true + change_notify: true + unix_symlink: disable + comment: CIFS share description + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify CIFS share - REST + netapp.ontap.na_ontap_cifs: + state: present + name: cifsShareName + path: / + vserver: vserverName + oplocks: true + 
class NetAppONTAPCifsShare:
    """
    Create, delete, or modify (path and properties) a CIFS share.

    Uses the REST API when the cluster and the requested options allow it,
    otherwise falls back to ZAPI.
    """

    def __init__(self):
        """Define the argument spec, validate REST/ZAPI support for the requested options, and set up the connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str', aliases=['share_name']),
            path=dict(required=False, type='str'),
            comment=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
            unix_symlink=dict(required=False, type='str', choices=['local', 'widelink', 'disable']),
            share_properties=dict(required=False, type='list', elements='str'),
            symlink_properties=dict(required=False, type='list', elements='str'),
            vscan_fileop_profile=dict(required=False, type='str', choices=['no_scan', 'standard', 'strict', 'writes_only']),
            access_based_enumeration=dict(required=False, type='bool'),
            change_notify=dict(required=False, type='bool'),
            encryption=dict(required=False, type='bool'),
            home_directory=dict(required=False, type='bool'),
            oplocks=dict(required=False, type='bool'),
            show_snapshot=dict(required=False, type='bool'),
            allow_unencrypted_access=dict(required=False, type='bool'),
            namespace_caching=dict(required=False, type='bool'),
            continuously_available=dict(required=False, type='bool'),
            browsable=dict(required=False, type='bool'),
            show_previous_versions=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # Set up Rest API.  Some options require a minimum ONTAP version, and
        # some are only available through ZAPI; is_rest_supported_properties
        # decides which transport to use for the given parameters.
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        partially_supported_rest_properties = [['continuously_available', (9, 10, 1)], ['namespace_caching', (9, 10, 1)],
                                               ['show_snapshot', (9, 10, 1)], ['allow_unencrypted_access', (9, 11)],
                                               ['browsable', (9, 13, 1)], ['show_previous_versions', (9, 13, 1)]]
        unsupported_rest_properties = ['share_properties', 'symlink_properties', 'vscan_fileop_profile']
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
        self.unsupported_zapi_properties = ['unix_symlink', 'access_based_enumeration', 'change_notify', 'encryption', 'home_directory',
                                            'oplocks', 'continuously_available', 'show_snapshot', 'namespace_caching', 'allow_unencrypted_access',
                                            'browsable', 'show_previous_versions']
        self.svm_uuid = None
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            for unsupported_zapi_property in self.unsupported_zapi_properties:
                if self.parameters.get(unsupported_zapi_property) is not None:
                    msg = "Error: %s option is not supported with ZAPI. It can only be used with REST." % unsupported_zapi_property
                    self.module.fail_json(msg=msg)
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_cifs_share(self):
        """
        Return details about the cifs-share.

        :return: Details about the cifs-share. None if not found.
        :rtype: dict
        """
        if self.use_rest:
            return self.get_cifs_share_rest()
        cifs_iter = netapp_utils.zapi.NaElement('cifs-share-get-iter')
        cifs_info = netapp_utils.zapi.NaElement('cifs-share')
        cifs_info.add_new_child('share-name', self.parameters.get('name'))
        cifs_info.add_new_child('vserver', self.parameters.get('vserver'))

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(cifs_info)

        cifs_iter.add_child_elem(query)

        result = self.server.invoke_successfully(cifs_iter, True)

        return_value = None
        # check if query returns the expected cifs-share
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            properties_list = []
            symlink_list = []
            cifs_attrs = result.get_child_by_name('attributes-list').\
                get_child_by_name('cifs-share')
            if cifs_attrs.get_child_by_name('share-properties'):
                properties_attrs = cifs_attrs['share-properties']
                if properties_attrs is not None:
                    # 'prop' rather than 'property' to avoid shadowing the built-in
                    properties_list = [prop.get_content() for prop in properties_attrs.get_children()]
            if cifs_attrs.get_child_by_name('symlink-properties'):
                symlink_attrs = cifs_attrs['symlink-properties']
                if symlink_attrs is not None:
                    symlink_list = [symlink.get_content() for symlink in symlink_attrs.get_children()]
            return_value = {
                'share': cifs_attrs.get_child_content('share-name'),
                'path': cifs_attrs.get_child_content('path'),
                'share_properties': properties_list,
                'symlink_properties': symlink_list
            }
            value = cifs_attrs.get_child_content('comment')
            # normalize a missing comment to '' so it compares cleanly with the desired value
            return_value['comment'] = value if value is not None else ''
            if cifs_attrs.get_child_by_name('vscan-fileop-profile'):
                return_value['vscan_fileop_profile'] = cifs_attrs['vscan-fileop-profile']

        return return_value

    def create_cifs_share(self):
        """
        Create CIFS share (ZAPI).
        """
        options = {'share-name': self.parameters.get('name'),
                   'path': self.parameters.get('path')}
        cifs_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-create', **options)
        self.create_modify_cifs_share(cifs_create, 'creating')

    def create_modify_cifs_share(self, zapi_request, action):
        """
        Attach the optional share attributes to zapi_request and invoke it.

        :param zapi_request: the cifs-share-create or cifs-share-modify NaElement.
        :param action: 'creating' or 'modifying' - used only in the error message.
        """
        if self.parameters.get('share_properties'):
            property_attrs = netapp_utils.zapi.NaElement('share-properties')
            zapi_request.add_child_elem(property_attrs)
            for aproperty in self.parameters.get('share_properties'):
                property_attrs.add_new_child('cifs-share-properties', aproperty)
        if self.parameters.get('symlink_properties'):
            symlink_attrs = netapp_utils.zapi.NaElement('symlink-properties')
            zapi_request.add_child_elem(symlink_attrs)
            for symlink in self.parameters.get('symlink_properties'):
                symlink_attrs.add_new_child('cifs-share-symlink-properties', symlink)
        if self.parameters.get('vscan_fileop_profile'):
            fileop_attrs = netapp_utils.zapi.NaElement('vscan-fileop-profile')
            fileop_attrs.set_content(self.parameters['vscan_fileop_profile'])
            zapi_request.add_child_elem(fileop_attrs)
        if self.parameters.get('comment'):
            zapi_request.add_new_child('comment', self.parameters['comment'])

        try:
            self.server.invoke_successfully(zapi_request,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error %s cifs-share %s: %s'
                                      % (action, self.parameters.get('name'), to_native(error)),
                                  exception=traceback.format_exc())

    def delete_cifs_share(self):
        """
        Delete CIFS share (ZAPI).
        """
        cifs_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-delete', **{'share-name': self.parameters.get('name')})

        try:
            self.server.invoke_successfully(cifs_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting cifs-share %s: %s'
                                      % (self.parameters.get('name'), to_native(error)),
                                  exception=traceback.format_exc())

    def modify_cifs_share(self):
        """
        Modify path for the given CIFS share (ZAPI).
        """
        options = {'share-name': self.parameters.get('name')}
        cifs_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-modify', **options)
        if self.parameters.get('path'):
            cifs_modify.add_new_child('path', self.parameters.get('path'))
        self.create_modify_cifs_share(cifs_modify, 'modifying')

    def get_cifs_share_rest(self):
        """
        Get details of the CIFS share with REST API.
        """
        options = {'svm.name': self.parameters.get('vserver'),
                   'name': self.parameters.get('name'),
                   'fields': 'svm.uuid,'
                             'name,'
                             'path,'
                             'comment,'
                             'unix_symlink,'
                             'access_based_enumeration,'
                             'change_notify,'
                             'encryption,'
                             'oplocks,'}
        # version-gated fields are only requested when the cluster supports them
        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
            options['fields'] += 'show_snapshot,namespace_caching,continuously_available,'
        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 0):
            options['fields'] += 'allow_unencrypted_access,'
        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 13, 1):
            options['fields'] += 'browsable,show_previous_versions,'
        api = 'protocols/cifs/shares'
        record, error = rest_generic.get_one_record(self.rest_api, api, options)
        if error:
            self.module.fail_json(msg="Error on fetching cifs shares: %s" % error)
        if record:
            # remember the owning SVM for later delete/modify calls
            self.svm_uuid = record['svm']['uuid']
            return {
                'path': record['path'],
                'comment': record.get('comment', ''),
                'unix_symlink': record.get('unix_symlink', ''),
                'access_based_enumeration': record.get('access_based_enumeration'),
                'change_notify': record.get('change_notify'),
                'encryption': record.get('encryption'),
                'oplocks': record.get('oplocks'),
                'continuously_available': record.get('continuously_available'),
                'show_snapshot': record.get('show_snapshot'),
                'namespace_caching': record.get('namespace_caching'),
                'allow_unencrypted_access': record.get('allow_unencrypted_access'),
                'browsable': record.get('browsable'),
                'show_previous_versions': record.get('show_previous_versions')
            }
        return None

    def create_modify_body_rest(self, params=None):
        """
        Build the REST request body.

        :param params: the modify dict; when None, self.parameters is used (create).
        :return: dict with only the REST-supported share attributes that are present.
        """
        body = {}
        # modify is set in params, if not assign self.parameters for create.
        if params is None:
            params = self.parameters
        options = ['path', 'comment', 'unix_symlink', 'access_based_enumeration', 'change_notify', 'encryption',
                   'home_directory', 'oplocks', 'continuously_available', 'show_snapshot', 'namespace_caching',
                   'allow_unencrypted_access', 'browsable', 'show_previous_versions']
        for key in options:
            if key in params:
                body[key] = params[key]
        return body

    def create_cifs_share_rest(self):
        """
        Create CIFS share with REST API.
        """
        if not self.use_rest:
            return self.create_cifs_share()
        # body already carries 'path' when it is provided (see create_modify_body_rest)
        body = self.create_modify_body_rest()
        if 'vserver' in self.parameters:
            body['svm.name'] = self.parameters['vserver']
        if 'name' in self.parameters:
            body['name'] = self.parameters['name']
        api = 'protocols/cifs/shares'
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error is not None:
            self.module.fail_json(msg="Error on creating cifs shares: %s" % error)

    def delete_cifs_share_rest(self):
        """
        Delete CIFS share with REST API.
        """
        if not self.use_rest:
            return self.delete_cifs_share()
        body = {'name': self.parameters.get('name')}
        api = 'protocols/cifs/shares'
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.svm_uuid, body)
        if error is not None:
            # message had a stray leading space; aligned with the sibling messages
            self.module.fail_json(msg="Error on deleting cifs shares: %s" % error)

    def modify_cifs_share_rest(self, modify):
        """
        Modify the given CIFS share with REST API.

        :param modify: dict of attributes to change, as computed by get_modified_attributes.
        """
        if not self.use_rest:
            return self.modify_cifs_share()
        api = 'protocols/cifs/shares/%s' % self.svm_uuid
        body = self.create_modify_body_rest(modify)
        if body:
            dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body)
            if error is not None:
                self.module.fail_json(msg="Error on modifying cifs shares: %s" % error)

    def apply(self):
        '''Apply action to cifs share'''
        current = self.get_cifs_share()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # ZAPI accepts both 'show-previous-versions' and 'show_previous_versions', but only returns the latter
        if not self.use_rest and cd_action is None and 'show-previous-versions' in self.parameters.get('share_properties', [])\
           and current and 'show_previous_versions' in current.get('share_properties', []):
            self.parameters['share_properties'].remove('show-previous-versions')
            self.parameters['share_properties'].append('show_previous_versions')
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_cifs_share_rest()
            elif cd_action == 'delete':
                self.delete_cifs_share_rest()
            elif modify:
                self.modify_cifs_share_rest(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)


def main():
    '''Execute action from playbook'''
    cifs_obj = NetAppONTAPCifsShare()
    cifs_obj.apply()


if __name__ == '__main__':
    main()
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - "Create or destroy or modify cifs-share-access-controls on ONTAP" +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_cifs_acl +options: + permission: + choices: ['no_access', 'read', 'change', 'full_control'] + type: str + description: + - The access rights that the user or group has on the defined CIFS share. + share_name: + description: + - The name of the cifs-share-access-control to manage. + required: true + type: str + aliases: ['share'] + state: + choices: ['present', 'absent'] + description: + - Whether the specified CIFS share acl should exist or not. + default: present + type: str + vserver: + description: + - Name of the vserver to use. + required: true + type: str + user_or_group: + description: + - The user or group name for which the permissions are listed. + required: true + type: str + type: + description: + - The type (also known as user-group-type) of the user or group to add to the ACL. + - Type is required for create, delete and modify unix-user or unix-group to/from the ACL in ZAPI. 
class NetAppONTAPCifsAcl:
    """
    Create, delete, or modify the share-level access control of a CIFS share.

    Uses the REST API when available, otherwise ZAPI.  Only the permission of
    an existing ACL entry can be modified; ONTAP does not support changing
    its user-group type (see get_modify).
    """

    def __init__(self):
        """Define the argument spec and set up the REST or ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            share_name=dict(required=True, type='str', aliases=['share']),
            user_or_group=dict(required=True, type='str'),
            permission=dict(required=False, type='str', choices=['no_access', 'read', 'change', 'full_control']),
            type=dict(required=False, type='str', choices=['windows', 'unix_user', 'unix_group']),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                # permission is mandatory when the entry should exist
                ('state', 'present', ['permission'])
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Set up Rest API
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if netapp_utils.has_netapp_lib() is False:
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def _acl_options(self, include_permission):
        """
        Build the ZAPI options dict shared by the create/delete/modify calls.

        :param include_permission: include the 'permission' key (create/modify);
            cifs-share-access-control-delete does not take it.
        """
        options = {
            'share': self.parameters['share_name'],
            'user-or-group': self.parameters['user_or_group']
        }
        if include_permission:
            options['permission'] = self.parameters['permission']
        # type is required for unix-user and unix-group
        if self.parameters.get('type') is not None:
            options['user-group-type'] = self.parameters['type']
        return options

    def _acl_rest_path(self, current):
        """Return the REST endpoint addressing one ACL entry of the share."""
        return 'protocols/cifs/shares/%s/%s/acls/%s/%s' % (
            current['uuid'], self.parameters.get('share_name'), self.parameters.get('user_or_group'), current.get('type'))

    def get_cifs_acl(self):
        """
        Return details about the cifs-share-access-control (ZAPI).

        :return: Details about the cifs-share-access-control. None if not found.
        :rtype: dict
        """
        cifs_acl_iter = netapp_utils.zapi.NaElement('cifs-share-access-control-get-iter')
        cifs_acl_info = netapp_utils.zapi.NaElement('cifs-share-access-control')
        cifs_acl_info.add_new_child('share', self.parameters['share_name'])
        cifs_acl_info.add_new_child('user-or-group', self.parameters['user_or_group'])
        cifs_acl_info.add_new_child('vserver', self.parameters['vserver'])
        if self.parameters.get('type') is not None:
            cifs_acl_info.add_new_child('user-group-type', self.parameters['type'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(cifs_acl_info)
        cifs_acl_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(cifs_acl_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting cifs-share-access-control %s: %s'
                                      % (self.parameters['share_name'], to_native(error)))
        return_value = None
        # check if query returns the expected cifs-share-access-control
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:

            cifs_acl = result.get_child_by_name('attributes-list').get_child_by_name('cifs-share-access-control')
            return_value = {
                'share': cifs_acl.get_child_content('share'),
                'user-or-group': cifs_acl.get_child_content('user-or-group'),
                'permission': cifs_acl.get_child_content('permission'),
                'type': cifs_acl.get_child_content('user-group-type'),
            }
        return return_value

    def create_cifs_acl(self):
        """
        Create access control for the given CIFS share/user-group (ZAPI).
        """
        cifs_acl_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-access-control-create', **self._acl_options(include_permission=True))
        try:
            self.server.invoke_successfully(cifs_acl_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating cifs-share-access-control %s: %s'
                                      % (self.parameters['share_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_cifs_acl(self):
        """
        Delete access control for the given CIFS share/user-group (ZAPI).
        """
        cifs_acl_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-access-control-delete', **self._acl_options(include_permission=False))
        try:
            self.server.invoke_successfully(cifs_acl_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting cifs-share-access-control %s: %s'
                                      % (self.parameters['share_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_cifs_acl_permission(self):
        """
        Change permission for the given CIFS share/user-group (ZAPI).
        """
        cifs_acl_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'cifs-share-access-control-modify', **self._acl_options(include_permission=True))
        try:
            self.server.invoke_successfully(cifs_acl_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying cifs-share-access-control permission %s: %s'
                                      % (self.parameters['share_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_modify(self, current):
        """
        Return the modify dict when only 'permission' changed; fail for any
        other difference (ONTAP cannot change the type of an existing entry).
        """
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if not modify or ('permission' in modify and len(modify) == 1):
            return modify
        if 'type' in modify:
            self.module.fail_json(msg='Error: changing the type is not supported by ONTAP - current: %s, desired: %s'
                                      % (current['type'], self.parameters['type']))
        self.module.fail_json(msg='Error: only permission can be changed - modify: %s' % modify)

    def get_cifs_share_rest(self):
        """
        Get uuid of the svm which has CIFS share with REST API.
        """
        options = {'svm.name': self.parameters.get('vserver'),
                   'name': self.parameters.get('share_name')}
        api = 'protocols/cifs/shares'
        fields = 'svm.uuid,name'
        record, error = rest_generic.get_one_record(self.rest_api, api, options, fields)
        if error:
            self.module.fail_json(msg="Error on fetching cifs shares: %s" % error)
        if record:
            return {'uuid': record['svm']['uuid']}
        self.module.fail_json(msg="Error: the cifs share does not exist: %s" % self.parameters['share_name'])

    def get_cifs_acl_rest(self, svm_uuid):
        """
        Get details of the CIFS share acl with REST API.

        :param svm_uuid: dict with the owning SVM's 'uuid', as returned by get_cifs_share_rest.
        """
        if not self.use_rest:
            return self.get_cifs_acl()
        query = {'user_or_group': self.parameters.get('user_or_group')}
        ug_type = self.parameters.get('type')
        if ug_type:
            query['type'] = ug_type
        api = 'protocols/cifs/shares/%s/%s/acls' % (svm_uuid['uuid'], self.parameters.get('share_name'))
        fields = 'svm.uuid,user_or_group,type,permission'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg="Error on fetching cifs shares acl: %s" % error)
        if record:
            return {
                'uuid': record['svm']['uuid'],
                'share': record['share'],
                'user_or_group': record['user_or_group'],
                'type': record['type'],
                'permission': record['permission']
            }
        return None

    def create_cifs_acl_rest(self, svm_uuid):
        """
        Create CIFS share acl with REST API.
        """
        if not self.use_rest:
            return self.create_cifs_acl()
        body = {
            'user_or_group': self.parameters.get('user_or_group'),
            'permission': self.parameters.get('permission')
        }
        ug_type = self.parameters.get('type')
        if ug_type:
            body['type'] = ug_type
        api = 'protocols/cifs/shares/%s/%s/acls' % (svm_uuid['uuid'], self.parameters.get('share_name'))
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error is not None:
            self.module.fail_json(msg="Error on creating cifs share acl: %s" % error)

    def delete_cifs_acl_rest(self, current):
        """
        Delete access control for the given CIFS share/user-group with REST API.
        """
        if not self.use_rest:
            return self.delete_cifs_acl()
        body = {'svm.name': self.parameters.get('vserver')}
        dummy, error = rest_generic.delete_async(self.rest_api, self._acl_rest_path(current), None, body)
        if error is not None:
            self.module.fail_json(msg="Error on deleting cifs share acl: %s" % error)

    def modify_cifs_acl_permission_rest(self, current):
        """
        Change permission for the given CIFS share/user-group with REST API.
        """
        if not self.use_rest:
            return self.modify_cifs_acl_permission()
        body = {'permission': self.parameters.get('permission')}
        dummy, error = rest_generic.patch_async(self.rest_api, self._acl_rest_path(current), None, body)
        if error is not None:
            self.module.fail_json(msg="Error modifying cifs share ACL permission: %s" % error)

    def apply(self):
        """
        Apply action to cifs-share-access-control
        """
        svm_uuid = self.get_cifs_share_rest() if self.use_rest else None
        current = self.get_cifs_acl_rest(svm_uuid)
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.get_modify(current) if cd_action is None and self.parameters['state'] == 'present' else None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_cifs_acl_rest(svm_uuid)
            if cd_action == 'delete':
                self.delete_cifs_acl_rest(current)
            if modify:
                self.modify_cifs_acl_permission_rest(current)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)


def main():
    """
    Execute action from playbook
    """
    cifs_acl = NetAppONTAPCifsAcl()
    cifs_acl.apply()


if __name__ == '__main__':
    main()
a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group.py new file mode 100644 index 000000000..42a552a41 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group.py @@ -0,0 +1,235 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_cifs_local_group +short_description: NetApp Ontap - create, delete or modify CIFS local group. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '22.1.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete or modify CIFS local group. +options: + state: + description: + - Whether the specified member should be part of the CIFS local group + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - Specifies the vserver that owns the CIFS local group + required: true + type: str + + name: + description: + - Specifies name of the CIFS local group + required: true + type: str + + from_name: + description: + - Specifies the existing cifs local group name. + - This option is used to rename cifs local group. + type: str + + description: + description: + - Description for the local group. 
+ type: str +""" + +EXAMPLES = """ + - name: create CIFS local group + netapp.ontap.na_ontap_cifs_local_group: + state: present + vserver: svm1 + name: BUILTIN\\administrators + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Delete CIFS local group + netapp.ontap.na_ontap_cifs_local_group: + state: absent + vserver: svm1 + name: BUILTIN\\administrators + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Modify CIFS local group description + netapp.ontap.na_ontap_cifs_local_group: + state: present + vserver: svm1 + name: BUILTIN\\administrators + descrition: 'CIFS local group' + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Rename CIFS local group description + netapp.ontap.na_ontap_cifs_local_group: + state: present + vserver: svm1 + name: ANSIBLE_CIFS\\test_users + descrition: 'CIFS local group' + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false +""" + +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapCifsLocalGroup: + """ + Create, delete or modify CIFS local group + """ + def __init__(self): + """ + Initialize the Ontap CifsLocalGroup class + """ + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + 
description=dict(required=False, type='str'), + from_name=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_cifs_local_group', 9, 10, 1) + self.svm_uuid = None + self.sid = None + + def get_cifs_local_group_rest(self, from_name=None): + """ + Retrieves the local group of an SVM. + """ + api = "protocols/cifs/local-groups" + query = { + 'name': from_name or self.parameters['name'], + 'svm.name': self.parameters['vserver'], + 'fields': 'svm.uuid,sid,description' + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error on fetching cifs local-group: %s" % error) + if record: + self.svm_uuid = self.na_helper.safe_get(record, ['svm', 'uuid']) + self.sid = self.na_helper.safe_get(record, ['sid']) + return { + 'name': self.na_helper.safe_get(record, ['name']), + 'description': record.get('description', ''), + } + return None + + def create_cifs_local_group_rest(self): + """ + Creates the local group of an SVM. + """ + api = "protocols/cifs/local-groups" + body = { + 'name': self.parameters['name'], + 'svm.name': self.parameters['vserver'] + } + if 'description' in self.parameters: + body['description'] = self.parameters['description'] + record, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error on creating cifs local-group: %s" % error) + + def delete_cifs_local_group_rest(self): + """ + Destroy the local group of an SVM. 
+ """ + api = "protocols/cifs/local-groups/%s/%s" % (self.svm_uuid, self.sid) + record, error = rest_generic.delete_async(self.rest_api, api, None) + if error: + self.module.fail_json(msg="Error on deleting cifs local-group: %s" % error) + + def modify_cifs_local_group_rest(self, modify): + """ + Modify the description of CIFS local group. + Rename cifs local group. + """ + body = {} + if 'description' in modify: + body['description'] = self.parameters['description'] + if 'name' in modify: + body['name'] = self.parameters['name'] + api = "protocols/cifs/local-groups/%s/%s" % (self.svm_uuid, self.sid) + dummy, error = rest_generic.patch_async(self.rest_api, api, None, body) + if error is not None: + self.module.fail_json(msg="Error on modifying cifs local-group: %s" % error) + + def apply(self): + current = self.get_cifs_local_group_rest() + rename = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and 'from_name' in self.parameters: + group_info = self.get_cifs_local_group_rest(self.parameters['from_name']) + rename = self.na_helper.is_rename_action(group_info, current) + if rename: + current = group_info + cd_action = None + else: + self.module.fail_json(msg='Error renaming cifs local group: %s - no cifs local group with from_name: %s.' 
+ % (self.parameters['name'], self.parameters['from_name'])) + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_cifs_local_group_rest() + elif cd_action == 'delete': + self.delete_cifs_local_group_rest() + if modify or rename: + self.modify_cifs_local_group_rest(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap Cifs Local Group object and runs the correct play task + """ + obj = NetAppOntapCifsLocalGroup() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group_member.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group_member.py new file mode 100644 index 000000000..3003bd3bf --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_group_member.py @@ -0,0 +1,292 @@ +#!/usr/bin/python + +# (c) 2021-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +module: na_ontap_cifs_local_group_member +short_description: NetApp Ontap - Add or remove CIFS local group member +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.2.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Add or remove CIFS local group member +options: + state: + description: + - Whether the specified member should be part of the CIFS local group + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - Specifies the vserver that owns the CIFS local group + required: true + type: str + + group: + description: + - Specifies name of the CIFS 
local group + required: true + type: str + + member: + description: + - Specifies the name of the member + required: true + type: str + +notes: + - Supports check_mode. + - Supported with ZAPI. + - Supported with REST starting with ONTAP 9.10.1. +""" + +EXAMPLES = """ + - name: Add member to CIFS local group + netapp.ontap.na_ontap_cifs_local_group_member: + state: present + vserver: svm1 + group: BUILTIN\\administrators + member: DOMAIN\\Domain Admins + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ontapi: "{{ ontap_facts.ontap_version }}" + https: true + validate_certs: false + + - name: Remove member from CIFS local group + netapp.ontap.na_ontap_cifs_local_group_member: + state: absent + vserver: svm1 + group: BUILTIN\\administrators + member: DOMAIN\\Domain Admins + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ontapi: "{{ ontap_facts.ontap_version }}" + https: true + validate_certs: false +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapCifsLocalGroupMember: + """ + Add or remove CIFS local group members + """ + def __init__(self): + """ + Initialize the Ontap CifsLocalGroupMember class + """ + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + group=dict(required=True, type='str'), + member=dict(required=True, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + 
# set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + msg = 'REST requires ONTAP 9.10.1 or later for cifs_local_group_member APIs.' + self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + self.svm_uuid = None + self.sid = None + + if not self.use_rest: + if netapp_utils.has_netapp_lib() is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_cifs_local_group_rest(self): + """ + Retrieves the local group of an SVM. + """ + api = "protocols/cifs/local-groups" + query = { + 'name': self.parameters['group'], + 'svm.name': self.parameters['vserver'], + 'fields': 'svm.uuid,sid' + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error on fetching cifs local-group: %s" % error) + if record: + self.svm_uuid = self.na_helper.safe_get(record, ['svm', 'uuid']) + self.sid = self.na_helper.safe_get(record, ['sid']) + if record is None: + self.module.fail_json( + msg='CIFS local group %s does not exist on vserver %s' % + (self.parameters['group'], self.parameters['vserver']) + ) + + def get_cifs_local_group_member(self): + """ + Retrieves local users, Active Directory users and + Active Directory groups which are members of the specified local group and SVM. 
+ """ + return_value = None + + if self.use_rest: + self.get_cifs_local_group_rest() + api = 'protocols/cifs/local-groups/%s/%s/members' % (self.svm_uuid, self.sid) + query = { + 'name': self.parameters['member'], + 'svm.name': self.parameters['vserver'], + 'fields': 'name', + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json( + msg='Error getting CIFS local group members for group %s on vserver %s: %s' % + (self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + if record: + return { + 'member': self.na_helper.safe_get(record, ['name']) + } + return record + + else: + group_members_get_iter = netapp_utils.zapi.NaElement('cifs-local-group-members-get-iter') + group_members_info = netapp_utils.zapi.NaElement('cifs-local-group-members') + group_members_info.add_new_child('group-name', self.parameters['group']) + group_members_info.add_new_child('vserver', self.parameters['vserver']) + group_members_info.add_new_child('member', self.parameters['member']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(group_members_info) + group_members_get_iter.add_child_elem(query) + + try: + result = self.server.invoke_successfully(group_members_get_iter, True) + if result.get_child_by_name('attributes-list'): + group_member_policy_attributes = result['attributes-list']['cifs-local-group-members'] + + return_value = { + 'group': group_member_policy_attributes['group-name'], + 'member': group_member_policy_attributes['member'], + 'vserver': group_member_policy_attributes['vserver'] + } + + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error getting CIFS local group members for group %s on vserver %s: %s' % + (self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + return return_value + + def add_cifs_local_group_member(self): + """ + Adds a member to a 
CIFS local group + """ + if self.use_rest: + api = 'protocols/cifs/local-groups/%s/%s/members' % (self.svm_uuid, self.sid) + body = {'name': self.parameters['member']} + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json( + msg='Error adding member %s to cifs local group %s on vserver %s: %s' % + (self.parameters['member'], self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + else: + group_members_obj = netapp_utils.zapi.NaElement("cifs-local-group-members-add-members") + group_members_obj.add_new_child("group-name", self.parameters['group']) + member_names = netapp_utils.zapi.NaElement("member-names") + member_names.add_new_child('cifs-name', self.parameters['member']) + group_members_obj.add_child_elem(member_names) + + try: + self.server.invoke_successfully(group_members_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error adding member %s to cifs local group %s on vserver %s: %s' % + (self.parameters['member'], self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + def remove_cifs_local_group_member(self): + """ + Removes a member from a CIFS local group + """ + if self.use_rest: + api = 'protocols/cifs/local-groups/%s/%s/members' % (self.svm_uuid, self.sid) + body = {'name': self.parameters['member']} + dummy, error = rest_generic.delete_async(self.rest_api, api, None, body) + if error: + self.module.fail_json( + msg='Error removing member %s from cifs local group %s on vserver %s: %s' % + (self.parameters['member'], self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + else: + group_members_obj = netapp_utils.zapi.NaElement("cifs-local-group-members-remove-members") + group_members_obj.add_new_child("group-name", self.parameters['group']) + member_names = 
netapp_utils.zapi.NaElement("member-names") + member_names.add_new_child('cifs-name', self.parameters['member']) + group_members_obj.add_child_elem(member_names) + + try: + self.server.invoke_successfully(group_members_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error removing member %s from cifs local group %s on vserver %s: %s' % + (self.parameters['member'], self.parameters['group'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + def apply(self): + current = self.get_cifs_local_group_member() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if self.na_helper.changed: + if not self.module.check_mode: + if cd_action == 'create': + self.add_cifs_local_group_member() + elif cd_action == 'delete': + self.remove_cifs_local_group_member() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap Cifs Local Group Member object and runs the correct play task + """ + obj = NetAppOntapCifsLocalGroupMember() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user.py new file mode 100644 index 000000000..c594e76bb --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user.py @@ -0,0 +1,244 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_cifs_local_user +short_description: NetApp ONTAP local CIFS user. 
+extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '22.2.0' +author: NetApp Ansible Team (@carchi8py) + +description: + - Create/Modify/Delete a local CIFS user +options: + state: + choices: ['present', 'absent'] + description: + - Whether the specified CIFS share should exist or not. + type: str + default: present + + name: + description: + - The name of the local cifs user + required: true + type: str + + vserver: + description: + - the name of the data vserver to use. + required: true + type: str + + account_disabled: + description: + - Whether the local cifs user is disabled or not + type: bool + + description: + description: + - the description for the local cifs user + type: str + + full_name: + description: + - the full name for the local cifs user + type: str + + user_password: + description: + - Password for new user + type: str + + set_password: + description: + - Modify the existing user password + - Module is not idempotent when set to True + type: bool + default: False + ''' + +EXAMPLES = """ + - name: create local cifs user + netapp.ontap.na_ontap_cifs_local_user: + state: present + vserver: ansibleSVM_cifs + name: carchi-cifs2 + user_password: mypassword + account_disabled: False + full_name: Chris Archibald + description: A user account for Chris + + - name: modify local cifs user + netapp.ontap.na_ontap_cifs_local_user: + state: present + vserver: ansibleSVM_cifs + name: carchi-cifs2 + account_disabled: False + full_name: Christopher Archibald + description: A user account for Chris Archibald + + - name: Change local cifs user password + netapp.ontap.na_ontap_cifs_local_user: + state: present + vserver: ansibleSVM_cifs + name: carchi-cifs2 + user_password: mypassword2 + set_password: True + account_disabled: False + full_name: Christopher Archibald + description: A user account for Chris Archibald + + - name: delete local cifs user + netapp.ontap.na_ontap_cifs_local_user: + state: absent + vserver: ansibleSVM_cifs + 
name: carchi-cifs2 +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver + + +class NetAppOntapCifsLocalUser: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + vserver=dict(required=True, type='str'), + account_disabled=dict(required=False, type='bool'), + full_name=dict(required=False, type='str'), + description=dict(required=False, type='str'), + user_password=dict(required=False, type='str', no_log=True), + set_password=dict(required=False, type='bool', default=False) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.svm_uuid = None + self.sid = None + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_cifs_local_user', 9, 10, 1) + + def get_cifs_local_user(self): + self.get_svm_uuid() + api = 'protocols/cifs/local-users' + fields = 'account_disabled,description,full_name,name,sid' + params = {'svm.uuid': self.svm_uuid, 'name': self.parameters['name'], 'fields': fields} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching cifs/local-user %s: %s' % (self.parameters['name'], to_native(error)), + 
exception=traceback.format_exc()) + if record: + return self.format_record(record) + return None + + def get_svm_uuid(self): + self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + + def format_record(self, record): + self.sid = record['sid'] + try: + record['name'] = record['name'].split('\\')[1] + except SyntaxError: + self.module.fail_json(msg='Error fetching cifs/local-user') + return record + + def create_cifs_local_user(self): + api = 'protocols/cifs/local-users' + body = { + 'svm.uuid': self.svm_uuid, + 'name': self.parameters['name'], + } + if self.parameters.get('user_password') is not None: + body['password'] = self.parameters['user_password'] + if self.parameters.get('full_name') is not None: + body['full_name'] = self.parameters['full_name'] + if self.parameters.get('description') is not None: + body['description'] = self.parameters['description'] + if self.parameters.get('account_disabled') is not None: + body['account_disabled'] = self.parameters['account_disabled'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error creating CIFS local users with name %s: %s" % (self.parameters['name'], error)) + + def delete_cifs_local_user(self): + api = 'protocols/cifs/local-users' + uuids = '%s/%s' % (self.svm_uuid, self.sid) + dummy, error = rest_generic.delete_async(self.rest_api, api, uuids) + if error: + self.module.fail_json(msg='Error while deleting CIFS local user: %s' % error) + + def modify_cifs_local_user(self, modify): + api = 'protocols/cifs/local-users' + uuids = '%s/%s' % (self.svm_uuid, self.sid) + body = {} + if modify.get('full_name') is not None: + body['full_name'] = self.parameters['full_name'] + if modify.get('description') is not None: + body['description'] = self.parameters['description'] + if modify.get('account_disabled') is not None: + body['account_disabled'] = self.parameters['account_disabled'] + if 
self.parameters['set_password'] and modify.get('user_password') is not None: + body['password'] = self.parameters['user_password'] + dummy, error = rest_generic.patch_async(self.rest_api, api, uuids, body) + if error: + self.module.fail_json(msg='Error while modifying CIFS local user: %s' % error) + + def apply(self): + current = self.get_cifs_local_user() + modify = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.parameters['set_password'] and self.parameters.get('user_password') is not None: + if not modify: + modify = {} + self.na_helper.changed = True + modify.update({'user_password': self.parameters['user_password']}) + self.module.warn("forcing a password change as set_password is true") + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_cifs_local_user() + elif cd_action == 'delete': + self.delete_cifs_local_user() + elif modify: + self.modify_cifs_local_user(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapCifsLocalUser() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_modify.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_modify.py new file mode 100644 index 000000000..0e43bd078 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_modify.py @@ -0,0 +1,235 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = 
{'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +module: na_ontap_cifs_local_user_modify +short_description: NetApp ONTAP modify local CIFS user. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.4.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Modify a local CIFS user +options: + name: + description: + - The name of the local cifs user + required: true + type: str + + vserver: + description: + - the name of the data vserver to use. + required: true + type: str + + is_account_disabled: + description: + - Whether the local cifs user is disabled or not + type: bool + + description: + description: + - the description for the local cifs user + type: str + + full_name: + description: + - the full name for the local cifs user + type: str + ''' + +EXAMPLES = """ + - name: Enable local CIFS Administrator account + na_ontap_cifs_local_user_modify: + name: BUILTIN\\administrators + vserver: ansible + is_account_disabled: false + username: '{{ username }}' + password: '{{ password }}' + hostname: '{{ hostname }}' + + - name: Disable local CIFS Administrator account + na_ontap_cifs_local_user_modify: + name: BUILTIN\\administrators + vserver: ansible + is_account_disabled: true + username: '{{ username }}' + password: '{{ password }}' + hostname: '{{ hostname }}' + +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapCifsLocalUserModify(): + def __init__(self): + + self.argument_spec = 
netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=True, type='str'), + vserver=dict(required=True, type='str'), + is_account_disabled=dict(required=False, type='bool'), + full_name=dict(required=False, type='str'), + description=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + self.module.warn('This module is deprecated and na_ontap_cifs_local_user should be used instead') + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_cifs_local_user(self): + """ + Return a CIFS local user + :return: None if there is no CIFS local user matching + """ + return_value = None + if self.use_rest: + api = "private/cli/vserver/cifs/users-and-groups/local-user" + query = { + 'fields': 'user-name,full-name,is-account-disabled,description', + 'user-name': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + record, error = rest_generic.get_one_record(self.rest_api, api, query=query) + if error: + self.module.fail_json(msg=error) + if record: + return_value = { + 'name': record['user_name'], + 'is_account_disabled': record['is_account_disabled'], + 'vserver': record['vserver'], + 'description': record.get('description', ''), + 'full_name': record.get('full_name', '') + } + else: + cifs_local_user_obj = netapp_utils.zapi.NaElement('cifs-local-user-get-iter') + cifs_local_user_info = netapp_utils.zapi.NaElement('cifs-local-user') + cifs_local_user_info.add_new_child('user-name', self.parameters['name']) + query = 
netapp_utils.zapi.NaElement('query') + query.add_child_elem(cifs_local_user_info) + cifs_local_user_obj.add_child_elem(query) + try: + result = self.server.invoke_successfully(cifs_local_user_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting user %s on vserver %s: %s' % + (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()) + + if result.get_child_by_name('attributes-list'): + local_cifs_user_attributes = result['attributes-list']['cifs-local-user'] + + return_value = { + 'name': local_cifs_user_attributes['user-name'], + 'is_account_disabled': self.na_helper.get_value_for_bool(from_zapi=True, value=local_cifs_user_attributes['is-account-disabled']), + 'vserver': local_cifs_user_attributes['vserver'], + 'full_name': '', + 'description': '', + } + + if local_cifs_user_attributes['full-name']: + return_value['full_name'] = local_cifs_user_attributes['full-name'] + + if local_cifs_user_attributes['description']: + return_value['description'] = local_cifs_user_attributes['description'] + + return return_value + + def modify_cifs_local_user(self, modify): + """ + Modifies a local cifs user + :return: None + """ + if self.use_rest: + api = "private/cli/vserver/cifs/users-and-groups/local-user" + query = { + "user-name": self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + + dummy, error = self.rest_api.patch(api, modify, query) + if error: + self.module.fail_json(msg=error, modify=modify) + else: + cifs_local_user_obj = netapp_utils.zapi.NaElement("cifs-local-user-modify") + cifs_local_user_obj.add_new_child('user-name', self.parameters['name']) + cifs_local_user_obj.add_new_child('is-account-disabled', + self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_account_disabled'])) + + if 'full_name' in self.parameters: + cifs_local_user_obj.add_new_child('full-name', self.parameters['full_name']) + + if 'description' in 
self.parameters: + cifs_local_user_obj.add_new_child('description', self.parameters['description']) + + try: + self.server.invoke_successfully(cifs_local_user_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error modifying local CIFS user %s on vserver %s: %s" % + (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()) + + def apply(self): + current = self.get_cifs_local_user() + if not current: + error = "User %s does not exist on vserver %s" % (self.parameters['name'], self.parameters['vserver']) + self.module.fail_json(msg=error) + + if self.use_rest: + # name is a key, and REST does not allow to change it + # it should match anyway, but REST may prepend the domain name + self.parameters['name'] = current['name'] + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed and not self.module.check_mode: + self.modify_cifs_local_user(modify) + + result = netapp_utils.generate_result(self.na_helper.changed, modify=modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapCifsLocalUserModify() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_set_password.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_set_password.py new file mode 100644 index 000000000..00f6d0804 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_local_user_set_password.py @@ -0,0 +1,162 @@ +#!/usr/bin/python + +# (c) 2021-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_cifs_local_user_set_password +short_description: NetApp ONTAP set local CIFS user 
password +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.8.0' +author: NetApp Ansible Team (@carchi8py) + +description: + - Sets the password for the specified local user. + - NOTE - This module is not idempotent. + - Password must meet the following criteria + - The password must be at least six characters in length. + - The password must not contain user account name. + - The password must contain characters from three of the following four + - English uppercase characters (A through Z) + - English lowercase characters (a through z) + - Base 10 digits (0 through 9) + - Special characters + +options: + vserver: + description: + - name of the vserver. + required: true + type: str + + user_name: + description: + - The name of the local CIFS user to set the password for. + required: true + type: str + + user_password: + description: + - The password to set for the local CIFS user. + required: true + type: str +''' + +EXAMPLES = ''' + - name: Set local CIFS pasword for BUILTIN Administrator account + netapp.ontap.na_ontap_cifs_local_user_set_password: + user_name: Administrator + user_password: Test123! + vserver: ansible + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver + + +class NetAppONTAPCifsSetPassword: + ''' + Set CIFS local user password. 
+ ''' + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + vserver=dict(required=True, type='str'), + user_name=dict(required=True, type='str'), + user_password=dict(required=True, type='str', no_log=True) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.svm_uuid = None + + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + msg = 'REST requires ONTAP 9.10.1 or later for protocols/cifs/local-users APIs.' + self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def cifs_local_set_passwd(self): + """ + :return: None + """ + if self.use_rest: + return self.cifs_local_set_passwd_rest() + cifs_local_set_passwd = netapp_utils.zapi.NaElement('cifs-local-user-set-password') + cifs_local_set_passwd.add_new_child('user-name', self.parameters['user_name']) + cifs_local_set_passwd.add_new_child('user-password', self.parameters['user_password']) + + try: + self.server.invoke_successfully(cifs_local_set_passwd, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error setting password for local CIFS user %s on vserver %s: %s' + % (self.parameters['user_name'], self.parameters['vserver'], to_native(e)), + exception=traceback.format_exc()) + + def cifs_local_set_passwd_rest(self): + self.get_svm_uuid() + sid = self.get_user_sid() + api = 'protocols/cifs/local-users' + uuids = '%s/%s' % (self.svm_uuid, sid) 
+ body = {'password': self.parameters['user_password']} + dummy, error = rest_generic.patch_async(self.rest_api, api, uuids, body, job_timeout=120) + if error: + self.module.fail_json(msg='Error change password for user %s: %s' % (self.parameters['user_name'], to_native(error)), + exception=traceback.format_exc()) + + def get_svm_uuid(self): + self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + + def get_user_sid(self): + api = 'protocols/cifs/local-users' + fields = 'sid' + params = {'svm.uuid': self.svm_uuid, 'name': self.parameters['user_name'], 'fields': fields} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching cifs/local-user %s: %s' % (self.parameters['user_name'], to_native(error)), + exception=traceback.format_exc()) + if record: + return record['sid'] + self.module.fail_json(msg='Error no cifs/local-user with name %s' % (self.parameters['user_name'])) + + def apply(self): + changed = True + if not self.module.check_mode: + self.cifs_local_set_passwd() + + self.module.exit_json(changed=changed) + + +def main(): + obj = NetAppONTAPCifsSetPassword() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py new file mode 100644 index 000000000..8a65dd6c5 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cifs_server.py @@ -0,0 +1,619 @@ +#!/usr/bin/python +""" this is cifs_server module + + (c) 2018-2022, NetApp, Inc + # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: na_ontap_cifs_server +short_description: NetApp ONTAP CIFS server configuration 
+extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Creating / deleting and modifying the CIFS server . + +options: + + state: + description: + - Whether the specified cifs_server should exist or not. + default: present + choices: ['present', 'absent'] + type: str + + service_state: + description: + - CIFS Server Administrative Status. + choices: ['stopped', 'started'] + type: str + + name: + description: + - Specifies the cifs_server name. + required: true + aliases: ['cifs_server_name'] + type: str + + admin_user_name: + description: + - Specifies the cifs server admin username. + - When used with absent, the account will be deleted if admin_password is also provided. + type: str + + admin_password: + description: + - Specifies the cifs server admin password. + - When used with absent, the account will be deleted if admin_user_name is also provided. + type: str + + domain: + description: + - The Fully Qualified Domain Name of the Windows Active Directory this CIFS server belongs to. + type: str + + workgroup: + description: + - The NetBIOS name of the domain or workgroup this CIFS server belongs to. + type: str + + ou: + description: + - The Organizational Unit (OU) within the Windows Active Directory this CIFS server belongs to. + version_added: 2.7.0 + type: str + + force: + type: bool + description: + - When state is present, if this is set and a machine account with the same name as specified in 'name' exists in the Active Directory, + it will be overwritten and reused. + - When state is absent, if this is set, the local CIFS configuration is deleted regardless of communication errors. + - For REST, it requires ontap version 9.11. + version_added: 2.7.0 + + vserver: + description: + - The name of the vserver to use. + required: true + type: str + + from_name: + description: + - Specifies the existing cifs_server name. 
+ - This option is used to rename cifs_server. + - Supported only in REST and requires force to be set to True. + - Requires ontap version 9.11.0. + - if the service is running, it will be stopped to perform the rename action, and automatically restarts. + - if the service is stopped, it will be briefly restarted after the rename action, and stopped again. + type: str + version_added: 21.19.0 + + encrypt_dc_connection: + description: + - Specifies whether encryption is required for domain controller connections. + - Only supported with REST and requires ontap version 9.8 or later. Use na_ontap_vserver_cifs_security with ZAPI. + type: bool + version_added: 21.20.0 + + kdc_encryption: + description: + - Specifies whether AES-128 and AES-256 encryption is enabled for all Kerberos-based communication with the Active Directory KDC. + - Only supported with REST. Use na_ontap_vserver_cifs_security with ZAPI. + type: bool + version_added: 21.20.0 + + smb_encryption: + description: + - Determine whether SMB encryption is required for incoming CIFS traffic. + - Only supported with REST. Use na_ontap_vserver_cifs_security with ZAPI. + type: bool + version_added: 21.20.0 + + smb_signing: + description: + - Specifies whether signing is required for incoming CIFS traffic. + - Only supported with REST. Use na_ontap_vserver_cifs_security with ZAPI. + type: bool + version_added: 21.20.0 + + restrict_anonymous: + description: + - Specifies what level of access an anonymous user is granted. + - Only supported with REST. + choices: ['no_enumeration', 'no_restriction', 'no_access'] + type: str + version_added: 21.20.0 + + aes_netlogon_enabled: + description: + - Specifies whether or not an AES session key is enabled for the Netlogon channel. + - Only supported with REST and requires ontap version 9.10.1 or later. + type: bool + version_added: 21.20.0 + + ldap_referral_enabled: + description: + - Specifies whether or not LDAP referral chasing is enabled for AD LDAP connections. 
+ - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI. + type: bool + version_added: 21.20.0 + + use_ldaps: + description: + - Specifies whether or not to use use LDAPS for secure Active Directory LDAP connections. + - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI. + type: bool + version_added: 21.20.0 + + use_start_tls: + description: + - Specifies whether or not to use SSL/TLS for allowing secure LDAP communication with Active Directory LDAP servers. + - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI. + type: bool + version_added: 21.20.0 + + try_ldap_channel_binding: + description: + - Specifies whether or not channel binding is attempted in the case of TLS/LDAPS. + - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI. + type: bool + version_added: 21.20.0 + + session_security: + description: + - Specifies client session security for AD LDAP connections. + - Only supported with REST and requires ontap version 9.10.1 or later. Use na_ontap_vserver_cifs_security with ZAPI. 
+ choices: ['none', 'sign', 'seal'] + type: str + version_added: 21.20.0 + +''' + +EXAMPLES = ''' + - name: Create cifs_server + netapp.ontap.na_ontap_cifs_server: + state: present + name: data2 + vserver: svm1 + service_state: stopped + domain: "{{ id_domain }}" + admin_user_name: "{{ domain_login }}" + admin_password: "{{ domain_pwd }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete cifs_server + netapp.ontap.na_ontap_cifs_server: + state: absent + name: data2 + vserver: svm1 + admin_user_name: "{{ domain_login }}" + admin_password: "{{ domain_pwd }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Start cifs_server + netapp.ontap.na_ontap_cifs_server: + state: present + name: data2 + vserver: svm1 + service_state: started + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Stop cifs_server + netapp.ontap.na_ontap_cifs_server: + state: present + name: data2 + vserver: svm1 + service_state: stopped + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Rename cifs_server - REST + netapp.ontap.na_ontap_cifs_server: + state: present + from_name: data2 + name: cifs + vserver: svm1 + force: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify cifs_server security - REST + netapp.ontap.na_ontap_cifs_server: + state: present + name: data2 + vserver: svm1 + service_state: stopped + encrypt_dc_connection: True, + smb_encryption: True, + kdc_encryption: True, + smb_signing: True, + aes_netlogon_enabled: True, + ldap_referral_enabled: True, + session_security: seal, + try_ldap_channel_binding: False, + use_ldaps: True, + use_start_tls": True + restrict_anonymous: no_access + domain: "{{ id_domain }}" + 
admin_user_name: "{{ domain_login }}" + admin_password: "{{ domain_pwd }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +''' + +RETURN = ''' +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapcifsServer: + """ + object to describe cifs_server info + """ + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + service_state=dict(required=False, choices=['stopped', 'started']), + name=dict(required=True, type='str', aliases=['cifs_server_name']), + workgroup=dict(required=False, type='str', default=None), + domain=dict(required=False, type='str'), + admin_user_name=dict(required=False, type='str'), + admin_password=dict(required=False, type='str', no_log=True), + ou=dict(required=False, type='str'), + force=dict(required=False, type='bool'), + vserver=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + smb_signing=dict(required=False, type='bool'), + encrypt_dc_connection=dict(required=False, type='bool'), + kdc_encryption=dict(required=False, type='bool'), + smb_encryption=dict(required=False, type='bool'), + restrict_anonymous=dict(required=False, type='str', choices=['no_enumeration', 'no_restriction', 'no_access']), + aes_netlogon_enabled=dict(required=False, type='bool'), + ldap_referral_enabled=dict(required=False, type='bool'), + session_security=dict(required=False, type='str', 
choices=['none', 'sign', 'seal']), + try_ldap_channel_binding=dict(required=False, type='bool'), + use_ldaps=dict(required=False, type='bool'), + use_start_tls=dict(required=False, type='bool') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[('use_ldaps', 'use_start_tls')] + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.parameters['cifs_server_name'] = self.parameters['name'] + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = ['workgroup'] + partially_supported_rest_properties = [['encrypt_dc_connection', (9, 8)], ['aes_netlogon_enabled', (9, 10, 1)], ['ldap_referral_enabled', (9, 10, 1)], + ['session_security', (9, 10, 1)], ['try_ldap_channel_binding', (9, 10, 1)], ['use_ldaps', (9, 10, 1)], + ['use_start_tls', (9, 10, 1)], ['force', (9, 11)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties) + + if not self.use_rest: + unsupported_zapi_properties = ['smb_signing', 'encrypt_dc_connection', 'kdc_encryption', 'smb_encryption', 'restrict_anonymous', + 'aes_netlogon_enabled', 'ldap_referral_enabled', 'try_ldap_channel_binding', 'session_security', + 'use_ldaps', 'use_start_tls', 'from_name'] + used_unsupported_zapi_properties = [option for option in unsupported_zapi_properties if option in self.parameters] + if used_unsupported_zapi_properties: + self.module.fail_json(msg="Error: %s options supported only with REST." 
% " ,".join(used_unsupported_zapi_properties)) + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_cifs_server(self): + """ + Return details about the CIFS-server + :param: + name : Name of the name of the cifs_server + + :return: Details about the cifs_server. None if not found. + :rtype: dict + """ + cifs_server_info = netapp_utils.zapi.NaElement('cifs-server-get-iter') + cifs_server_attributes = netapp_utils.zapi.NaElement('cifs-server-config') + cifs_server_attributes.add_new_child('cifs-server', self.parameters['cifs_server_name']) + cifs_server_attributes.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(cifs_server_attributes) + cifs_server_info.add_child_elem(query) + result = self.server.invoke_successfully(cifs_server_info, True) + return_value = None + + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) >= 1: + + cifs_server_attributes = result.get_child_by_name('attributes-list').\ + get_child_by_name('cifs-server-config') + service_state = cifs_server_attributes.get_child_content('administrative-status') + return_value = { + 'cifs_server_name': self.parameters['cifs_server_name'], + 'service_state': 'started' if service_state == 'up' else 'stopped' + } + return return_value + + def create_cifs_server(self): + """ + calling zapi to create cifs_server + """ + options = {'cifs-server': self.parameters['cifs_server_name']} + if 'service_state' in self.parameters: + options['administrative-status'] = 'up' if self.parameters['service_state'] == 'started' else 'down' + if 'workgroup' in self.parameters: + options['workgroup'] = self.parameters['workgroup'] + if 'domain' in self.parameters: + options['domain'] = self.parameters['domain'] + if 'admin_user_name' in 
self.parameters: + options['admin-username'] = self.parameters['admin_user_name'] + if 'admin_password' in self.parameters: + options['admin-password'] = self.parameters['admin_password'] + if 'ou' in self.parameters: + options['organizational-unit'] = self.parameters['ou'] + if 'force' in self.parameters: + options['force-account-overwrite'] = str(self.parameters['force']).lower() + + cifs_server_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'cifs-server-create', **options) + + try: + self.server.invoke_successfully(cifs_server_create, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error Creating cifs_server %s: %s' % + (self.parameters['cifs_server_name'], to_native(exc)), exception=traceback.format_exc()) + + def delete_cifs_server(self): + """ + calling zapi to create cifs_server + """ + options = {} + if 'admin_user_name' in self.parameters: + options['admin-username'] = self.parameters['admin_user_name'] + if 'admin_password' in self.parameters: + options['admin-password'] = self.parameters['admin_password'] + if 'force' in self.parameters: + options['force-account-delete'] = str(self.parameters['force']).lower() + + if options: + cifs_server_delete = netapp_utils.zapi.NaElement.create_node_with_children('cifs-server-delete', **options) + else: + cifs_server_delete = netapp_utils.zapi.NaElement.create_node_with_children('cifs-server-delete') + + try: + self.server.invoke_successfully(cifs_server_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error deleting cifs_server %s: %s' % (self.parameters['cifs_server_name'], to_native(exc)), + exception=traceback.format_exc()) + + def start_cifs_server(self): + """ + RModify the cifs_server. 
+ """ + cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children( + 'cifs-server-start') + try: + self.server.invoke_successfully(cifs_server_modify, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.parameters['cifs_server_name'], to_native(e)), + exception=traceback.format_exc()) + + def stop_cifs_server(self): + """ + RModify the cifs_server. + """ + cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children( + 'cifs-server-stop') + try: + self.server.invoke_successfully(cifs_server_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.parameters['cifs_server_name'], to_native(e)), + exception=traceback.format_exc()) + + def get_cifs_server_rest(self, from_name=None): + """ + get details of the cifs_server. + """ + if not self.use_rest: + return self.get_cifs_server() + query = {'svm.name': self.parameters['vserver'], + 'fields': 'svm.uuid,' + 'enabled,' + 'security.smb_encryption,' + 'security.kdc_encryption,' + 'security.smb_signing,' + 'security.restrict_anonymous,'} + query['name'] = from_name or self.parameters['cifs_server_name'] + api = 'protocols/cifs/services' + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8): + query['fields'] += 'security.encrypt_dc_connection,' + + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + security_option_9_10 = ('security.use_ldaps,' + 'security.use_start_tls,' + 'security.try_ldap_channel_binding,' + 'security.session_security,' + 'security.ldap_referral_enabled,' + 'security.aes_netlogon_enabled,') + query['fields'] += security_option_9_10 + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error on fetching cifs: %s" % error) + if record: + record['service_state'] = 'started' if record.pop('enabled') else 
'stopped' + return { + 'svm': {'uuid': self.na_helper.safe_get(record, ['svm', 'uuid'])}, + 'cifs_server_name': self.na_helper.safe_get(record, ['name']), + 'service_state': self.na_helper.safe_get(record, ['service_state']), + 'smb_signing': self.na_helper.safe_get(record, ['security', 'smb_signing']), + 'encrypt_dc_connection': self.na_helper.safe_get(record, ['security', 'encrypt_dc_connection']), + 'kdc_encryption': self.na_helper.safe_get(record, ['security', 'kdc_encryption']), + 'smb_encryption': self.na_helper.safe_get(record, ['security', 'smb_encryption']), + 'aes_netlogon_enabled': self.na_helper.safe_get(record, ['security', 'aes_netlogon_enabled']), + 'ldap_referral_enabled': self.na_helper.safe_get(record, ['security', 'ldap_referral_enabled']), + 'session_security': self.na_helper.safe_get(record, ['security', 'session_security']), + 'try_ldap_channel_binding': self.na_helper.safe_get(record, ['security', 'try_ldap_channel_binding']), + 'use_ldaps': self.na_helper.safe_get(record, ['security', 'use_ldaps']), + 'use_start_tls': self.na_helper.safe_get(record, ['security', 'use_start_tls']), + 'restrict_anonymous': self.na_helper.safe_get(record, ['security', 'restrict_anonymous']) + } + return record + + def build_ad_domain(self): + ad_domain = {} + if 'admin_user_name' in self.parameters: + ad_domain['user'] = self.parameters['admin_user_name'] + if 'admin_password' in self.parameters: + ad_domain['password'] = self.parameters['admin_password'] + if 'ou' in self.parameters: + ad_domain['organizational_unit'] = self.parameters['ou'] + if 'domain' in self.parameters: + ad_domain['fqdn'] = self.parameters['domain'] + return ad_domain + + def create_modify_body_rest(self, params=None): + """ + Function to define body for create and modify cifs server + """ + body, query, security = {}, {}, {} + if params is None: + params = self.parameters + security_options = ['smb_signing', 'encrypt_dc_connection', 'kdc_encryption', 'smb_encryption', 
'restrict_anonymous', + 'aes_netlogon_enabled', 'ldap_referral_enabled', 'try_ldap_channel_binding', 'session_security', 'use_ldaps', 'use_start_tls'] + ad_domain = self.build_ad_domain() + if ad_domain: + body['ad_domain'] = ad_domain + if 'force' in self.parameters: + query['force'] = self.parameters['force'] + for key in security_options: + if key in params: + security[key] = params[key] + if security: + body['security'] = security + if 'vserver' in params: + body['svm.name'] = params['vserver'] + if 'cifs_server_name' in params: + body['name'] = self.parameters['cifs_server_name'] + if 'service_state' in params: + body['enabled'] = params['service_state'] == 'started' + return body, query + + def create_cifs_server_rest(self): + """ + create the cifs_server. + """ + if not self.use_rest: + return self.create_cifs_server() + body, query = self.create_modify_body_rest() + api = 'protocols/cifs/services' + dummy, error = rest_generic.post_async(self.rest_api, api, body, query) + if error is not None: + self.module.fail_json(msg="Error on creating cifs: %s" % error) + + def delete_cifs_server_rest(self, current): + """ + delete the cifs_server. + """ + if not self.use_rest: + return self.delete_cifs_server() + ad_domain = self.build_ad_domain() + body = {'ad_domain': ad_domain} if ad_domain else None + query = {} + if 'force' in self.parameters: + query['force'] = self.parameters['force'] + api = 'protocols/cifs/services' + dummy, error = rest_generic.delete_async(self.rest_api, api, current['svm']['uuid'], query, body=body) + if error is not None: + self.module.fail_json(msg="Error on deleting cifs server: %s" % error) + + def modify_cifs_server_rest(self, current, modify): + """ + Modify the state of CIFS server. 
+ rename: cifs server should be in stopped state + """ + if not self.use_rest: + return self.modify_cifs_server() + body, query = self.create_modify_body_rest(modify) + api = 'protocols/cifs/services' + dummy, error = rest_generic.patch_async(self.rest_api, api, current['svm']['uuid'], body, query) + if error is not None: + self.module.fail_json(msg="Error on modifying cifs server: %s" % error) + + def modify_cifs_server(self): + """ + Start or stop cifs server in ZAPI. + """ + if self.parameters.get('service_state') == 'stopped': + self.stop_cifs_server() + else: + self.start_cifs_server() + + def apply(self): + current = self.get_cifs_server_rest() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and 'from_name' in self.parameters: + current = self.get_cifs_server_rest(self.parameters['from_name']) + if current is None: + self.module.fail_json(msg='Error renaming cifs server: %s - no cifs server with from_name: %s.' + % (self.parameters['name'], self.parameters['from_name'])) + if not self.parameters.get('force'): + self.module.fail_json(msg='Error: cannot rename cifs server from %s to %s without force.' + % (self.parameters['from_name'], self.parameters['name'])) + # rename is handled in modify in REST. + cd_action = None + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_cifs_server_rest() + elif cd_action == 'delete': + self.delete_cifs_server_rest(current) + else: + self.modify_cifs_server_rest(current, modify) + # rename will enable the cifs server also, so disable it if service_state is stopped. 
+ if 'cifs_server_name' in modify and self.parameters.get('service_state') == 'stopped': + self.modify_cifs_server_rest(current, {'service_state': 'stopped'}) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + cifs_server = NetAppOntapcifsServer() + cifs_server.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py new file mode 100644 index 000000000..fb0f507fc --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster.py @@ -0,0 +1,776 @@ +#!/usr/bin/python + +# (c) 2017-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_cluster +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: na_ontap_cluster +short_description: NetApp ONTAP cluster - create a cluster and add/remove nodes. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create ONTAP cluster. + - Add or remove cluster nodes using cluster_ip_address. + - Adding a node requires ONTAP 9.3 or better. + - Removing a node requires ONTAP 9.4 or better. +options: + state: + description: + - Whether the specified cluster should exist (deleting a cluster is not supported). + - Whether the node identified by its cluster_ip_address should be in the cluster or not. + choices: ['present', 'absent'] + type: str + default: present + cluster_name: + description: + - The name of the cluster to manage. + type: str + cluster_ip_address: + description: + - intra cluster IP address of the node to be added or removed. + type: str + single_node_cluster: + description: + - Whether the cluster is a single node cluster. 
Ignored for 9.3 or older versions. + - If present, it was observed that 'Cluster' interfaces were deleted, whatever the value with ZAPI. + version_added: 19.11.0 + type: bool + cluster_location: + description: + - Cluster location, only relevant if performing a modify action. + version_added: 19.11.0 + type: str + cluster_contact: + description: + - Cluster contact, only relevant if performing a modify action. + version_added: 19.11.0 + type: str + node_name: + description: + - Name of the node to be added or removed from the cluster. + - Be aware that when adding a node, '-' are converted to '_' by the ONTAP backend. + - When creating a cluster, C(node_name) is ignored. + - When adding a node using C(cluster_ip_address), C(node_name) is optional. + - When used to remove a node, C(cluster_ip_address) and C(node_name) are mutually exclusive. + version_added: 20.9.0 + type: str + time_out: + description: + - time to wait for cluster creation in seconds. + - Error out if task is not completed in defined time. + - if 0, the request is asynchronous. + - default is set to 3 minutes. + default: 180 + type: int + version_added: 21.1.0 + force: + description: + - forcibly remove a node that is down and cannot be brought online to remove its shared resources. + default: false + type: bool + version_added: 21.13.0 + timezone: + description: timezone for the cluster. Only supported by REST. 
+ type: dict + version_added: 21.24.0 + suboptions: + name: + type: str + description: + - The timezone name must be + - A geographic region, usually expressed as area/location + - Greenwich Mean Time (GMT) or the difference in hours from GMT + - A valid alias; that is, a term defined by the standard to refer to a geographic region or GMT + - A system-specific or other term not associated with a geographic region or GMT + - "full list of supported alias can be found here: https://library.netapp.com/ecmdocs/ECMP1155590/html/GUID-D3B8A525-67A2-4BEE-99DB-EF52D6744B5F.html" + - Only supported by REST + +notes: + - supports REST and ZAPI +''' + +EXAMPLES = """ + - name: Create cluster + netapp.ontap.na_ontap_cluster: + state: present + cluster_name: new_cluster + time_out: 0 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Add node to cluster (Join cluster) + netapp.ontap.na_ontap_cluster: + state: present + cluster_ip_address: 10.10.10.10 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Add node to cluster (Join cluster) + netapp.ontap.na_ontap_cluster: + state: present + cluster_ip_address: 10.10.10.10 + node_name: my_preferred_node_name + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Create a 2 node cluster in one call + netapp.ontap.na_ontap_cluster: + state: present + cluster_name: new_cluster + cluster_ip_address: 10.10.10.10 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Remove node from cluster + netapp.ontap.na_ontap_cluster: + state: absent + cluster_ip_address: 10.10.10.10 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Remove node from cluster + netapp.ontap.na_ontap_cluster: + state: absent + node_name: node002 + 
hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: modify cluster + netapp.ontap.na_ontap_cluster: + state: present + cluster_contact: testing + cluster_location: testing + cluster_name: "{{ netapp_cluster}}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" + +import time +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPCluster: + """ + object initialize and class methods + """ + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + cluster_name=dict(required=False, type='str'), + cluster_ip_address=dict(required=False, type='str'), + cluster_location=dict(required=False, type='str'), + cluster_contact=dict(required=False, type='str'), + force=dict(required=False, type='bool', default=False), + single_node_cluster=dict(required=False, type='bool'), + node_name=dict(required=False, type='str'), + time_out=dict(required=False, type='int', default=180), + timezone=dict(required=False, type='dict', options=dict( + name=dict(type='str') + )) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.warnings = [] + # cached, so that we don't call the REST API more than once + 
self.node_records = None + + if self.parameters['state'] == 'absent' and self.parameters.get('node_name') is not None and self.parameters.get('cluster_ip_address') is not None: + msg = 'when state is "absent", parameters are mutually exclusive: cluster_ip_address|node_name' + self.module.fail_json(msg=msg) + + if self.parameters.get('node_name') is not None and '-' in self.parameters.get('node_name'): + self.warnings.append('ONTAP ZAPI converts "-" to "_", node_name: %s may be changed or not matched' % self.parameters.get('node_name')) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + if self.use_rest and self.parameters['state'] == 'absent' and not self.rest_api.meets_rest_minimum_version(True, 9, 7, 0): + self.module.warn('switching back to ZAPI as DELETE is not supported on 9.6') + self.use_rest = False + if not self.use_rest: + if self.na_helper.safe_get(self.parameters, ['timezone', 'name']): + self.module.fail_json(msg='Timezone is only supported with REST') + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg="the python NetApp-Lib module is required") + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def get_cluster_identity_rest(self): + ''' get cluster information, but the cluster may not exist yet + return: + None if the cluster cannot be reached + a dictionary of attributes + ''' + record, error = rest_generic.get_one_record(self.rest_api, 'cluster', fields='contact,location,name,timezone') + if error: + if 'are available in precluster.' 
in error: + # assuming precluster state + return None + self.module.fail_json(msg='Error fetching cluster identity info: %s' % to_native(error), + exception=traceback.format_exc()) + if record: + return { + 'cluster_contact': record.get('contact'), + 'cluster_location': record.get('location'), + 'cluster_name': record.get('name'), + 'timezone': self.na_helper.safe_get(record, ['timezone']) + } + return None + + def get_cluster_identity(self, ignore_error=True): + ''' get cluster information, but the cluster may not exist yet + return: + None if the cluster cannot be reached + a dictionary of attributes + ''' + if self.use_rest: + return self.get_cluster_identity_rest() + + zapi = netapp_utils.zapi.NaElement('cluster-identity-get') + try: + result = self.server.invoke_successfully(zapi, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if ignore_error: + return None + self.module.fail_json(msg='Error fetching cluster identity info: %s' % to_native(error), + exception=traceback.format_exc()) + cluster_identity = {} + if result.get_child_by_name('attributes'): + identity_info = result.get_child_by_name('attributes').get_child_by_name('cluster-identity-info') + if identity_info: + cluster_identity['cluster_contact'] = identity_info.get_child_content('cluster-contact') + cluster_identity['cluster_location'] = identity_info.get_child_content('cluster-location') + cluster_identity['cluster_name'] = identity_info.get_child_content('cluster-name') + return cluster_identity + return None + + def get_cluster_nodes_rest(self): + ''' get cluster node names, but the cluster may not exist yet + return: + None if the cluster cannot be reached + a list of nodes + ''' + if self.node_records is None: + records, error = rest_generic.get_0_or_more_records(self.rest_api, 'cluster/nodes', fields='name,uuid,cluster_interfaces') + if error: + self.module.fail_json(msg='Error fetching cluster node info: %s' % to_native(error), + exception=traceback.format_exc()) + 
self.node_records = records or [] + return self.node_records + + def get_cluster_node_names_rest(self): + ''' get cluster node names, but the cluster may not exist yet + return: + None if the cluster cannot be reached + a list of nodes + ''' + records = self.get_cluster_nodes_rest() + return [record['name'] for record in records] + + def get_cluster_nodes(self, ignore_error=True): + ''' get cluster node names, but the cluster may not exist yet + return: + None if the cluster cannot be reached + a list of nodes + ''' + if self.use_rest: + return self.get_cluster_node_names_rest() + + zapi = netapp_utils.zapi.NaElement('cluster-node-get-iter') + try: + result = self.server.invoke_successfully(zapi, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if ignore_error: + return None + self.module.fail_json(msg='Error fetching cluster node info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('attributes-list'): + cluster_nodes = [] + for node_info in result.get_child_by_name('attributes-list').get_children(): + node_name = node_info.get_child_content('node-name') + if node_name is not None: + cluster_nodes.append(node_name) + return cluster_nodes + return None + + def get_cluster_ip_addresses_rest(self, cluster_ip_address): + ''' get list of IP addresses for this cluster + return: + a list of dictionaries + ''' + if_infos = [] + records = self.get_cluster_nodes_rest() + for record in records: + for interface in record.get('cluster_interfaces', []): + ip_address = self.na_helper.safe_get(interface, ['ip', 'address']) + if cluster_ip_address is None or ip_address == cluster_ip_address: + if_info = { + 'address': ip_address, + 'home_node': record['name'], + } + if_infos.append(if_info) + return if_infos + + def get_cluster_ip_addresses(self, cluster_ip_address, ignore_error=True): + ''' get list of IP addresses for this cluster + return: + a list of dictionaries + ''' + if_infos = [] + zapi = 
netapp_utils.zapi.NaElement('net-interface-get-iter') + if cluster_ip_address is not None: + query = netapp_utils.zapi.NaElement('query') + net_info = netapp_utils.zapi.NaElement('net-interface-info') + net_info.add_new_child('address', cluster_ip_address) + query.add_child_elem(net_info) + zapi.add_child_elem(query) + + try: + result = self.server.invoke_successfully(zapi, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if ignore_error: + return if_infos + self.module.fail_json(msg='Error getting IP addresses: %s' % to_native(error), + exception=traceback.format_exc()) + + if result.get_child_by_name('attributes-list'): + for net_info in result.get_child_by_name('attributes-list').get_children(): + if net_info: + if_info = {'address': net_info.get_child_content('address')} + if_info['home_node'] = net_info.get_child_content('home-node') + if_infos.append(if_info) + return if_infos + + def get_cluster_ip_address(self, cluster_ip_address, ignore_error=True): + ''' get node information if it is discoverable + return: + None if the cluster cannot be reached + a dictionary of attributes + ''' + if cluster_ip_address is None: + return None + if self.use_rest: + nodes = self.get_cluster_ip_addresses_rest(cluster_ip_address) + else: + nodes = self.get_cluster_ip_addresses(cluster_ip_address, ignore_error=ignore_error) + return nodes if len(nodes) > 0 else None + + def create_cluster_body(self, modify=None, nodes=None): + body = {} + params = modify if modify is not None else self.parameters + for (param_key, rest_key) in { + 'cluster_contact': 'contact', + 'cluster_location': 'location', + 'cluster_name': 'name', + 'single_node_cluster': 'single_node_cluster', + 'timezone': 'timezone' + }.items(): + if param_key in params: + body[rest_key] = params[param_key] + if nodes: + body['nodes'] = nodes + return body + + def create_node_body(self): + node = {} + for (param_key, rest_key) in { + 'cluster_ip_address': 'cluster_interface.ip.address', + 
'cluster_location': 'location', + 'node_name': 'name' + }.items(): + if param_key in self.parameters: + node[rest_key] = self.parameters[param_key] + return node + + def create_nodes(self): + node = self.create_node_body() + return [node] if node else None + + def create_cluster_rest(self, older_api=False): + """ + Create a cluster + """ + query = None + body = self.create_cluster_body(nodes=self.create_nodes()) + if 'single_node_cluster' in body: + query = {'single_node_cluster': body.pop('single_node_cluster')} + dummy, error = rest_generic.post_async(self.rest_api, 'cluster', body, query, job_timeout=120) + if error: + self.module.fail_json(msg='Error creating cluster %s: %s' + % (self.parameters['cluster_name'], to_native(error)), + exception=traceback.format_exc()) + + def create_cluster(self, older_api=False): + """ + Create a cluster + """ + if self.use_rest: + return self.create_cluster_rest() + + # Note: cannot use node_name here: + # 13001:The "-node-names" parameter must be used with either the "-node-uuids" or the "-cluster-ips" parameters. + options = {'cluster-name': self.parameters['cluster_name']} + if not older_api and self.parameters.get('single_node_cluster') is not None: + options['single-node-cluster'] = str(self.parameters['single_node_cluster']).lower() + cluster_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'cluster-create', **options) + try: + self.server.invoke_successfully(cluster_create, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if error.message == "Extra input: single-node-cluster" and not older_api: + return self.create_cluster(older_api=True) + # Error 36503 denotes node already being used. 
+ if to_native(error.code) == "36503": + return False + self.module.fail_json(msg='Error creating cluster %s: %s' + % (self.parameters['cluster_name'], to_native(error)), + exception=traceback.format_exc()) + return True + + def add_node_rest(self): + """ + Add a node to an existing cluster + """ + body = self.create_node_body() + dummy, error = rest_generic.post_async(self.rest_api, 'cluster/nodes', body, job_timeout=120) + if error: + self.module.fail_json(msg='Error adding node with ip %s: %s' + % (self.parameters.get('cluster_ip_address'), to_native(error)), + exception=traceback.format_exc()) + + def add_node(self, older_api=False): + """ + Add a node to an existing cluster + 9.2 and 9.3 do not support cluster-ips so fallback to node-ip + """ + if self.use_rest: + return self.add_node_rest() + + if self.parameters.get('cluster_ip_address') is None: + return False + cluster_add_node = netapp_utils.zapi.NaElement('cluster-add-node') + if older_api: + cluster_add_node.add_new_child('node-ip', self.parameters.get('cluster_ip_address')) + else: + cluster_ips = netapp_utils.zapi.NaElement.create_node_with_children('cluster-ips', **{'ip-address': self.parameters.get('cluster_ip_address')}) + cluster_add_node.add_child_elem(cluster_ips) + if self.parameters.get('node_name') is not None: + node_names = netapp_utils.zapi.NaElement.create_node_with_children('node-names', **{'string': self.parameters.get('node_name')}) + cluster_add_node.add_child_elem(node_names) + + try: + self.server.invoke_successfully(cluster_add_node, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if error.message == "Extra input: cluster-ips" and not older_api: + return self.add_node(older_api=True) + # skip if error says no failed operations to retry. + if to_native(error) == "NetApp API failed. 
Reason - 13001:There are no failed \"cluster create\" or \"cluster add-node\" operations to retry.": + return False + self.module.fail_json(msg='Error adding node with ip %s: %s' + % (self.parameters.get('cluster_ip_address'), to_native(error)), + exception=traceback.format_exc()) + return True + + def get_uuid_from_ip(self, ip_address): + for node in self.get_cluster_nodes_rest(): + if ip_address in (interface['ip']['address'] for interface in node['cluster_interfaces']): + return node['uuid'] + return None + + def get_uuid_from_name(self, node_name): + for node in self.get_cluster_nodes_rest(): + if node_name == node['name']: + return node['uuid'] + return None + + def get_uuid(self): + if self.parameters.get('cluster_ip_address') is not None: + from_node = self.parameters['cluster_ip_address'] + uuid = self.get_uuid_from_ip(from_node) + elif self.parameters.get('node_name') is not None: + from_node = self.parameters['node_name'] + uuid = self.get_uuid_from_name(from_node) + else: + # Unexpected, for delete one of cluster_ip_address, node_name is required. 
+ uuid = None + if uuid is None: + self.module.fail_json(msg='Internal error, cannot find UUID in %s: for %s or %s' + % (self.get_cluster_nodes_rest(), self.parameters['cluster_ip_address'], self.parameters.get('node_name') is not None), + exception=traceback.format_exc()) + return uuid, from_node + + def remove_node_rest(self): + """ + Remove a node from an existing cluster + """ + uuid, from_node = self.get_uuid() + query = {'force': True} if self.parameters.get('force') else None + dummy, error = rest_generic.delete_async(self.rest_api, 'cluster/nodes', uuid, query, job_timeout=120) + if error: + self.module.fail_json(msg='Error removing node with %s: %s' + % (from_node, to_native(error)), exception=traceback.format_exc()) + + def remove_node(self): + """ + Remove a node from an existing cluster + """ + if self.use_rest: + return self.remove_node_rest() + + cluster_remove_node = netapp_utils.zapi.NaElement('cluster-remove-node') + from_node = '' + # cluster-ip and node-name are mutually exclusive: + # 13115:Element "cluster-ip" within "cluster-remove-node" has been excluded by another element. + if self.parameters.get('cluster_ip_address') is not None: + cluster_remove_node.add_new_child('cluster-ip', self.parameters.get('cluster_ip_address')) + from_node = 'IP: %s' % self.parameters.get('cluster_ip_address') + elif self.parameters.get('node_name') is not None: + cluster_remove_node.add_new_child('node', self.parameters.get('node_name')) + from_node = 'name: %s' % self.parameters.get('node_name') + if self.parameters.get('force'): + cluster_remove_node.add_new_child('force', 'true') + + try: + self.server.invoke_successfully(cluster_remove_node, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if error.message == "Unable to find API: cluster-remove-node": + msg = 'Error: ZAPI is not available. Removing a node requires ONTAP 9.4 or newer.' 
+ self.module.fail_json(msg=msg) + self.module.fail_json(msg='Error removing node with %s: %s' + % (from_node, to_native(error)), exception=traceback.format_exc()) + + def modify_cluster_identity_rest(self, modify): + """ + Modifies the cluster identity + """ + body = self.create_cluster_body(modify) + dummy, error = rest_generic.patch_async(self.rest_api, 'cluster', None, body) + if error: + self.module.fail_json(msg='Error modifying cluster idetity details %s: %s' + % (self.parameters['cluster_name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_cluster_identity(self, modify): + """ + Modifies the cluster identity + """ + if self.use_rest: + return self.modify_cluster_identity_rest(modify) + + cluster_modify = netapp_utils.zapi.NaElement('cluster-identity-modify') + if modify.get('cluster_name') is not None: + cluster_modify.add_new_child("cluster-name", modify.get('cluster_name')) + if modify.get('cluster_location') is not None: + cluster_modify.add_new_child("cluster-location", modify.get('cluster_location')) + if modify.get('cluster_contact') is not None: + cluster_modify.add_new_child("cluster-contact", modify.get('cluster_contact')) + + try: + self.server.invoke_successfully(cluster_modify, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying cluster idetity details %s: %s' + % (self.parameters['cluster_name'], to_native(error)), + exception=traceback.format_exc()) + + def cluster_create_wait(self): + """ + Wait whilst cluster creation completes + """ + if self.use_rest: + # wait is part of post_async for REST + return + + cluster_wait = netapp_utils.zapi.NaElement('cluster-create-join-progress-get') + is_complete = False + status = '' + retries = self.parameters['time_out'] + errors = [] + while not is_complete and status not in ('failed', 'success') and retries > 0: + retries = retries - 10 + time.sleep(10) + try: + result = 
self.server.invoke_successfully(cluster_wait, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + # collecting errors, and retrying + errors.append(repr(error)) + continue + + clus_progress = result.get_child_by_name('attributes') + result = clus_progress.get_child_by_name('cluster-create-join-progress-info') + is_complete = self.na_helper.get_value_for_bool(from_zapi=True, + value=result.get_child_content('is-complete')) + status = result.get_child_content('status') + + if self.parameters['time_out'] == 0: + is_complete = True + if not is_complete and status != 'success': + current_status_message = result.get_child_content('current-status-message') + errors.append('Failed to confirm cluster creation %s: %s' % (self.parameters.get('cluster_name'), current_status_message)) + if retries <= 0: + errors.append("Timeout after %s seconds" % self.parameters['time_out']) + self.module.fail_json(msg='Error creating cluster %s: %s' + % (self.parameters['cluster_name'], str(errors))) + + return is_complete + + def node_add_wait(self): + """ + Wait whilst node is being added to the existing cluster + """ + if self.use_rest: + # wait is part of post_async for REST + return + + cluster_node_status = netapp_utils.zapi.NaElement('cluster-add-node-status-get-iter') + node_status_info = netapp_utils.zapi.NaElement('cluster-create-add-node-status-info') + node_status_info.add_new_child('cluster-ip', self.parameters.get('cluster_ip_address')) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(node_status_info) + cluster_node_status.add_child_elem(query) + + is_complete = None + failure_msg = None + retries = self.parameters['time_out'] + errors = [] + while is_complete != 'success' and is_complete != 'failure' and retries > 0: + retries = retries - 10 + time.sleep(10) + try: + result = self.server.invoke_successfully(cluster_node_status, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if error.message == "Unable to find 
API: cluster-add-node-status-get-iter": + # This API is not supported for 9.3 or earlier releases, just wait a bit + time.sleep(60) + return + # collecting errors, and retrying + errors.append(repr(error)) + continue + + attributes_list = result.get_child_by_name('attributes-list') + join_progress = attributes_list.get_child_by_name('cluster-create-add-node-status-info') + is_complete = join_progress.get_child_content('status') + failure_msg = join_progress.get_child_content('failure-msg') + + if self.parameters['time_out'] == 0: + is_complete = 'success' + if is_complete != 'success': + if 'Node is already in a cluster' in failure_msg: + return + elif retries <= 0: + errors.append("Timeout after %s seconds" % self.parameters['time_out']) + if failure_msg: + errors.append(failure_msg) + self.module.fail_json(msg='Error adding node with ip address %s: %s' + % (self.parameters['cluster_ip_address'], str(errors))) + + def node_remove_wait(self): + ''' wait for node name or clister IP address to disappear ''' + if self.use_rest: + # wait is part of delete_async for REST + return + + node_name = self.parameters.get('node_name') + node_ip = self.parameters.get('cluster_ip_address') + retries = self.parameters['time_out'] + while retries > 0: + retries = retries - 10 + if node_name is not None and node_name not in self.get_cluster_nodes(): + return + if node_ip is not None and self.get_cluster_ip_address(node_ip) is None: + return + time.sleep(10) + self.module.fail_json(msg='Timeout waiting for node to be removed from cluster.') + + def get_cluster_action(self, cluster_identity): + cluster_action = None + if self.parameters.get('cluster_name') is not None: + cluster_action = self.na_helper.get_cd_action(cluster_identity, self.parameters) + if cluster_action == 'delete': + # delete only applies to node + cluster_action = None + self.na_helper.changed = False + return cluster_action + + def get_node_action(self): + node_action = None + if 
self.parameters.get('cluster_ip_address') is not None: + existing_interfaces = self.get_cluster_ip_address(self.parameters.get('cluster_ip_address')) + if self.parameters.get('state') == 'present': + node_action = 'add_node' if existing_interfaces is None else None + else: + node_action = 'remove_node' if existing_interfaces is not None else None + if self.parameters.get('node_name') is not None and self.parameters['state'] == 'absent': + nodes = self.get_cluster_nodes() + if self.parameters.get('node_name') in nodes: + node_action = 'remove_node' + if node_action is not None: + self.na_helper.changed = True + return node_action + + def apply(self): + """ + Apply action to cluster + """ + cluster_identity = self.get_cluster_identity(ignore_error=True) + cluster_action = self.get_cluster_action(cluster_identity) + node_action = self.get_node_action() + modify = self.na_helper.get_modified_attributes(cluster_identity, self.parameters) + + if not self.module.check_mode: + if cluster_action == 'create' and self.create_cluster(): + self.cluster_create_wait() + if node_action == 'add_node': + if self.add_node(): + self.node_add_wait() + elif node_action == 'remove_node': + self.remove_node() + self.node_remove_wait() + if modify: + self.modify_cluster_identity(modify) + + results = {'changed': self.na_helper.changed} + if self.warnings: + results['warnings'] = self.warnings + if netapp_utils.has_feature(self.module, 'show_modified'): + results['modify'] = modify + self.module.exit_json(**results) + + +def main(): + """ + Create object and call apply + """ + cluster_obj = NetAppONTAPCluster() + cluster_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py new file mode 100644 index 000000000..822a0778c --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_ha.py @@ -0,0 +1,151 @@ 
+#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - "Enable or disable HA on a cluster" +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_cluster_ha +options: + state: + choices: ['present', 'absent'] + type: str + description: + - "Whether HA on cluster should be enabled or disabled." + default: present +short_description: NetApp ONTAP Manage HA status for cluster +version_added: 2.6.0 +''' + +EXAMPLES = """ + - name: "Enable HA status for cluster" + netapp.ontap.na_ontap_cluster_ha: + state: present + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapClusterHA: + """ + object initialize and class methods + """ + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # Set up Rest API + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if not 
netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def modify_cluster_ha(self, configure): + """ + Enable or disable HA on cluster + :return: None + """ + if self.use_rest: + return self.modify_cluster_ha_rest(configure) + + cluster_ha_modify = netapp_utils.zapi.NaElement.create_node_with_children( + 'cluster-ha-modify', **{'ha-configured': configure}) + try: + self.server.invoke_successfully(cluster_ha_modify, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying cluster HA to %s: %s' + % (configure, to_native(error)), + exception=traceback.format_exc()) + + def get_cluster_ha_enabled(self): + """ + Get current cluster HA details + :return: dict if enabled, None if disabled + """ + if self.use_rest: + return self.get_cluster_ha_enabled_rest() + cluster_ha_get = netapp_utils.zapi.NaElement('cluster-ha-get') + try: + result = self.server.invoke_successfully(cluster_ha_get, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError: + self.module.fail_json(msg='Error fetching cluster HA details', + exception=traceback.format_exc()) + cluster_ha_info = result.get_child_by_name('attributes').get_child_by_name('cluster-ha-info') + if cluster_ha_info.get_child_content('ha-configured') == 'true': + return {'ha-configured': True} + return None + + def get_cluster_ha_enabled_rest(self): + api = 'private/cli/cluster/ha' + params = {'fields': 'configured'} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching cluster HA details: %s' % to_native(error)) + return {'ha-configured': True} if record['configured'] else None + + def modify_cluster_ha_rest(self, configure): + api = 'private/cli/cluster/ha' + body = {'configured': True if configure == "true" else False} + dummy, error = rest_generic.patch_async(self.rest_api, 
api, None, body) + if error: + self.module.fail_json(msg='Error modifying cluster HA to %s: %s' % (configure, to_native(error))) + + def apply(self): + """ + Apply action to cluster HA + """ + current = self.get_cluster_ha_enabled() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if not self.module.check_mode: + if cd_action == 'create': + self.modify_cluster_ha("true") + elif cd_action == 'delete': + self.modify_cluster_ha("false") + + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Create object and call apply + """ + ha_obj = NetAppOntapClusterHA() + ha_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py new file mode 100644 index 000000000..820001cc4 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_cluster_peer.py @@ -0,0 +1,427 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete cluster peer relations on ONTAP +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap + - netapp.ontap.netapp.na_ontap_peer +module: na_ontap_cluster_peer +options: + state: + choices: ['present', 'absent'] + type: str + description: + - Whether the specified cluster peer should exist or not. + default: present + source_intercluster_lifs: + description: + - List of intercluster addresses of the source cluster. + - Used as peer-addresses in destination cluster. + - All these intercluster lifs should belong to the source cluster. 
+ version_added: 2.8.0 + type: list + elements: str + aliases: + - source_intercluster_lif + dest_intercluster_lifs: + description: + - List of intercluster addresses of the destination cluster. + - Used as peer-addresses in source cluster. + - All these intercluster lifs should belong to the destination cluster. + version_added: 2.8.0 + type: list + elements: str + aliases: + - dest_intercluster_lif + passphrase: + description: + - The arbitrary passphrase that matches the one given to the peer cluster. + type: str + source_cluster_name: + description: + - The name of the source cluster name in the peer relation to be deleted. + type: str + dest_cluster_name: + description: + - The name of the destination cluster name in the peer relation to be deleted. + - Required for delete + type: str + dest_hostname: + description: + - DEPRECATED - please use C(peer_options). + - Destination cluster IP or hostname which needs to be peered. + - Required to complete the peering process at destination cluster. + type: str + dest_username: + description: + - DEPRECATED - please use C(peer_options). + - Destination username. + - Optional if this is same as source username or if a certificate is used. + type: str + dest_password: + description: + - DEPRECATED - please use C(peer_options). + - Destination password. + - Optional if this is same as source password or if a certificate is used.. + type: str + ipspace: + description: + - IPspace of the local intercluster LIFs. + - Assumes Default IPspace if not provided. + type: str + version_added: '20.11.0' + encryption_protocol_proposed: + description: + - Encryption protocol to be used for inter-cluster communication. + - Only available on ONTAP 9.5 or later. 
+ choices: ['tls_psk', 'none'] + type: str + version_added: '20.5.0' +short_description: NetApp ONTAP Manage Cluster peering +version_added: 2.7.0 +''' + +EXAMPLES = """ + + - name: Create cluster peer + netapp.ontap.na_ontap_cluster_peer: + state: present + source_intercluster_lifs: 1.2.3.4,1.2.3.5 + dest_intercluster_lifs: 1.2.3.6,1.2.3.7 + passphrase: XXXX + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + peer_options: + hostname: "{{ dest_netapp_hostname }}" + encryption_protocol_proposed: tls_psk + + - name: Delete cluster peer + netapp.ontap.na_ontap_cluster_peer: + state: absent + source_cluster_name: test-source-cluster + dest_cluster_name: test-dest-cluster + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + peer_options: + hostname: "{{ dest_netapp_hostname }}" + + - name: Create cluster peer - different credentials + netapp.ontap.na_ontap_cluster_peer: + state: present + source_intercluster_lifs: 1.2.3.4,1.2.3.5 + dest_intercluster_lifs: 1.2.3.6,1.2.3.7 + passphrase: XXXX + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + peer_options: + hostname: "{{ dest_netapp_hostname }}" + cert_filepath: "{{ cert_filepath }}" + key_filepath: "{{ key_filepath }}" + encryption_protocol_proposed: tls_psk + +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPClusterPeer: + """ + Class with cluster peer methods + """ + + def __init__(self): 

        # Build the module argument spec on top of the standard ONTAP connection options.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            source_intercluster_lifs=dict(required=False, type='list', elements='str', aliases=['source_intercluster_lif']),
            dest_intercluster_lifs=dict(required=False, type='list', elements='str', aliases=['dest_intercluster_lif']),
            passphrase=dict(required=False, type='str', no_log=True),
            peer_options=dict(type='dict', options=netapp_utils.na_ontap_host_argument_spec_peer()),
            dest_hostname=dict(required=False, type='str'),
            dest_username=dict(required=False, type='str'),
            dest_password=dict(required=False, type='str', no_log=True),
            source_cluster_name=dict(required=False, type='str'),
            dest_cluster_name=dict(required=False, type='str'),
            ipspace=dict(required=False, type='str'),
            encryption_protocol_proposed=dict(required=False, type='str', choices=['tls_psk', 'none'])
        ))

        # dest_hostname/dest_username/dest_password are the legacy way to address the
        # peer cluster; peer_options is the structured replacement, hence the
        # mutual exclusion and the required_one_of constraint below.
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[
                ['peer_options', 'dest_hostname'],
                ['peer_options', 'dest_username'],
                ['peer_options', 'dest_password']
            ],
            required_one_of=[['peer_options', 'dest_hostname']],
            required_if=[
                ('state', 'absent', ['source_cluster_name', 'dest_cluster_name']),
                ('state', 'present', ['source_intercluster_lifs', 'dest_intercluster_lifs'])
            ],
            supports_check_mode=True
        )
        # passphrase generated by ONTAP on the source, reused for the destination (REST only).
        self.generated_passphrase = None
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # set peer server connection
        if self.parameters.get('dest_hostname') is not None:
            # if dest_hostname is present, peer_options is absent
            self.parameters['peer_options'] = dict(
                hostname=self.parameters.get('dest_hostname'),
                username=self.parameters.get('dest_username'),
                password=self.parameters.get('dest_password'),
            )
        # Fill in any peer connection option not set explicitly from the module-level params.
        netapp_utils.setup_host_options_from_module_params(
            self.parameters['peer_options'], self.module,
            netapp_utils.na_ontap_host_argument_spec_peer().keys())
        # REST is used only when BOTH clusters support it; otherwise fall back to ZAPI
        # and open a separate ZAPI connection to each cluster.
        self.use_rest = False
        self.rest_api = OntapRestAPI(self.module)
        self.src_use_rest = self.rest_api.is_rest()
        self.dst_rest_api = OntapRestAPI(self.module, host_options=self.parameters['peer_options'])
        self.dst_use_rest = self.dst_rest_api.is_rest()
        self.use_rest = bool(self.src_use_rest and self.dst_use_rest)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg="the python NetApp-Lib module is required")
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
            self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module, host_options=self.parameters['peer_options'])

    def cluster_peer_get_iter(self, cluster):
        """
        Compose NaElement object to query current source cluster using peer-cluster-name and peer-addresses parameters
        :param cluster: type of cluster (source or destination)
        :return: NaElement object for cluster-get-iter with query
        """
        cluster_peer_get = netapp_utils.zapi.NaElement('cluster-peer-get-iter')
        query = netapp_utils.zapi.NaElement('query')
        cluster_peer_info = netapp_utils.zapi.NaElement('cluster-peer-info')
        # the query is built from the REMOTE side's parameters (see get_peer_lifs_cluster_keys)
        peer_lifs, peer_cluster = self.get_peer_lifs_cluster_keys(cluster)
        if self.parameters.get(peer_lifs):
            peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
            for peer in self.parameters.get(peer_lifs):
                peer_addresses.add_new_child('remote-inet-address', peer)
            cluster_peer_info.add_child_elem(peer_addresses)
        if self.parameters.get(peer_cluster):
            cluster_peer_info.add_new_child('cluster-name', self.parameters[peer_cluster])
        query.add_child_elem(cluster_peer_info)
        cluster_peer_get.add_child_elem(query)
        return cluster_peer_get

    def cluster_peer_get(self, cluster):
        """
        Get current cluster peer info
        :param cluster: type of cluster (source or destination)
        :return: Dictionary of current cluster peer details if query 
successful, else return None
        """
        if self.use_rest:
            return self.cluster_peer_get_rest(cluster)
        cluster_peer_get_iter = self.cluster_peer_get_iter(cluster)
        result, cluster_info = None, dict()
        # query the cluster we are inspecting, not the remote one
        if cluster == 'source':
            server = self.server
        else:
            server = self.dest_server
        try:
            result = server.invoke_successfully(cluster_peer_get_iter, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching cluster peer %s: %s'
                                      % (cluster, to_native(error)),
                                  exception=traceback.format_exc())
        # return cluster peer details
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) >= 1:
            cluster_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('cluster-peer-info')
            cluster_info['cluster_name'] = cluster_peer_info.get_child_content('cluster-name')
            peers = cluster_peer_info.get_child_by_name('peer-addresses')
            cluster_info['peer-addresses'] = [peer.get_content() for peer in peers.get_children()]
            return cluster_info
        return None

    def get_peer_lifs_cluster_keys(self, cluster):
        # Return the parameter keys describing the REMOTE end of the relationship:
        # when looking at the source cluster the peer is the destination, and vice-versa.
        if cluster == 'source':
            return 'dest_intercluster_lifs', 'dest_cluster_name'
        return 'source_intercluster_lifs', 'source_cluster_name'

    def cluster_peer_get_rest(self, cluster):
        """REST equivalent of cluster_peer_get: scan GET cluster/peers for a matching peer."""
        api = 'cluster/peers'
        fields = 'remote'
        restapi = self.rest_api if cluster == 'source' else self.dst_rest_api
        records, error = rest_generic.get_0_or_more_records(restapi, api, None, fields)
        if error:
            self.module.fail_json(msg=error)
        cluster_info = {}
        if records is not None:
            peer_lifs, peer_cluster = self.get_peer_lifs_cluster_keys(cluster)
            for record in records:
                if 'remote' in record:
                    peer_cluster_exist, peer_addresses_exist = False, False
                    # check peer lif or peer cluster present in each peer cluster data in current.
                    # if peer-lifs not present in parameters, use peer_cluster to filter desired cluster peer in current.
                    if self.parameters.get(peer_lifs) is not None:
                        # exact (unordered) match on the remote LIF address set
                        peer_addresses_exist = set(self.parameters[peer_lifs]) == set(record['remote']['ip_addresses'])
                    else:
                        peer_cluster_exist = self.parameters[peer_cluster] == record['remote']['name']
                    if peer_addresses_exist or peer_cluster_exist:
                        cluster_info['cluster_name'] = record['remote']['name']
                        cluster_info['peer-addresses'] = record['remote']['ip_addresses']
                        # uuid is only available (and only needed) with REST, for DELETE
                        cluster_info['uuid'] = record['uuid']
                        return cluster_info
        return None

    def cluster_peer_delete(self, cluster, uuid=None):
        """
        Delete a cluster peer on source or destination
        For source cluster, peer cluster-name = destination cluster name and vice-versa
        :param cluster: type of cluster (source or destination)
        :param uuid: peer relationship uuid (REST only)
        :return: None
        """
        if self.use_rest:
            return self.cluster_peer_delete_rest(cluster, uuid)
        if cluster == 'source':
            server, peer_cluster_name = self.server, self.parameters['dest_cluster_name']
        else:
            server, peer_cluster_name = self.dest_server, self.parameters['source_cluster_name']
        cluster_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'cluster-peer-delete', **{'cluster-name': peer_cluster_name})
        try:
            server.invoke_successfully(cluster_peer_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting cluster peer %s: %s'
                                      % (peer_cluster_name, to_native(error)),
                                  exception=traceback.format_exc())

    def cluster_peer_delete_rest(self, cluster, uuid):
        # DELETE cluster/peers/{uuid} on whichever cluster holds the relationship.
        server = self.rest_api if cluster == 'source' else self.dst_rest_api
        dummy, error = rest_generic.delete_async(server, 'cluster/peers', uuid)
        if error:
            self.module.fail_json(msg=error)

    def cluster_peer_create(self, cluster):
        """
        Create a cluster peer on source or destination
        For source cluster, peer addresses = destination inter-cluster LIFs and vice-versa
        :param cluster: type of cluster (source or destination)
        :return: None
        """
        if self.use_rest:
            return 
self.cluster_peer_create_rest(cluster)
        # ZAPI path: build a cluster-peer-create request with the remote LIF addresses.
        cluster_peer_create = netapp_utils.zapi.NaElement.create_node_with_children('cluster-peer-create')
        if self.parameters.get('passphrase') is not None:
            cluster_peer_create.add_new_child('passphrase', self.parameters['passphrase'])
        peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
        server, peer_address = self.get_server_and_peer_address(cluster)
        for each in peer_address:
            peer_addresses.add_new_child('remote-inet-address', each)
        cluster_peer_create.add_child_elem(peer_addresses)
        if self.parameters.get('encryption_protocol_proposed') is not None:
            cluster_peer_create.add_new_child('encryption-protocol-proposed', self.parameters['encryption_protocol_proposed'])
        if self.parameters.get('ipspace') is not None:
            cluster_peer_create.add_new_child('ipspace-name', self.parameters['ipspace'])

        try:
            server.invoke_successfully(cluster_peer_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating cluster peer %s: %s'
                                      % (peer_address, to_native(error)),
                                  exception=traceback.format_exc())

    def get_server_and_peer_address(self, cluster):
        # Return (connection for `cluster`, remote inter-cluster LIFs to peer with).
        # The connection type (REST vs ZAPI) follows self.use_rest.
        if cluster == 'source':
            server = self.rest_api if self.use_rest else self.server
            return server, self.parameters['dest_intercluster_lifs']
        server = self.dst_rest_api if self.use_rest else self.dest_server
        return server, self.parameters['source_intercluster_lifs']

    def cluster_peer_create_rest(self, cluster):
        """REST equivalent of cluster_peer_create: POST cluster/peers on the given cluster."""
        api = 'cluster/peers'
        body = {}
        if self.parameters.get('passphrase') is not None:
            body['authentication.passphrase'] = self.parameters['passphrase']
        # generate passphrase in source if passphrase not provided.
        elif cluster == 'source':
            body['authentication.generate_passphrase'] = True
        elif cluster == 'destination':
            # reuse the passphrase ONTAP generated when the source peer was created
            body['authentication.passphrase'] = self.generated_passphrase
        server, peer_address = self.get_server_and_peer_address(cluster)
        body['remote.ip_addresses'] = peer_address
        if self.parameters.get('encryption_protocol_proposed') is not None:
            body['encryption.proposed'] = self.parameters['encryption_protocol_proposed']
        else:
            # Default value for encryption.proposed is tls_psk.
            # explicitly set to none if encryption_protocol_proposed options not present in parameters.
            body['encryption.proposed'] = 'none'
        if self.parameters.get('ipspace') is not None:
            body['ipspace.name'] = self.parameters['ipspace']
        response, error = rest_generic.post_async(server, api, body)
        if error:
            self.module.fail_json(msg=error)
        # capture the generated passphrase so the destination side can be created with it
        if response and cluster == 'source' and 'passphrase' not in self.parameters:
            for record in response['records']:
                self.generated_passphrase = record['authentication']['passphrase']

    def apply(self):
        """
        Apply action to cluster peer
        :return: None
        """
        source = self.cluster_peer_get('source')
        destination = self.cluster_peer_get('destination')
        source_action = self.na_helper.get_cd_action(source, self.parameters)
        destination_action = self.na_helper.get_cd_action(destination, self.parameters)
        self.na_helper.changed = False
        # create only if expected cluster peer relation is not present on both source and destination clusters
        # will error out with appropriate message if peer relationship already exists on either cluster
        if source_action == 'create' or destination_action == 'create':
            if not self.module.check_mode:
                self.cluster_peer_create('source')
                self.cluster_peer_create('destination')
            self.na_helper.changed = True
        # delete peer relation in cluster where relation is present
        else:
            if source_action == 'delete':
                if not self.module.check_mode:
                    uuid = source['uuid'] if source and self.use_rest else 
None
                    self.cluster_peer_delete('source', uuid)
                self.na_helper.changed = True
            if destination_action == 'delete':
                if not self.module.check_mode:
                    # uuid is only meaningful for REST deletions
                    uuid = destination['uuid'] if destination and self.use_rest else None
                    self.cluster_peer_delete('destination', uuid)
                self.na_helper.changed = True

        result = netapp_utils.generate_result(self.na_helper.changed, extra_responses={'source_action': source_action,
                                                                                      'destination_action': destination_action})
        self.module.exit_json(**result)


def main():
    """
    Execute action
    :return: None
    """
    community_obj = NetAppONTAPClusterPeer()
    community_obj.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py
new file mode 100644
index 000000000..377e1947c
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_command.py
@@ -0,0 +1,290 @@
#!/usr/bin/python
'''
# (c) 2018-2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
author: NetApp Ansible Team (@carchi8py)
description:
    - Run system-cli commands on ONTAP.
    - Can't be used with cert authentication and domain authentication accounts.
    - Requires ontapi and console permissions. Console is not supported for data vservers.
    - Requires write permissions, even for show commands! ONTAP reports
      "Insufficient privileges" and "user 'xxxxx' does not have write access to this resource"
      for a readonly user.
extends_documentation_fragment:
    - netapp.ontap.netapp.na_ontap_zapi
module: na_ontap_command
short_description: NetApp ONTAP Run any cli command, the username provided needs to have console login permission.
+version_added: 2.7.0 +options: + command: + description: + - a comma separated list containing the command and arguments. + required: true + type: list + elements: str + privilege: + description: + - privilege level at which to run the command. + choices: ['admin', 'advanced'] + default: admin + type: str + version_added: 2.8.0 + return_dict: + description: + - Returns a parsesable dictionary instead of raw XML output + - C(result_value) + - C(status) > passed, failed. + - C(stdout) > command output in plaintext. + - C(stdout_lines) > list of command output lines. + - C(stdout_lines_filter) > empty list or list of command output lines matching I(include_lines) or I(exclude_lines) parameters. + - C(xml_dict) > JSON representation of what the CLI returned. + type: bool + default: false + version_added: 2.9.0 + vserver: + description: + - If running as vserver admin, you must give a I(vserver) or module will fail + version_added: "19.10.0" + type: str + include_lines: + description: + - applied only when I(return_dict) is true + - return only lines containing string pattern in C(stdout_lines_filter) + default: '' + type: str + version_added: "19.10.0" + exclude_lines: + description: + - applied only when I(return_dict) is true + - return only lines containing string pattern in C(stdout_lines_filter) + default: '' + type: str + version_added: "19.10.0" +''' + +EXAMPLES = """ + - name: run ontap cli command + netapp.ontap.na_ontap_command: + hostname: "{{ hostname }}" + username: "{{ admin username }}" + password: "{{ admin password }}" + command: ['version'] + + # Same as above, but returns parseable dictonary + - name: run ontap cli command + netapp.ontap.na_ontap_command: + hostname: "{{ hostname }}" + username: "{{ admin username }}" + password: "{{ admin password }}" + command: ['node', 'show', '-fields', 'node,health,uptime,model'] + privilege: 'admin' + return_dict: true + + # Same as above, but with lines filtering + - name: run ontap cli command + 
netapp.ontap.na_ontap_command:
      hostname: "{{ hostname }}"
      username: "{{ admin username }}"
      password: "{{ admin password }}"
      command: ['node', 'show', '-fields', 'node,health,uptime,model']
      exclude_lines: 'ode '  # Exclude lines with 'Node ' or 'node ', or anything else containing 'ode '.
      privilege: 'admin'
      return_dict: true
"""

RETURN = """
"""

import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils


class NetAppONTAPCommand():
    ''' calls a CLI command '''

    def __init__(self):
        """Parse module options and set up the ZAPI connection state."""
        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
        self.argument_spec.update(dict(
            command=dict(required=True, type='list', elements='str'),
            privilege=dict(required=False, type='str', choices=['admin', 'advanced'], default='admin'),
            return_dict=dict(required=False, type='bool', default=False),
            vserver=dict(required=False, type='str'),
            include_lines=dict(required=False, type='str', default=''),
            exclude_lines=dict(required=False, type='str', default=''),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        parameters = self.module.params
        # set up state variables
        self.command = parameters['command']
        self.privilege = parameters['privilege']
        self.vserver = parameters['vserver']
        self.return_dict = parameters['return_dict']
        self.include_lines = parameters['include_lines']
        self.exclude_lines = parameters['exclude_lines']

        # accumulator returned to the caller when return_dict is true
        self.result_dict = {
            'status': "",
            'result_value': 0,
            'invoked_command': " ".join(self.command),
            'stdout': "",
            'stdout_lines': [],
            'stdout_lines_filter': [],
            'xml_dict': {},
        }
        self.module.warn('The module only supports ZAPI and is deprecated, and will no longer work with newer versions '
                         'of ONTAP when ONTAPI is deprecated in CY22-Q4')
        self.module.warn('netapp.ontap.na_ontap_rest_cli should be 
used instead.')

        if not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True)

    def run_command(self):
        ''' calls the ZAPI '''
        command_obj = netapp_utils.zapi.NaElement("system-cli")

        args_obj = netapp_utils.zapi.NaElement("args")
        if self.return_dict:
            # prepend `set -showseparator "###" ;` so columns can be recovered after parsing
            args_obj.add_new_child('arg', 'set')
            args_obj.add_new_child('arg', '-showseparator')
            args_obj.add_new_child('arg', '"###"')
            args_obj.add_new_child('arg', ';')
        for arg in self.command:
            args_obj.add_new_child('arg', arg)
        command_obj.add_child_elem(args_obj)
        command_obj.add_new_child('priv', self.privilege)

        try:
            output = self.server.invoke_successfully(command_obj, True)
            if self.return_dict:
                # Parseable dict output
                return self.parse_xml_to_dict(output.to_string())
            else:
                # Raw XML output
                return output.to_string()

        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error running command %s: %s' %
                                  (self.command, to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        ''' calls the command and returns raw output '''
        # always reported as changed: the module cannot know if the CLI command mutates state
        changed = True
        if self.module.check_mode:
            output = "Would run command: '%s'" % str(self.command)
        else:
            output = self.run_command()
        self.module.exit_json(changed=changed, msg=output)

    def parse_xml_to_dict(self, xmldata):
        '''Parse raw XML from system-cli and create an Ansible parseable dictionary'''
        xml_import_ok = True
        xml_parse_ok = True
        importing = 'ast'

        try:
            import ast
            importing = 'xml.parsers.expat'
            import xml.parsers.expat
        except ImportError:
            self.result_dict['status'] = "XML parsing failed. Cannot import %s!" % importing
            self.result_dict['stdout'] = str(xmldata)
            self.result_dict['result_value'] = -1
            xml_import_ok = False

        if xml_import_ok:
            # newlines are encoded as '---' so the expat parser sees a single line;
            # _format_escaped_data reverses this later
            xml_str = xmldata.decode('utf-8').replace('\n', '---')
            xml_parser = xml.parsers.expat.ParserCreate()
            xml_parser.StartElementHandler = self._start_element
            xml_parser.CharacterDataHandler = self._char_data
            xml_parser.EndElementHandler = self._end_element

            try:
                xml_parser.Parse(xml_str)
            except xml.parsers.expat.ExpatError as errcode:
                self.result_dict['status'] = "XML parsing failed: " + str(errcode)
                self.result_dict['stdout'] = str(xmldata)
                self.result_dict['result_value'] = -1
                xml_parse_ok = False

            if xml_parse_ok:
                self.result_dict['status'] = self.result_dict['xml_dict']['results']['attrs']['status']
                stdout_string = self._format_escaped_data(self.result_dict['xml_dict']['cli-output']['data'])
                self.result_dict['stdout'] = stdout_string
                # Generate stdout_lines list
                for line in stdout_string.split('\n'):
                    stripped_line = line.strip()
                    if len(stripped_line) > 1:
                        self.result_dict['stdout_lines'].append(stripped_line)

                        # Generate stdout_lines_filter_list
                        if self.exclude_lines:
                            if self.include_lines in stripped_line and self.exclude_lines not in stripped_line:
                                self.result_dict['stdout_lines_filter'].append(stripped_line)
                        elif self.include_lines and self.include_lines in stripped_line:
                            self.result_dict['stdout_lines_filter'].append(stripped_line)

                self.result_dict['xml_dict']['cli-output']['data'] = stdout_string
                cli_result_value = self.result_dict['xml_dict']['cli-result-value']['data']
                try:
                    # get rid of extra quotes "'1'", but maybe "u'1'" or "b'1'"
                    cli_result_value = ast.literal_eval(cli_result_value)
                except (SyntaxError, ValueError):
                    pass
                try:
                    self.result_dict['result_value'] = int(cli_result_value)
                except ValueError:
                    self.result_dict['result_value'] = cli_result_value

        return self.result_dict

    def _start_element(self, name, attrs):
        ''' Start XML 
element '''
        self.result_dict['xml_dict'][name] = {}
        self.result_dict['xml_dict'][name]['attrs'] = attrs
        self.result_dict['xml_dict'][name]['data'] = ""
        # track which element character data should be attached to
        self.result_dict['xml_dict']['active_element'] = name
        self.result_dict['xml_dict']['last_element'] = ""

    def _char_data(self, data):
        ''' Dump XML element data '''
        self.result_dict['xml_dict'][str(self.result_dict['xml_dict']['active_element'])]['data'] = repr(data)

    def _end_element(self, name):
        # close the active element; subsequent character data is ignored until the next start
        self.result_dict['xml_dict']['last_element'] = name
        self.result_dict['xml_dict']['active_element'] = ""

    @classmethod
    def _format_escaped_data(cls, datastring):
        ''' replace helper escape sequences '''
        # undo the '---' newline encoding and the '###' column separator (see run_command)
        formatted_string = datastring.replace('------', '---').replace('---', '\n').replace("###", "    ").strip()
        retval_string = ""
        for line in formatted_string.split('\n'):
            stripped_line = line.strip()
            if len(stripped_line) > 1:
                retval_string += stripped_line + "\n"
        return retval_string


def main():
    """
    Execute action from playbook
    """
    command = NetAppONTAPCommand()
    command.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_debug.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_debug.py
new file mode 100644
index 000000000..396240806
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_debug.py
@@ -0,0 +1,258 @@
#!/usr/bin/python
"""
create Debug module to diagnose netapp-lib import and connection
"""

# (c) 2020-2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}


DOCUMENTATION = '''
module: na_ontap_debug
short_description: NetApp ONTAP Debug netapp-lib import and connection.
extends_documentation_fragment:
  - netapp.ontap.netapp.na_ontap
version_added: 21.1.0
author: NetApp Ansible Team (@carchi8py)
description:
- Display issues related to importing netapp-lib and connection with diagnose
options:
  vserver:
    description:
    - The vserver name to test for ZAPI tunneling.
    required: false
    type: str
'''
EXAMPLES = """
  - name: Check import netapp-lib
    na_ontap_debug:
      hostname: "{{ netapp_hostname }}"
      username: "{{ netapp_username }}"
      password: "{{ netapp_password }}"
"""

RETURN = """
"""
import sys
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.rest_user import get_users
from ansible_collections.netapp.ontap.plugins.module_utils.rest_vserver import get_vserver


class NetAppONTAPDebug(object):
    """Class with Debug methods"""

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            vserver=dict(required=False, type="str"),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        # diagnostics are accumulated in three buckets and reported at the end of apply()
        self.log_list = []
        self.error_list = []
        self.note_list = []
        self.server = None

    def list_versions(self):
        """Record Ansible, collection and Python version information."""
        self.log_list.append('Ansible version: %s' % netapp_utils.ANSIBLE_VERSION)
        self.log_list.append('ONTAP collection version: %s' % netapp_utils.COLLECTION_VERSION)
        # NOTE(review): sys.version[:3] truncates '3.10.4' to '3.1' — consider
        # platform.python_version() or sys.version_info; confirm intended output.
        self.log_list.append('Python version: %s' % sys.version[:3])
        self.log_list.append('Python executable path: %s' % sys.executable)

    def import_lib(self):
        """Check that netapp-lib can be imported, recording diagnostics on failure."""
        if not netapp_utils.has_netapp_lib():
            msgs = [
                'Error 
importing netapp-lib or a dependency: %s.' % str(netapp_utils.IMPORT_EXCEPTION),
                'Install the python netapp-lib module or a missing dependency.',
                'Additional diagnostic information:',
                'Python Executable Path: %s.' % sys.executable,
                'Python Version: %s.' % sys.version,
                'System Path: %s.' % ','.join(sys.path),
            ]
            self.error_list.append(' '.join(msgs))
            return
        self.log_list.append('netapp-lib imported successfully.')

    def check_connection(self, connection_type):
        """
        check connection errors and diagnose
        :param connection_type: 'REST' or 'ZAPI'
        :return: True when the connection succeeded, False (or None for the
                 unreachable-host case) when it failed
        """
        error_string = None
        if connection_type == "REST":
            api = 'cluster'
            message, error_string = self.rest_api.get(api)
        elif connection_type == "ZAPI":
            if 'vserver' not in self.parameters:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
            else:
                # tunnel the ZAPI call through the given vserver
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
            version_obj = netapp_utils.zapi.NaElement("system-get-version")
            try:
                result = self.server.invoke_successfully(version_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                error_string = to_native(error)
        else:
            self.module.fail_json(msg='Internal error, unexpected connection type: %s' % connection_type)

        if error_string is not None:
            summary_msg = None
            # classify the failure from substrings of the error text
            error_patterns = ['Connection timed out',
                              'Resource temporarily unavailable',
                              'ConnectTimeoutError',
                              'Network is unreachable']
            if any(x in error_string for x in error_patterns):
                summary_msg = 'Error: invalid or unreachable hostname: %s' % self.parameters['hostname']
                if 'vserver' in self.parameters:
                    summary_msg += ' for SVM: %s ' % self.parameters['vserver']
                self.error_list.append('Error in hostname - Address does not exist or is not reachable: ' + error_string)
                self.error_list.append(summary_msg + ' using %s.' % connection_type)
                # NOTE(review): this branch returns None (falsy) while the others return False
                return
            error_patterns = ['Name or service not known', 'Name does not resolve']
            if any(x in error_string for x in error_patterns):
                summary_msg = 'Error: unknown or not resolvable hostname: %s' % self.parameters['hostname']
                if 'vserver' in self.parameters:
                    summary_msg += ' for SVM: %s ' % self.parameters['vserver']
                self.error_list.append('Error in hostname - DNS name cannot be resolved: ' + error_string)
                self.error_list.append('%s cannot be resolved using %s.' % (summary_msg, connection_type))
            else:
                self.error_list.append('Other error for hostname: %s using %s: %s.' % (self.parameters['hostname'], connection_type, error_string))
                self.error_list.append('Unclassified, see msg')
            return False

        ontap_version = message['version']['full'] if connection_type == 'REST' else result['version']
        self.log_list.append('%s connected successfully.' % connection_type)
        self.log_list.append('ONTAP version: %s' % ontap_version)
        return True

    def list_interfaces(self, vserver_name):
        """Log the vserver's IP interfaces, flagging missing management policies."""
        vserver, error = get_vserver(self.rest_api, vserver_name, fields='ip_interfaces')
        if not error and not vserver:
            error = 'not found'
        if error:
            self.error_list.append('Error getting vserver in list_interfaces: %s: %s' % (vserver_name, error))
        else:
            interfaces = vserver.get('ip_interfaces')
            if not interfaces:
                self.error_list.append('Error vserver is not associated with a network interface: %s' % vserver_name)
                return
            for interface in interfaces:
                data = [vserver_name]
                for field in (['name'], ['ip', 'address'], ['services']):
                    value = self.na_helper.safe_get(interface, field)
                    if isinstance(value, list):
                        value = ','.join(value)
                    if field == ['services'] and value and 'management' not in value:
                        self.note_list.append('NOTE: no management policy in services for %s: %s' % (data, value))
                    data.append(value)
                self.log_list.append('vserver: %s, interface: %s, IP: %s, service policies: %s' % tuple(data))

    def validate_user(self, 
user):
        """Check one user record for lock state and http/ontapi/console applications.

        :return: (has_http, has_ontapi) - True only when the user is unlocked
                 and the corresponding application is enabled.
        """
        locked = user.get('locked')
        if locked:
            self.note_list.append('NOTE: user: %s is locked on vserver: %s' % (user['name'], self.na_helper.safe_get(user, ['owner', 'name'])))
        applications = user.get('applications', [])
        apps = [app['application'] for app in applications]
        role = self.na_helper.safe_get(user, ['role', 'name'])
        for application in ('http', 'ontapi', 'console'):
            # console is only checked for admin-role users
            if application not in apps and (application != 'console' or role == 'admin'):
                self.note_list.append('NOTE: application %s not found for user: %s: %s' % (application, user['name'], apps))
                if application == 'console':
                    self.note_list.append("NOTE: console access is only needed for na_ontap_command.")
        # `locked is False` (not just falsy): locked may be None when the field is absent
        has_http = locked is False and 'http' in apps
        has_ontapi = locked is False and 'ontapi' in apps
        return has_http, has_ontapi

    def list_users(self, vserver_name=None, user_name=None):
        """Log user accounts for a vserver (or a single user) and validate access."""
        # query by owning vserver when given, otherwise by user name
        query = {'owner.name': vserver_name} if vserver_name else {'name': user_name}
        users, error = get_users(self.rest_api, query, 'applications,locked,owner,role')
        if not error and not users:
            error = 'none found'
        name = vserver_name or user_name
        if error:
            if 'not authorized for that command' in error:
                self.log_list.append('Not autorized to get accounts for: %s: %s' % (name, error))
            else:
                self.error_list.append('Error getting accounts for: %s: %s' % (name, error))
        else:
            # at least one unlocked user must have http and ontapi enabled
            one_http, one_ontapi = False, False
            for user in users:
                data = {}
                for field in ('owner', 'name', 'role', 'locked', 'applications'):
                    if field in ('owner', 'role'):
                        value = str(self.na_helper.safe_get(user, [field, 'name']))
                    else:
                        value = str(user.get(field))
                    data[field] = value
                self.log_list.append(', '.
join('%s: %s' % x for x in data.items()))
                has_http, has_ontapi = self.validate_user(user)
                one_http |= has_http
                one_ontapi |= has_ontapi
            # the vserver-wide message and the single-user message take (application, name)
            msg = 'Error: no unlocked user for %s on vserver: %s' if vserver_name else\
                'Error: %s is not enabled for user %s'
            if not one_http:
                self.error_list.append(msg % ('http', name))
            if not one_ontapi:
                self.error_list.append(msg % ('ontapi', name))

    def check_vserver(self, name):
        # run the vserver-scoped diagnostics: interfaces, then user accounts

        self.list_interfaces(name)
        self.list_users(vserver_name=name)

    def apply(self):
        """
        Apply debug
        """
        # report Ansible and our collection versions
        self.list_versions()

        # check import netapp-lib
        self.import_lib()

        # check zapi connection errors only if import successful
        if netapp_utils.has_netapp_lib():
            self.check_connection("ZAPI")

        # check rest connection errors
        has_rest = self.check_connection("REST")

        if has_rest:
            self.list_users(user_name=self.parameters.get('username'))
            if 'vserver' in self.parameters:
                self.check_vserver(self.parameters['vserver'])

        msgs = {}
        if self.note_list:
            msgs['notes'] = self.note_list
        if self.error_list:
            # report failure, carrying successful checks in msg_passed for context
            msgs['msg'] = self.error_list
            if self.log_list:
                msgs['msg_passed'] = self.log_list
            self.module.fail_json(**msgs)
        msgs['msg'] = self.log_list
        self.module.exit_json(**msgs)


def main():
    """Execute action"""
    debug_obj = NetAppONTAPDebug()
    debug_obj.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disk_options.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disk_options.py
new file mode 100644
index 000000000..3c05a6f56
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_disk_options.py
@@ -0,0 +1,175 @@
#!/usr/bin/python

# (c) 2021-2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = 
class NetAppOntapDiskOptions:
    """Modify per-node storage disk options (autoassign, autocopy, background firmware update)."""

    def __init__(self):
        """Build the argument spec and the REST client; this module is REST-only (ONTAP 9.6+)."""
        spec = netapp_utils.na_ontap_host_argument_spec()
        spec.update(dict(
            node=dict(required=True, type='str'),
            bkg_firmware_update=dict(required=False, type='bool'),
            autocopy=dict(required=False, type='bool'),
            autoassign=dict(required=False, type='bool'),
            autoassign_policy=dict(required=False, type='str', choices=['default', 'bay', 'shelf', 'stack'])
        ))
        self.argument_spec = spec

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        # No ZAPI fallback exists for this module: fail early when REST is unavailable.
        if not self.use_rest:
            self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_disk_options', '9.6'))

    def convert_to_bool(self, adict, key):
        """burt1468160 - 9.8 returns True/False, but 9.10.1 returns 'on'/'off' """
        raw = adict[key]
        if isinstance(raw, bool):
            return raw
        if raw == 'on':
            return True
        if raw == 'off':
            return False
        self.module.fail_json(msg='Unexpected value for field %s: %s' % (key, raw))

    def get_disk_options(self):
        """Fetch the node's current disk options through the CLI passthrough endpoint.

        :return: dict with node, bkg_firmware_update, autocopy, autoassign, autoassign_policy
        """
        api = "private/cli/storage/disk/option"
        query = {
            'fields': 'node,autoassign,bkg-firmware-update,autocopy,autoassign-policy',
            'node': self.parameters['node']
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, query)

        if error:
            self.module.fail_json(msg='Error %s' % error)
        if record is None:
            self.module.fail_json(msg='Error on GET %s, no record.' % api)

        # ONTAP may report the boolean flags as 'on'/'off' strings - normalize them.
        options = {
            'node': record['node'],
            'autoassign_policy': record['autoassign_policy'],
        }
        for flag in ('bkg_firmware_update', 'autocopy', 'autoassign'):
            options[flag] = self.convert_to_bool(record, flag)
        return options

    def modify_disk_options(self, modify):
        """PATCH only the changed options for the node.

        :return: None
        """
        api = "private/cli/storage/disk/option"
        query = {'node': self.parameters['node']}

        dummy, error = rest_generic.patch_async(self.rest_api, api, None, modify, query)
        if error:
            self.module.fail_json(msg='Error %s' % error)

    def apply(self):
        """Compute the delta against current options and apply it, honoring check_mode."""
        current = self.get_disk_options()
        modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            self.modify_disk_options(modify)

        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
        self.module.exit_json(**result)
+- Disk autoassign must be turned off before using this module to prevent the disks being reassigned automatically by the cluster. +- This can be done through na_ontap_disk_options or via the cli "disk option modify -node -autoassign off". +- If min_spares is not specified min_spares default is 1 if SSD or 2 for any other disk type. +- If disk_count is not specified all unassigned disks will be assigned to the node specified. + +options: + node: + required: true + type: str + description: + - The node that we want to assign/unassign disks. + + disk_count: + description: + - Total number of disks a node should own. + type: int + version_added: 2.9.0 + + disk_type: + description: + - Assign specified type of disk (or set of disks). + type: str + choices: ['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown'] + version_added: 20.6.0 + + min_spares: + description: + - Minimum spares required per type for the node. + type: int + version_added: 21.7.0 + +''' + +EXAMPLES = """ + - name: Assign specified total disks to node + netapp.ontap.na_ontap_disks: + node: node1 + disk_count: 56 + disk_type: VMDISK + min_spares: 2 + hostname: "{{ hostname }}" + username: "{{ admin username }}" + password: "{{ admin password }}" + + - name: Assign all unassigned disks to node1 + netapp.ontap.na_ontap_disks: + node: node1 + hostname: "{{ hostname }}" + username: "{{ admin username }}" + password: "{{ admin password }}" +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +class 
class NetAppOntapDisks():
    '''Assign or unassign disks for a node, via REST when available, ZAPI otherwise.'''

    def __init__(self):
        """Build the argument spec, default min_spares, and select REST vs ZAPI."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            node=dict(required=True, type='str'),
            disk_count=dict(required=False, type='int'),
            disk_type=dict(required=False, type='str', choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown']),
            min_spares=dict(required=False, type='int')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # If min_spares is not specified min_spares is 1 if SSD, min_spares is 2 for any other disk type.
        # BUGFIX: the assignment used to be unconditional, silently discarding a
        # user-supplied min_spares; only default it when the user did not set it,
        # as documented in DOCUMENTATION.
        if 'min_spares' not in self.parameters:
            self.parameters['min_spares'] = 1 if self.parameters.get('disk_type') in ('SSD', 'SSD_NVM') else 2

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_disks(self, container_type, node=None):
        """
        Check for owned disks, unassigned disks or spare disks.
        Return: list of disks or an empty list
        NOTE: with REST each entry is a record dict (with a 'name' key);
        with ZAPI each entry is the disk name string.
        """
        if self.use_rest:
            api = "storage/disks"
            # build the query for the requested container type
            if container_type == 'owned':
                query = {
                    'home_node.name': node,
                    'container_type': '!unassigned',
                    'fields': 'name'
                }
            if container_type == 'unassigned':
                query = {
                    'container_type': 'unassigned',
                    'fields': 'name'
                }
            if container_type == 'spare':
                query = {
                    'home_node.name': node,
                    'container_type': 'spare',
                    'fields': 'name'
                }
            if 'disk_type' in self.parameters:
                query['type'] = self.parameters['disk_type']

            message, error = self.rest_api.get(api, query)
            records, error = rrh.check_for_0_or_more_records(api, message, error)

            if error:
                self.module.fail_json(msg=error)

            return records if records else list()

        else:
            disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
            disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')

            if container_type == 'owned':
                disk_ownership_info = netapp_utils.zapi.NaElement('disk-ownership-info')
                disk_ownership_info.add_new_child('home-node-name', self.parameters['node'])
                disk_storage_info.add_child_elem(disk_ownership_info)
            if container_type == 'unassigned':
                disk_raid_info = netapp_utils.zapi.NaElement('disk-raid-info')
                disk_raid_info.add_new_child('container-type', 'unassigned')
                disk_storage_info.add_child_elem(disk_raid_info)

            disk_query = netapp_utils.zapi.NaElement('query')

            if 'disk_type' in self.parameters and container_type in ('unassigned', 'owned'):
                disk_inventory_info = netapp_utils.zapi.NaElement('disk-inventory-info')
                disk_inventory_info.add_new_child('disk-type', self.parameters['disk_type'])
                disk_query.add_child_elem(disk_inventory_info)

            if container_type == 'spare':
                disk_ownership_info = netapp_utils.zapi.NaElement('disk-ownership-info')
                disk_raid_info = netapp_utils.zapi.NaElement('disk-raid-info')
                disk_ownership_info.add_new_child('owner-node-name', node)
                if 'disk_type' in self.parameters:
                    disk_inventory_info = netapp_utils.zapi.NaElement('disk-inventory-info')
                    disk_inventory_info.add_new_child('disk-type', self.parameters['disk_type'])
                    disk_storage_info.add_child_elem(disk_inventory_info)

                disk_raid_info.add_new_child('container-type', 'spare')
                disk_storage_info.add_child_elem(disk_ownership_info)
                disk_storage_info.add_child_elem(disk_raid_info)

            disk_query.add_child_elem(disk_storage_info)
            disk_iter.add_child_elem(disk_query)

            try:
                result = self.server.invoke_successfully(disk_iter, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error getting disk information: %s' % (to_native(error)),
                                      exception=traceback.format_exc())

            disks = []

            if result.get_child_by_name('attributes-list'):
                attributes_list = result.get_child_by_name('attributes-list')
                storage_disk_info_attributes = attributes_list.get_children()

                for disk in storage_disk_info_attributes:
                    disk_inventory_info = disk.get_child_by_name('disk-inventory-info')
                    disk_name = disk_inventory_info.get_child_content('disk-cluster-name')
                    disks.append(disk_name)

            return disks

    def get_partner_node_name(self):
        """
        return: partner_node_name, str (None when the node has no HA partner)
        """
        if self.use_rest:
            api = "/cluster/nodes"
            query = {
                'ha.partners.name': self.parameters['node']
            }
            message, error = self.rest_api.get(api, query)
            records, error = rrh.check_for_0_or_more_records(api, message, error)
            if error:
                self.module.fail_json(msg=error)

            return records[0]['name'] if records else None

        else:
            partner_name = None
            cf_status = netapp_utils.zapi.NaElement('cf-status')
            cf_status.add_new_child('node', self.parameters['node'])

            try:
                result = self.server.invoke_successfully(cf_status, True)

                if result.get_child_by_name('partner-name'):
                    partner_name = result.get_child_content('partner-name')

            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error getting partner name for node %s: %s' % (self.parameters['node'], to_native(error)),
                                      exception=traceback.format_exc())

            return partner_name

    def disk_assign(self, needed_disks):
        """
        Assign disks to node.
        A count of 0 means "assign all unassigned disks" to the node.
        """
        if self.use_rest:
            api = "private/cli/storage/disk/assign"
            if needed_disks > 0:
                body = {
                    'owner': self.parameters['node'],
                    'count': needed_disks
                }
                if 'disk_type' in self.parameters:
                    body['type'] = self.parameters['disk_type']
            else:
                body = {
                    'node': self.parameters['node'],
                    'all': True
                }

            dummy, error = self.rest_api.post(api, body)
            if error:
                self.module.fail_json(msg=error)

        else:
            if needed_disks > 0:
                assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
                    'disk-sanown-assign', **{'owner': self.parameters['node'],
                                             'disk-count': str(needed_disks)})
                if 'disk_type' in self.parameters:
                    assign_disk.add_new_child('disk-type', self.parameters['disk_type'])
            else:
                assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
                    'disk-sanown-assign', **{'node-name': self.parameters['node'],
                                             'all': 'true'})

            try:
                self.server.invoke_successfully(assign_disk,
                                                enable_tunneling=True)
                return True
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error assigning disks %s' % (to_native(error)),
                                      exception=traceback.format_exc())

    def disk_unassign(self, disks):
        """
        Unassign disks.
        Disk autoassign must be turned off when removing ownership of a disk
        """
        if self.use_rest:
            api = "private/cli/storage/disk/removeowner"
            for disk in disks:  # api requires 1 disk to be removed at a time.
                body = {
                    'disk': disk['name']
                }
                dummy, error = self.rest_api.post(api, body)
                if error:
                    self.module.fail_json(msg=error)

        else:
            unassign_partitions = netapp_utils.zapi.NaElement('disk-sanown-remove-ownership')
            disk_list = netapp_utils.zapi.NaElement('disk-list')

            for disk in disks:
                disk_list.add_new_child('disk-name', disk)

            unassign_partitions.add_child_elem(disk_list)

            try:
                self.server.invoke_successfully(unassign_partitions, enable_tunneling=True)

            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error unassigning disks %s' % to_native(error))
            return True

    def apply(self):
        '''Apply action to disks: compute how many to assign/unassign and do it.'''
        changed = False

        owned_disks = self.get_disks(container_type='owned', node=self.parameters['node'])
        owned_disks_count = len(owned_disks)
        unassigned_disks = self.get_disks(container_type='unassigned')
        owned_spare_disks = self.get_disks(container_type='spare', node=self.parameters['node'])

        needed_disks = None
        unassign = {
            'spare_disks': None,
            'unassign_disks': None
        }

        # unassign disks if more disks are currently owned than requested.
        if 'disk_count' in self.parameters:
            if self.parameters['disk_count'] < owned_disks_count:
                unassign_disks_count = owned_disks_count - self.parameters['disk_count']
                # check to make sure we will have sufficient spares after the removal.
                if unassign_disks_count > (len(owned_spare_disks) - self.parameters['min_spares']):
                    self.module.fail_json(msg="disk removal would leave less than %s spares" % self.parameters['min_spares'])
                # unassign disks.
                unassign = {
                    'spare_disks': owned_spare_disks,
                    'unassign_disks': unassign_disks_count
                }

            # take spare disks from partner so they can be reassigned to the desired node.
            elif self.parameters['disk_count'] > (owned_disks_count + len(unassigned_disks)):
                required_disks_count = self.parameters['disk_count'] - (owned_disks_count + len(unassigned_disks))
                partner_node_name = self.get_partner_node_name()
                partner_spare_disks = self.get_disks(container_type='spare', node=partner_node_name)

                if required_disks_count > (len(partner_spare_disks) - self.parameters['min_spares']):
                    self.module.fail_json(msg="not enough disks available")
                else:
                    unassign = {
                        'spare_disks': partner_spare_disks,
                        'unassign_disks': required_disks_count
                    }

            # assign disks to node.
            if self.parameters['disk_count'] > owned_disks_count:
                needed_disks = self.parameters['disk_count'] - owned_disks_count

        else:
            if len(unassigned_disks) >= 1:
                # assign all unassigned disks to node
                needed_disks = 0

        # unassign
        if unassign['spare_disks'] and unassign['unassign_disks']:
            if not self.module.check_mode:
                self.disk_unassign(unassign['spare_disks'][0:unassign['unassign_disks']])
            changed = True
        # assign
        if needed_disks is not None:
            if not self.module.check_mode:
                self.disk_assign(needed_disks)
            changed = True

        self.module.exit_json(changed=changed)
modify DNS servers. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete, modify DNS servers. +- With REST, the module is currently limited to data vservers for delete or modify operations. +options: + state: + description: + - Whether the DNS servers should be enabled for the given vserver. + choices: ['present', 'absent'] + type: str + default: present + + vserver: + description: + - The name of the vserver to use. + - With REST, for cluster scoped DNS, omit this option or set it to NULL. + - With ZAPI or REST, for cluster scoped DNS, this can also be set to the cluster vserver name. + type: str + + domains: + description: + - List of DNS domains such as 'sales.bar.com'. The first domain is the one that the Vserver belongs to. + type: list + elements: str + + nameservers: + description: + - List of IPv4 addresses of name servers such as '123.123.123.123'. + type: list + elements: str + + skip_validation: + type: bool + description: + - By default, all nameservers are checked to validate they are available to resolve. + - If you DNS servers are not yet installed or momentarily not available, you can set this option to 'true' + - to bypass the check for all servers specified in nameservers field. + - With REST, requires ONTAP 9.9.1 or later and ignored for cluster DNS operations. 
class NetAppOntapDns:
    """
    Enable and Disable dns
    Create/delete/modify DNS configuration, either per data vserver or at
    cluster scope, using REST when available and falling back to ZAPI.
    """

    def __init__(self):
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=False, type='str'),
            domains=dict(required=False, type='list', elements='str'),
            nameservers=dict(required=False, type='list', elements='str'),
            skip_validation=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[('state', 'present', ['domains', 'nameservers'])],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Cluster vserver and data vserver use different REST API.
        self.is_cluster = False

        # REST API should be used for ONTAP 9.6 or higher, ZAPI for lower version
        # skip_validation requires ONTAP 9.9.1 with REST; is_rest_supported_properties
        # falls back to ZAPI when the option is requested on an older release.
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, [['skip_validation', (9, 9, 1)]])
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            # ZAPI is vserver-scoped, so vserver is mandatory on that path.
            if not self.parameters.get('vserver'):
                self.module.fail_json(msg="Error: vserver is a required parameter with ZAPI.")
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
        return

    def patch_cluster_dns(self):
        """Update cluster-scoped DNS (dns_domains/name_servers) via PATCH /cluster."""
        api = 'cluster'
        body = {
            'dns_domains': self.parameters['domains'],
            'name_servers': self.parameters['nameservers']
        }
        # the /cluster endpoint has no skip_config_validation knob
        if self.parameters.get('skip_validation'):
            self.module.warn("skip_validation is ignored for cluster DNS operations in REST.")
        dummy, error = rest_generic.patch_async(self.rest_api, api, None, body)
        if error:
            self.module.fail_json(msg="Error updating cluster DNS options: %s" % error)

    def create_dns_rest(self):
        """
        Create DNS server
        :return: none
        """
        if self.is_cluster or not self.parameters.get('vserver'):
            # with 9.13, using scope=cluster with POST on 'name-services/dns' does not work:
            # "svm.uuid" is a required field
            return self.patch_cluster_dns()

        api = 'name-services/dns'
        body = {
            'domains': self.parameters['domains'],
            'servers': self.parameters['nameservers'],
            'svm': {
                'name': self.parameters['vserver']
            }
        }
        if 'skip_validation' in self.parameters:
            body['skip_config_validation'] = self.parameters['skip_validation']
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg="Error creating DNS service: %s" % error)

    def create_dns(self):
        """
        Create DNS server
        :return: none
        """
        if self.use_rest:
            return self.create_dns_rest()

        # ZAPI path: build the net-dns-create request element by element
        dns = netapp_utils.zapi.NaElement('net-dns-create')
        nameservers = netapp_utils.zapi.NaElement('name-servers')
        domains = netapp_utils.zapi.NaElement('domains')
        for each in self.parameters['nameservers']:
            ip_address = netapp_utils.zapi.NaElement('ip-address')
            ip_address.set_content(each)
            nameservers.add_child_elem(ip_address)
        dns.add_child_elem(nameservers)
        for each in self.parameters['domains']:
            domain = netapp_utils.zapi.NaElement('string')
            domain.set_content(each)
            domains.add_child_elem(domain)
        dns.add_child_elem(domains)
        if self.parameters.get('skip_validation'):
            validation = netapp_utils.zapi.NaElement('skip-config-validation')
            validation.set_content(str(self.parameters['skip_validation']))
            dns.add_child_elem(validation)
        try:
            self.server.invoke_successfully(dns, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating dns: %s' % to_native(error),
                                  exception=traceback.format_exc())

    def destroy_dns_rest(self, dns_attrs):
        """
        Destroys an already created dns
        :return:
        """
        if self.is_cluster:
            # cluster DNS cannot be deleted through this endpoint on older releases
            error = 'Error: cluster scope when deleting DNS with REST requires ONTAP 9.9.1 or later.'
            self.module.fail_json(msg=error)
        api = 'name-services/dns'
        dummy, error = rest_generic.delete_async(self.rest_api, api, dns_attrs['uuid'])
        if error:
            self.module.fail_json(msg="Error deleting DNS service: %s" % error)

    def destroy_dns(self, dns_attrs):
        """
        Destroys an already created dns
        :return:
        """
        if self.use_rest:
            return self.destroy_dns_rest(dns_attrs)

        try:
            self.server.invoke_successfully(netapp_utils.zapi.NaElement('net-dns-destroy'), True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error destroying dns: %s' % to_native(error),
                                  exception=traceback.format_exc())

    def get_cluster(self):
        """Return the /cluster record (used for cluster-scoped DNS attributes)."""
        api = "cluster"
        record, error = rest_generic.get_one_record(self.rest_api, api)
        if error:
            self.module.fail_json(msg="Error getting cluster info: %s" % error)
        return record

    def get_cluster_dns(self):
        """Read DNS settings from the cluster record; sets self.is_cluster on a match.

        Returns None when the requested vserver is not the cluster vserver or
        when no DNS attributes are configured at cluster level.
        """
        cluster_attrs = self.get_cluster()
        dns_attrs = None
        if not self.parameters.get('vserver') or self.parameters['vserver'] == cluster_attrs['name']:
            dns_attrs = {
                'domains': cluster_attrs.get('dns_domains'),
                'nameservers': cluster_attrs.get('name_servers'),
                'uuid': cluster_attrs['uuid'],
            }
            self.is_cluster = True
            if dns_attrs['domains'] is None and dns_attrs['nameservers'] is None:
                dns_attrs = None
        return dns_attrs

    def get_dns_rest(self):
        """Fetch current DNS config via REST; falls back to the cluster API when needed."""
        if not self.parameters.get('vserver') and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
            # scope requires 9.9, so revert to cluster API
            return self.get_cluster_dns()

        api = "name-services/dns"
        params = {'fields': 'domains,servers,svm'}
        if self.parameters.get('vserver'):
            # omit scope as vserver may be a cluster vserver
            params['svm.name'] = self.parameters['vserver']
        else:
            params['scope'] = 'cluster'
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg="Error getting DNS service: %s" % error)
        if record:
            return {
                'domains': record['domains'],
                'nameservers': record['servers'],
                'uuid': record['svm']['uuid']
            }
        if self.parameters.get('vserver') and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
            # There is a chance we are working at the cluster level
            return self.get_cluster_dns()
        return None

    def get_dns(self):
        """Return current DNS attributes (dict) or None when DNS is not configured."""
        if self.use_rest:
            return self.get_dns_rest()

        dns_obj = netapp_utils.zapi.NaElement('net-dns-get')
        try:
            result = self.server.invoke_successfully(dns_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) == "15661":
                # 15661 is object not found
                return None
            else:
                self.module.fail_json(msg="Error getting DNS info: %s." % to_native(error), exception=traceback.format_exc())

        attributes = result.get_child_by_name('attributes')
        if attributes is None:
            return
        dns_info = attributes.get_child_by_name('net-dns-info')
        nameservers = dns_info.get_child_by_name('name-servers')
        attrs = {
            'nameservers': [
                each.get_content() for each in nameservers.get_children()
            ]
        }
        domains = dns_info.get_child_by_name('domains')
        attrs['domains'] = [each.get_content() for each in domains.get_children()]
        return attrs

    def modify_dns_rest(self, dns_attrs):
        """PATCH only the DNS fields that differ from the requested state."""
        if self.is_cluster:
            return self.patch_cluster_dns()
        body = {}
        if dns_attrs['nameservers'] != self.parameters['nameservers']:
            body['servers'] = self.parameters['nameservers']
        if dns_attrs['domains'] != self.parameters['domains']:
            body['domains'] = self.parameters['domains']
        if 'skip_validation' in self.parameters:
            body['skip_config_validation'] = self.parameters['skip_validation']
        api = "name-services/dns"
        dummy, error = rest_generic.patch_async(self.rest_api, api, dns_attrs['uuid'], body)
        if error:
            self.module.fail_json(msg="Error modifying DNS configuration: %s" % error)

    def modify_dns(self, dns_attrs):
        """Modify DNS settings; only the changed elements are sent on the ZAPI path."""
        if self.use_rest:
            return self.modify_dns_rest(dns_attrs)
        dns = netapp_utils.zapi.NaElement('net-dns-modify')
        if dns_attrs['nameservers'] != self.parameters['nameservers']:
            nameservers = netapp_utils.zapi.NaElement('name-servers')
            for each in self.parameters['nameservers']:
                ip_address = netapp_utils.zapi.NaElement('ip-address')
                ip_address.set_content(each)
                nameservers.add_child_elem(ip_address)
            dns.add_child_elem(nameservers)
        if dns_attrs['domains'] != self.parameters['domains']:
            domains = netapp_utils.zapi.NaElement('domains')
            for each in self.parameters['domains']:
                domain = netapp_utils.zapi.NaElement('string')
                domain.set_content(each)
                domains.add_child_elem(domain)
            dns.add_child_elem(domains)
        if self.parameters.get('skip_validation'):
            validation = netapp_utils.zapi.NaElement('skip-config-validation')
            validation.set_content(str(self.parameters['skip_validation']))
            dns.add_child_elem(validation)
        try:
            self.server.invoke_successfully(dns, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying dns: %s' % to_native(error), exception=traceback.format_exc())

    def apply(self):
        """Idempotent entry point: compute create/delete/modify and execute it."""
        dns_attrs = self.get_dns()
        cd_action = self.na_helper.get_cd_action(dns_attrs, self.parameters)
        modify = None
        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(dns_attrs, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_dns()
            elif cd_action == 'delete':
                self.destroy_dns(dns_attrs)
            else:
                self.modify_dns(dns_attrs)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)
+ """ + obj = NetAppOntapDns() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_domain_tunnel.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_domain_tunnel.py new file mode 100644 index 000000000..67e238794 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_domain_tunnel.py @@ -0,0 +1,168 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +module: na_ontap_domain_tunnel +short_description: NetApp ONTAP domain tunnel +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete or modify the domain tunnel. +options: + state: + description: + - Whether the domain tunnel should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - The name of the vserver that the domain tunnel should be created or deleted on. 
class NetAppOntapDomainTunnel(object):
    """Manage the cluster domain tunnel (create/modify/delete) through the REST API."""

    # single REST endpoint used by every operation in this module
    _API = "/security/authentication/cluster/ad-proxy"

    def __init__(self):
        """
        Initialize the ONTAP domain tunnel class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        # this module has no ZAPI equivalent; REST (ONTAP 9.7+) is required
        if not self.use_rest:
            self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_domain_tunnel', '9.7'))

    def _tunnel_body(self):
        # request payload identifying the target vserver
        return {
            "svm": {
                "name": self.parameters['vserver']
            }
        }

    def get_domain_tunnel(self):
        """Return {'vserver': name} for the current tunnel, or None when none exists."""
        record, error = self.rest_api.get("/security/authentication/cluster/ad-proxy")

        if error:
            # error code 4 means the table is empty (no tunnel configured)
            if int(error['code']) != 4:
                self.module.fail_json(msg=error)
        if not record:
            return None
        return {
            'vserver': record['svm']['name']
        }

    def create_domain_tunnel(self):
        """POST a new domain tunnel on the configured vserver."""
        dummy, error = self.rest_api.post("/security/authentication/cluster/ad-proxy", self._tunnel_body())
        if error:
            self.module.fail_json(msg=error)

    def modify_domain_tunnel(self):
        """PATCH the domain tunnel so it points at the configured vserver."""
        dummy, error = self.rest_api.patch("/security/authentication/cluster/ad-proxy", self._tunnel_body())
        if error:
            self.module.fail_json(msg=error)

    def delete_domain_tunnel(self):
        """DELETE the current domain tunnel."""
        dummy, error = self.rest_api.delete("/security/authentication/cluster/ad-proxy")
        if error:
            self.module.fail_json(msg=error)

    def apply(self):
        """Idempotent flow: read current state, then create/delete/modify as needed."""
        current = self.get_domain_tunnel()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_domain_tunnel()
            elif cd_action == 'delete':
                self.delete_domain_tunnel()
            elif modify:
                self.modify_domain_tunnel()

        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)


def main():
    """
    Creates the NetApp ONTAP Domain Tunnel and runs the correct playbook task
    """
    obj = NetAppOntapDomainTunnel()
    obj.apply()


if __name__ == '__main__':
    main()
+DOCUMENTATION = ''' +module: na_ontap_efficiency_policy +short_description: NetApp ONTAP manage efficiency policies (sis policies) +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Modify/Delete efficiency policies (sis policies) +options: + state: + description: + - Whether the specified efficiency policy should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + policy_name: + description: + - the name of the efficiency policy + required: true + type: str + + comment: + description: + - A brief description of the policy. + type: str + + duration: + description: + - The duration in hours for which the scheduled efficiency operation should run. + After this time expires, the efficiency operation will be stopped even if the operation is incomplete. + If '-' is specified as the duration, the efficiency operation will run till it completes. Otherwise, the duration has to be an integer greater than 0. + By default, the operation runs till it completes. + type: str + + enabled: + description: + - If the value is true, the efficiency policy is active in this cluster. + If the value is false this policy will not be activated by the schedulers and hence will be inactive. + type: bool + + policy_type: + description: + - The policy type reflects the reason a volume using this policy will start processing a changelog. + - (Changelog processing is identifying and eliminating duplicate blocks which were written since the changelog was last processed.) + - threshold Changelog processing occurs once the changelog reaches a certain percent full. + - scheduled Changelog processing will be triggered by time. + choices: ['threshold', 'scheduled'] + type: str + + qos_policy: + description: + - QoS policy for the efficiency operation. 
+ - background efficiency operation will run in background with minimal or no impact on data serving client operations, + - best-effort efficiency operations may have some impact on data serving client operations. + choices: ['background', 'best_effort'] + type: str + + schedule: + description: + - Cron type job schedule name. When the associated policy is set on a volume, the efficiency operation will be triggered for the volume on this schedule. + - These schedules can be created using the na_ontap_job_schedule module + type: str + + vserver: + description: + - Name of the vserver to use. + required: true + type: str + + changelog_threshold_percent: + description: + - Specifies the percentage at which the changelog will be processed for a threshold type of policy, tested once each hour. + type: int + version_added: '19.11.0' +''' + +EXAMPLES = """ + - name: Create threshold efficiency policy + netapp.ontap.na_ontap_efficiency_policy: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + vserver: ansible + state: present + policy_name: test + comment: This policy is for x and y + enabled: true + policy_type: threshold + qos_policy: background + changelog_threshold_percent: 20 + + - name: Create Scheduled efficiency Policy + netapp.ontap.na_ontap_efficiency_policy: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + vserver: ansible + state: present + policy_name: test2 + comment: This policy is for x and y + enabled: true + schedule: new_job_schedule + duration: 1 + policy_type: scheduled + qos_policy: background +""" + +RETURN = """ +""" + +import traceback +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from 
ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapEfficiencyPolicy(object):
+    """
+    Create, delete and modify efficiency policy
+    """
+    def __init__(self):
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            policy_name=dict(required=True, type='str'),
+            comment=dict(required=False, type='str'),
+            duration=dict(required=False, type='str'),
+            enabled=dict(required=False, type='bool'),
+            policy_type=dict(required=False, choices=['threshold', 'scheduled']),
+            qos_policy=dict(required=False, choices=['background', 'best_effort']),
+            schedule=dict(required=False, type='str'),
+            vserver=dict(required=True, type='str'),
+            changelog_threshold_percent=dict(required=False, type='int')
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True,
+            mutually_exclusive=[('changelog_threshold_percent', 'duration'), ('changelog_threshold_percent', 'schedule')]
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        # Set up Rest API
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+        # uuid of the REST policy record, cached by get_efficiency_policy_rest()
+        # for subsequent PATCH/DELETE calls.
+        self.uuid = None
+
+        # REST support for these APIs starts at 9.8; older clusters fall back to ZAPI.
+        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0):
+            msg = 'REST requires ONTAP 9.8 or later for efficiency_policy APIs.'
+            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
+
+        # Cross-parameter validation: threshold policies cannot carry schedule/duration,
+        # scheduled policies cannot carry changelog_threshold_percent.
+        if self.parameters.get('policy_type') and self.parameters['state'] == 'present':
+            if self.parameters['policy_type'] == 'threshold':
+                if self.parameters.get('duration'):
+                    self.module.fail_json(msg="duration cannot be set if policy_type is threshold")
+                if self.parameters.get('schedule'):
+                    self.module.fail_json(msg='schedule cannot be set if policy_type is threshold')
+            # if policy_type is 'scheduled'
+            else:
+                if self.parameters.get('changelog_threshold_percent'):
+                    self.module.fail_json(msg='changelog_threshold_percent cannot be set if policy_type is scheduled')
+
+        # if duration not set for a policy, ZAPI returns "-", whereas REST returns 0.
+        # "-" is an invalid value in REST, set to 0 if REST.
+        if self.parameters.get('duration') == "-" and self.use_rest:
+            self.parameters['duration'] = '0'
+
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+            self.set_playbook_zapi_key_map()
+
+    def set_playbook_zapi_key_map(self):
+        """Build the module-option -> ZAPI-field maps, grouped by value type."""
+        self.na_helper.zapi_int_keys = {
+            'changelog_threshold_percent': 'changelog-threshold-percent'
+        }
+        self.na_helper.zapi_str_keys = {
+            'policy_name': 'policy-name',
+            'comment': 'comment',
+            'policy_type': 'policy-type',
+            'qos_policy': 'qos-policy',
+            'schedule': 'schedule',
+            'duration': 'duration'
+        }
+        self.na_helper.zapi_bool_keys = {
+            'enabled': 'enabled'
+        }
+
+    def get_efficiency_policy(self):
+        """
+        Get a efficiency policy
+        :return: a efficiency-policy info
+        """
+        if self.use_rest:
+            return self.get_efficiency_policy_rest()
+        sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-get-iter")
+        query = netapp_utils.zapi.NaElement("query")
+        sis_policy_info = netapp_utils.zapi.NaElement("sis-policy-info")
+        sis_policy_info.add_new_child("policy-name", self.parameters['policy_name'])
+        sis_policy_info.add_new_child("vserver", self.parameters['vserver'])
+        query.add_child_elem(sis_policy_info)
+        sis_policy_obj.add_child_elem(query)
+        try:
+            results = self.server.invoke_successfully(sis_policy_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error searching for efficiency policy %s: %s" % (self.parameters['policy_name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        return_value = {}
+        # exactly one match expected for (policy-name, vserver)
+        if results.get_child_by_name('num-records') and int(results.get_child_content('num-records')) == 1:
+            attributes_list = results.get_child_by_name('attributes-list')
+            sis_info = attributes_list.get_child_by_name('sis-policy-info')
+            # convert ZAPI string payloads back to typed option values
+            for option, zapi_key in self.na_helper.zapi_int_keys.items():
+                return_value[option] = self.na_helper.get_value_for_int(from_zapi=True, value=sis_info.get_child_content(zapi_key))
+            for option, zapi_key in self.na_helper.zapi_bool_keys.items():
+                return_value[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=sis_info.get_child_content(zapi_key))
+            for option, zapi_key in self.na_helper.zapi_str_keys.items():
+                return_value[option] = sis_info.get_child_content(zapi_key)
+            return return_value
+        return None
+
+    def get_efficiency_policy_rest(self):
+        """Fetch the policy via REST; cache its uuid and return it in option shape, or None."""
+        api = 'storage/volume-efficiency-policies'
+        query = {'name': self.parameters['policy_name'], 'svm.name': self.parameters['vserver']}
+        fields = 'name,type,start_threshold_percent,qos_policy,schedule,comment,duration,enabled'
+        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+        if error:
+            self.module.fail_json(msg="Error searching for efficiency policy %s: %s" % (self.parameters['policy_name'], error))
+        if record:
+            self.uuid = record['uuid']
+            current = {
+                'policy_name': record['name'],
+                'policy_type': record['type'],
+                'qos_policy': record['qos_policy'],
+                'schedule': record['schedule']['name'] if 'schedule' in record else None,
+                'enabled': record['enabled'],
+                # duration is kept as a string to match the ZAPI representation
+                'duration': str(record['duration']) if 'duration' in record else None,
+                'changelog_threshold_percent': record['start_threshold_percent'] if 'start_threshold_percent' in record else None,
+                'comment': record['comment']
+            }
+            return current
+        return None
+
+    def create_efficiency_policy(self):
+        """
+        Creates a efficiency policy
+        :return: None
+        """
+        if self.use_rest:
+            return self.create_efficiency_policy_rest()
+        sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-create")
+        # only send options the user actually provided
+        for option, zapi_key in self.na_helper.zapi_int_keys.items():
+            if self.parameters.get(option):
+                sis_policy_obj.add_new_child(zapi_key,
+                                             self.na_helper.get_value_for_int(from_zapi=False,
+                                                                              value=self.parameters[option]))
+        for option, zapi_key in self.na_helper.zapi_bool_keys.items():
+            if self.parameters.get(option):
+                sis_policy_obj.add_new_child(zapi_key,
+                                             self.na_helper.get_value_for_bool(from_zapi=False,
+                                                                               value=self.parameters[option]))
+        for option, zapi_key in self.na_helper.zapi_str_keys.items():
+            if self.parameters.get(option):
+                sis_policy_obj.add_new_child(zapi_key, str(self.parameters[option]))
+        try:
+            self.server.invoke_successfully(sis_policy_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error creating efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def create_efficiency_policy_rest(self):
+        """Create the policy via REST using all provided options."""
+        api = 'storage/volume-efficiency-policies'
+        body = {
+            'svm.name': self.parameters['vserver'],
+            'name': self.parameters['policy_name']
+        }
+        create_or_modify_body = self.form_create_or_modify_body(self.parameters)
+        if create_or_modify_body:
+            body.update(create_or_modify_body)
+        dummy, error = rest_generic.post_async(self.rest_api, api, body)
+        if error:
+            self.module.fail_json(msg="Error creating efficiency policy %s: %s" % (self.parameters["policy_name"], error))
+
+    def form_create_or_modify_body(self, create_or_modify):
+        """
+        Form body contents for create or modify efficiency policy.
+        :return: create or modify body.
+        """
+        body = {}
+        if 'comment' in create_or_modify:
+            body['comment'] = create_or_modify['comment']
+        if 'duration' in create_or_modify:
+            body['duration'] = create_or_modify['duration']
+        if 'enabled' in create_or_modify:
+            body['enabled'] = create_or_modify['enabled']
+        if 'qos_policy' in create_or_modify:
+            body['qos_policy'] = create_or_modify['qos_policy']
+        if 'schedule' in create_or_modify:
+            body['schedule'] = {'name': create_or_modify['schedule']}
+        if 'changelog_threshold_percent' in create_or_modify:
+            # REST calls this field start_threshold_percent
+            body['start_threshold_percent'] = create_or_modify['changelog_threshold_percent']
+        if 'policy_type' in create_or_modify:
+            body['type'] = create_or_modify['policy_type']
+        return body
+
+    def delete_efficiency_policy(self):
+        """
+        Delete a efficiency Policy
+        :return: None
+        """
+        if self.use_rest:
+            return self.delete_efficiency_policy_rest()
+        sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-delete")
+        sis_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+        try:
+            self.server.invoke_successfully(sis_policy_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error deleting efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def delete_efficiency_policy_rest(self):
+        """Delete the policy via REST, addressed by the uuid cached during get."""
+        api = 'storage/volume-efficiency-policies'
+        dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid)
+        if error:
+            self.module.fail_json(msg="Error deleting efficiency policy %s: %s" % (self.parameters["policy_name"], error))
+
+    def modify_efficiency_policy(self, modify):
+        """
+        Modify a efficiency policy
+        :return: None
+        """
+        if self.use_rest:
+            return self.modify_efficiency_policy_rest(modify)
+        sis_policy_obj = netapp_utils.zapi.NaElement("sis-policy-modify")
+        sis_policy_obj.add_new_child("policy-name", self.parameters['policy_name'])
+        for attribute in modify:
+            sis_policy_obj.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute]))
+        try:
+            self.server.invoke_successfully(sis_policy_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error modifying efficiency policy %s: %s" % (self.parameters["policy_name"], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    @staticmethod
+    def attribute_to_name(attribute):
+        # ZAPI field names use dashes where module options use underscores.
+        return str.replace(attribute, '_', '-')
+
+    def validate_modify(self, current, modify):
+        """
+        sis-policy-create zapi pre-checks the options and fails if it's not supported.
+        is-policy-modify pre-checks one of the options, but tries to modify the others even it's not supported. And it will mess up the vsim.
+        Do the checks before sending to the zapi.
+        This checks applicable for REST modify too.
+        """
+        # Only reject options incompatible with the policy type that will be in
+        # effect after the modify (current type unless the user is switching it).
+        if current['policy_type'] == 'scheduled' and self.parameters.get('policy_type') != 'threshold':
+            if modify.get('changelog_threshold_percent'):
+                self.module.fail_json(msg="changelog_threshold_percent cannot be set if policy_type is scheduled")
+        elif current['policy_type'] == 'threshold' and self.parameters.get('policy_type') != 'scheduled':
+            if modify.get('duration'):
+                self.module.fail_json(msg="duration cannot be set if policy_type is threshold")
+            elif modify.get('schedule'):
+                self.module.fail_json(msg="schedule cannot be set if policy_type is threshold")
+
+    def modify_efficiency_policy_rest(self, modify):
+        """PATCH only the changed attributes via REST."""
+        api = 'storage/volume-efficiency-policies'
+        body = self.form_create_or_modify_body(modify)
+        dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
+        if error:
+            self.module.fail_json(msg="Error modifying efficiency policy %s: %s" % (self.parameters["policy_name"], error))
+
+    def apply(self):
+        """Run the idempotent create/modify/delete cycle and exit the module."""
+        current = self.get_efficiency_policy()
+        modify = None
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and self.parameters['state'] == 'present':
+            modify = self.na_helper.get_modified_attributes(current, self.parameters)
+            if modify:
+                # reject incompatible option combinations before touching the system
+                self.validate_modify(current, modify)
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_efficiency_policy()
+            elif cd_action == 'delete':
+                self.delete_efficiency_policy()
+            elif modify:
+                self.modify_efficiency_policy(modify)
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
+        self.module.exit_json(**result)
+
+
+def main():
+    """Entry point: instantiate the module object and apply the desired state."""
+    obj = NetAppOntapEfficiencyPolicy()
+    obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_destination.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_destination.py
new file mode 100644
index 000000000..76ddfa31b
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_destination.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_ontap_ems_destination
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_ontap_ems_destination
+short_description: NetApp ONTAP configuration for EMS event destination
+extends_documentation_fragment:
+  - netapp.ontap.netapp.na_ontap
+version_added: 21.23.0
+author: Bartosz Bielawski (@bielawb)
+description:
+  - Configure EMS destination. Currently certificate authentication for REST is not supported.
+options:
+  state:
+    description:
+      - Whether the destination should be present or not.
+    choices: ['present', 'absent']
+    type: str
+    default: present
+  name:
+    description:
+      - Name of the EMS destination.
+    required: true
+    type: str
+  type:
+    description:
+      - Type of the EMS destination.
+    choices: ['email', 'syslog', 'rest_api']
+    required: true
+    type: str
+  destination:
+    description:
+      - Destination - content depends on the type.
+ required: true + type: str + filters: + description: + - List of filters that destination is linked to. + required: true + type: list + elements: str +''' + +EXAMPLES = """ + - name: Configure REST EMS destination + netapp.ontap.na_ontap_ems_destination: + state: present + name: rest + type: rest_api + filters: ['important_events'] + destination: http://my.rest.api/address + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + + - name: Remove email EMS destination + netapp.ontap.na_ontap_ems_destination: + state: absent + name: email_destination + type: email + filters: ['important_events'] + destination: netapp@company.com + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" +""" + +RETURN = """ + +""" +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapEmsDestination: + """Create/Modify/Remove EMS destination""" + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + type=dict(required=True, type='str', choices=['email', 'syslog', 'rest_api']), + destination=dict(required=True, type='str'), + filters=dict(required=True, type='list', elements='str') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + self.module.fail_json(msg='na_ontap_ems_destination is 
only supported with REST API') + + def fail_on_error(self, error, action): + if error is None: + return + self.module.fail_json(msg="Error %s: %s" % (action, error)) + + def generate_filters_list(self, filters): + return [{'name': filter} for filter in filters] + + def get_ems_destination(self, name): + api = 'support/ems/destinations' + fields = 'name,type,destination,filters.name' + query = dict(name=name, fields=fields) + record, error = rest_generic.get_one_record(self.rest_api, api, query) + self.fail_on_error(error, 'fetching EMS destination for %s' % name) + if record: + current = { + 'name': self.na_helper.safe_get(record, ['name']), + 'type': self.na_helper.safe_get(record, ['type']), + 'destination': self.na_helper.safe_get(record, ['destination']), + 'filters': None + } + # 9.9.0 and earlier versions returns rest-api, convert it to rest_api. + if current['type'] and '-' in current['type']: + current['type'] = current['type'].replace('-', '_') + if self.na_helper.safe_get(record, ['filters']): + current['filters'] = [filter['name'] for filter in record['filters']] + return current + return None + + def create_ems_destination(self): + api = 'support/ems/destinations' + name = self.parameters['name'] + body = { + 'name': name, + 'type': self.parameters['type'], + 'destination': self.parameters['destination'], + 'filters': self.generate_filters_list(self.parameters['filters']) + } + dummy, error = rest_generic.post_async(self.rest_api, api, body) + self.fail_on_error(error, 'creating EMS destinations for %s' % name) + + def delete_ems_destination(self, name): + api = 'support/ems/destinations' + dummy, error = rest_generic.delete_async(self.rest_api, api, name) + self.fail_on_error(error, 'deleting EMS destination for %s' % name) + + def modify_ems_destination(self, name, modify): + if 'type' in modify: + # changing type is not supported + self.delete_ems_destination(name) + self.create_ems_destination() + else: + body = {} + for option in modify: + if 
option == 'filters': + body[option] = self.generate_filters_list(modify[option]) + else: + body[option] = modify[option] + if body: + api = 'support/ems/destinations' + dummy, error = rest_generic.patch_async(self.rest_api, api, name, body) + self.fail_on_error(error, 'modifying EMS destination for %s' % name) + + def apply(self): + name = None + modify = None + current = self.get_ems_destination(self.parameters['name']) + name = self.parameters['name'] + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + saved_modify = str(modify) + if self.na_helper.changed and not self.module.check_mode: + if modify: + self.modify_ems_destination(name, modify) + elif cd_action == 'create': + self.create_ems_destination() + else: + self.delete_ems_destination(name) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, saved_modify) + self.module.exit_json(**result) + + +def main(): + obj = NetAppOntapEmsDestination() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_filter.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_filter.py new file mode 100644 index 000000000..bdd3a73c3 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ems_filter.py @@ -0,0 +1,250 @@ +#!/usr/bin/python + +# (c) 2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_ems_filter +short_description: NetApp ONTAP EMS Filter +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 22.4.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create, delete, or modify EMS filters on NetApp 
ONTAP. This module only supports REST. +notes: + - This module only supports REST. + +options: + state: + description: + - Whether the specified user should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + + name: + description: + - Name of the EMS Filter + required: True + type: str + + rules: + description: List of EMS filter rules + type: list + elements: dict + suboptions: + index: + description: Index of rule + type: int + required: True + type: + description: The type of rule + type: str + choices: ['include', 'exclude'] + required: True + message_criteria: + description: Message criteria for EMS filter, required one of severities, name_pattern when creating ems filter. + type: dict + suboptions: + severities: + description: comma separated string of severities this rule applies to + type: str + name_pattern: + description: Name pattern to apply rule to + type: str +''' + +EXAMPLES = """ + - name: Create EMS filter + netapp.ontap.na_ontap_ems_filter: + state: present + name: carchi_ems + rules: + - index: 1 + type: include + message_criteria: + severities: "error" + name_pattern: "callhome.*" + - index: 2 + type: include + message_criteria: + severities: "EMERGENCY" + + - name: Modify EMS filter add rule + netapp.ontap.na_ontap_ems_filter: + state: present + name: carchi_ems + rules: + - index: 1 + type: include + message_criteria: + severities: "error" + name_pattern: "callhome.*" + - index: 2 + type: include + message_criteria: + severities: "EMERGENCY" + - index: 3 + type: include + message_criteria: + severities: "ALERT" + + - name: Delete EMS Filter + netapp.ontap.na_ontap_ems_filter: + state: absent + name: carchi_ems +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module 
import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapEMSFilters: + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + rules=dict(type='list', elements='dict', options=dict( + index=dict(required=True, type="int"), + type=dict(required=True, type="str", choices=['include', 'exclude']), + message_criteria=dict(type="dict", options=dict( + severities=dict(required=False, type="str"), + name_pattern=dict(required=False, type="str") + )) + )) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + if not self.use_rest: + self.module.fail_json(msg="This module require REST with ONTAP 9.6 or higher") + + def get_ems_filter(self): + api = 'support/ems/filters' + params = {'name': self.parameters['name'], + 'fields': "rules"} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg="Error fetching ems filter %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + return record + + def create_ems_filter(self): + api = 'support/ems/filters' + body = {'name': self.parameters['name']} + if self.parameters.get('rules'): + body['rules'] = self.na_helper.filter_out_none_entries(self.parameters['rules']) + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error creating EMS filter %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_ems_filter(self): 
+ api = 'support/ems/filters' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name']) + if error: + self.module.fail_json(msg='Error deleting EMS filter %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_ems_filter(self): + # only variable other than name is rules, so if we hit this we know rules has been changed + api = 'support/ems/filters' + body = {'rules': self.na_helper.filter_out_none_entries(self.parameters['rules'])} + dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body) + if error: + self.module.fail_json(msg='Error modifying EMS filter %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def find_modify(self, current): + # The normal modify will not work for 2 reasons + # First ems filter will add a new rule at the end that excludes everything that there isn't a rule for + # Second Options that are not given are returned as '*' in rest + if not current: + return False + # Modify Current to remove auto added rule, from testing it always appears to be the last element + if current.get('rules'): + current['rules'].pop() + # Next check if both have no rules + if current.get('rules') is None and self.parameters.get('rules') is None: + return False + # Next let check if rules is the same size if not we need to modify + if len(current.get('rules')) != len(self.parameters.get('rules')): + return True + # Next let put the current rules in a dictionary by rule number + current_rules = self.dic_of_rules(current) + # Now we need to compare each field to see if there is a match + modify = False + for rule in self.parameters['rules']: + # allow modify if a desired rule index may not exist in current rules. + # when testing found only index 1, 2 are allowed, if try to set index other than this, let REST throw error. 
+ if current_rules.get(rule['index']) is None: + modify = True + break + # Check if types are the same + if rule['type'].lower() != current_rules[rule['index']]['type'].lower(): + modify = True + break + if rule.get('message_criteria'): + if rule['message_criteria'].get('severities') and rule['message_criteria']['severities'].lower() != \ + current_rules[rule['index']]['message_criteria']['severities'].lower(): + modify = True + break + if rule['message_criteria'].get('name_pattern') and rule['message_criteria']['name_pattern'] != \ + current_rules[rule['index']]['message_criteria']['name_pattern']: + modify = True + break + return modify + + def dic_of_rules(self, current): + rules = {} + for rule in current['rules']: + rules[rule['index']] = rule + return rules + + def apply(self): + current = self.get_ems_filter() + cd_action, modify = None, False + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify = self.find_modify(current) + if modify: + self.na_helper.changed = True + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_ems_filter() + if cd_action == 'delete': + self.delete_ems_filter() + if modify: + self.modify_ems_filter() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + '''Apply volume operations from playbook''' + obj = NetAppOntapEMSFilters() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py new file mode 100644 index 000000000..3b182e13c --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy.py @@ -0,0 +1,274 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' 
+na_ontap_export_policy +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_export_policy +short_description: NetApp ONTAP manage export-policy +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create or destroy or rename export-policies on ONTAP +options: + state: + description: + - Whether the specified export policy should exist or not. + choices: ['present', 'absent'] + type: str + default: present + name: + description: + - The name of the export-policy to manage. + type: str + required: true + from_name: + description: + - The name of the export-policy to be renamed. + type: str + version_added: 2.7.0 + vserver: + required: true + type: str + description: + - Name of the vserver to use. +''' + +EXAMPLES = """ + - name: Create Export Policy + netapp.ontap.na_ontap_export_policy: + state: present + name: ansiblePolicyName + vserver: vs_hack + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Rename Export Policy + netapp.ontap.na_ontap_export_policy: + state: present + from_name: ansiblePolicyName + vserver: vs_hack + name: newPolicyName + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Delete Export Policy + netapp.ontap.na_ontap_export_policy: + state: absent + name: ansiblePolicyName + vserver: vs_hack + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from 
ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppONTAPExportPolicy(): + """ + Class with export policy methods + """ + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str', default=None), + vserver=dict(required=True, type='str') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + if self.rest_api.is_rest(): + self.use_rest = True + elif HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_export_policy(self, name=None): + """ + Return details about the export-policy + :param: + name : Name of the export-policy + :return: Details about the export-policy. None if not found. 
+ :rtype: dict + """ + if name is None: + name = self.parameters['name'] + if self.use_rest: + return self.get_export_policy_rest(name) + else: + export_policy_iter = netapp_utils.zapi.NaElement('export-policy-get-iter') + export_policy_info = netapp_utils.zapi.NaElement('export-policy-info') + export_policy_info.add_new_child('policy-name', name) + export_policy_info.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(export_policy_info) + export_policy_iter.add_child_elem(query) + result = self.server.invoke_successfully(export_policy_iter, True) + return_value = None + # check if query returns the expected export-policy + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + export_policy = result.get_child_by_name('attributes-list').get_child_by_name( + 'export-policy-info').get_child_by_name('policy-name') + return_value = { + 'policy-name': export_policy + } + return return_value + + def create_export_policy(self): + """ + Creates an export policy + """ + if self.use_rest: + return self.create_export_policy_rest() + export_policy_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'export-policy-create', **{'policy-name': self.parameters['name']}) + try: + self.server.invoke_successfully(export_policy_create, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error on creating export-policy %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_export_policy(self, current): + """ + Delete export-policy + """ + if self.use_rest: + return self.delete_export_policy_rest(current) + export_policy_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'export-policy-destroy', **{'policy-name': self.parameters['name'], }) + try: + self.server.invoke_successfully(export_policy_delete, + enable_tunneling=True) + except 
netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error on deleting export-policy %s: %s' + % (self.parameters['name'], + to_native(error)), exception=traceback.format_exc()) + + def rename_export_policy(self): + """ + Rename the export-policy. + """ + export_policy_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'export-policy-rename', **{'policy-name': self.parameters['from_name'], + 'new-policy-name': self.parameters['name']}) + try: + self.server.invoke_successfully(export_policy_rename, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error on renaming export-policy %s:%s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def get_export_policy_rest(self, name): + options = {'fields': 'name,id', + 'svm.name': self.parameters['vserver'], + 'name': name} + api = 'protocols/nfs/export-policies/' + record, error = rest_generic.get_one_record(self.rest_api, api, options) + if error: + self.module.fail_json(msg="Error on fetching export policy: %s" % error) + if record: + return { + 'name': record['name'], + 'id': record['id'] + } + else: + return record + + def create_export_policy_rest(self): + params = {'name': self.parameters['name'], + 'svm.name': self.parameters['vserver']} + api = 'protocols/nfs/export-policies' + dummy, error = rest_generic.post_async(self.rest_api, api, params) + if error is not None: + self.module.fail_json(msg="Error on creating export policy: %s" % error) + + def delete_export_policy_rest(self, current): + policy_id = current['id'] + api = 'protocols/nfs/export-policies' + dummy, error = rest_generic.delete_async(self.rest_api, api, policy_id) + if error is not None: + self.module.fail_json(msg=" Error on deleting export policy: %s" % error) + + def rename_export_policy_rest(self, current): + policy_id = current['id'] + params = {'name': self.parameters['name']} + api = 'protocols/nfs/export-policies' + dummy, 
error = rest_generic.patch_async(self.rest_api, api, policy_id, params) + if error is not None: + self.module.fail_json(msg="Error on renaming export policy: %s" % error) + + def apply(self): + """ + Apply action to export-policy + """ + modify, rename = None, None + current = self.get_export_policy() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + current = self.get_export_policy(self.parameters['from_name']) + if current is None: + self.module.fail_json( + msg="Error renaming: export policy %s does not exist" % self.parameters['from_name']) + rename = True + + if self.na_helper.changed and not self.module.check_mode: + if rename: + modify = {'name': self.parameters['name']} + if self.use_rest: + self.rename_export_policy_rest(current) + else: + self.rename_export_policy() + elif cd_action == 'create': + self.create_export_policy() + elif cd_action == 'delete': + self.delete_export_policy(current) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action + """ + export_policy = NetAppONTAPExportPolicy() + export_policy.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py new file mode 100644 index 000000000..8b9414074 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_export_policy_rule.py @@ -0,0 +1,745 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_export_policy_rule +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_export_policy_rule + +short_description: NetApp ONTAP manage export 
policy rules +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create or delete or modify export rules in ONTAP + +options: + state: + description: + - Whether the specified export policy rule should exist or not. + required: false + choices: ['present', 'absent'] + type: str + default: present + + name: + description: + - The name of the export policy this rule will be added to (or modified, or removed from). + required: True + type: str + aliases: + - policy_name + + client_match: + description: + - List of Client Match host names, IP Addresses, Netgroups, or Domains. + type: list + elements: str + + anonymous_user_id: + description: + - User name or ID to which anonymous users are mapped. Default value is '65534'. + type: str + + ro_rule: + description: + - List of Read only access specifications for the rule + choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys'] + type: list + elements: str + + rw_rule: + description: + - List of Read Write access specifications for the rule + choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys'] + type: list + elements: str + + super_user_security: + description: + - List of Read Write access specifications for the rule + choices: ['any','none','never','krb5','krb5i','krb5p','ntlm','sys'] + type: list + elements: str + + allow_suid: + description: + - If 'true', NFS server will honor SetUID bits in SETATTR operation. Default value on creation is 'true' + type: bool + + protocol: + description: + - List of Client access protocols. + - Default value is set to 'any' during create. + choices: [any,nfs,nfs3,nfs4,cifs,flexcache] + type: list + elements: str + aliases: + - protocols + + rule_index: + description: + - Index of the export policy rule. + - When rule_index is not set, we try to find a rule with an exact match. 
+ If found, no action is taken with state set to present, and the rule is deleted with state set to absent. + An error is reported if more than one rule is found. + - When rule_index is set and state is present, if a rule cannot be found with this index, + we try to find a rule with an exact match and assign the index to this rule if found. + If no match is found, a new rule is created. + - All attributes that are set are used for an exact match. As a minimum, client_match, ro_rule, and rw_rule are required. + type: int + + from_rule_index: + description: + - index of the export policy rule to be re-indexed + type: int + version_added: 21.20.0 + + vserver: + description: + - Name of the vserver to use. + required: true + type: str + + ntfs_unix_security: + description: + - NTFS export UNIX security options. + - With REST, supported from ONTAP 9.9.1 version. + type: str + choices: ['fail', 'ignore'] + version_added: 21.18.0 + + force_delete_on_first_match: + description: + - when rule_index is not set, the default is to report an error on multiple matches. + - when this option is set, one of the rules with an exact match is deleted when state is absent. + - ignored when state is present. + type: bool + default: false + version_added: 21.23.0 + + chown_mode: + description: + - Specifies who is authorized to change the ownership mode of a file. + - With REST, supported from ONTAP 9.9.1 version. + type: str + choices: ['restricted', 'unrestricted'] + version_added: 22.0.0 + + allow_device_creation: + description: + - Specifies whether or not device creation is allowed. + - default is true. + - With REST, supported from ONTAP 9.9.1 version. 
+ type: bool + version_added: 22.0.0 +''' + +EXAMPLES = """ + - name: Create ExportPolicyRule + netapp.ontap.na_ontap_export_policy_rule: + state: present + name: default123 + rule_index: 100 + vserver: ci_dev + client_match: 0.0.0.0/0,1.1.1.0/24 + ro_rule: krb5,krb5i + rw_rule: any + protocol: nfs,nfs3 + super_user_security: any + anonymous_user_id: 65534 + allow_suid: true + ntfs_unix_security: ignore + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify ExportPolicyRule + netapp.ontap.na_ontap_export_policy_rule: + state: present + name: default123 + rule_index: 100 + client_match: 0.0.0.0/0 + anonymous_user_id: 65521 + ro_rule: ntlm + rw_rule: any + protocol: any + allow_suid: false + ntfs_unix_security: fail + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: rename ExportPolicyRule index + netapp.ontap.na_ontap_export_policy_rule: + state: present + name: default123 + from_rule_index: 100 + rule_index: 99 + client_match: 0.0.0.0/0 + anonymous_user_id: 65521 + ro_rule: ntlm + rw_rule: any + protocol: any + allow_suid: false + ntfs_unix_security: fail + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete ExportPolicyRule + netapp.ontap.na_ontap_export_policy_rule: + state: absent + name: default123 + rule_index: 99 + vserver: ci_dev + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ + + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule + + 
+class NetAppontapExportRule: + ''' object initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str', aliases=['policy_name']), + protocol=dict(required=False, + type='list', elements='str', default=None, + choices=['any', 'nfs', 'nfs3', 'nfs4', 'cifs', 'flexcache'], + aliases=['protocols']), + client_match=dict(required=False, type='list', elements='str'), + ro_rule=dict(required=False, + type='list', elements='str', default=None, + choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']), + rw_rule=dict(required=False, + type='list', elements='str', default=None, + choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']), + super_user_security=dict(required=False, + type='list', elements='str', default=None, + choices=['any', 'none', 'never', 'krb5', 'krb5i', 'krb5p', 'ntlm', 'sys']), + allow_suid=dict(required=False, type='bool'), + from_rule_index=dict(required=False, type='int'), + rule_index=dict(required=False, type='int'), + anonymous_user_id=dict(required=False, type='str'), + vserver=dict(required=True, type='str'), + ntfs_unix_security=dict(required=False, type='str', choices=['fail', 'ignore']), + force_delete_on_first_match=dict(required=False, type='bool', default=False), + chown_mode=dict(required=False, type='str', choices=['restricted', 'unrestricted']), + allow_device_creation=dict(required=False, type='bool'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.set_playbook_zapi_key_map() + self.policy_id = None + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + partially_supported_rest_properties = 
[['ntfs_unix_security', (9, 9, 1)], ['allow_suid', (9, 9, 1)], + ['allow_device_creation', (9, 9, 1)], ['chown_mode', (9, 9, 1)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + if 'rule_index' not in self.parameters: + self.fail_on_missing_required_params('matching (as rule_index is not specified) or creating') + + def fail_on_missing_required_params(self, action): + missing_keys = [key for key in ('client_match', 'ro_rule', 'rw_rule') if self.parameters.get(key) is None] + plural = 's' if len(missing_keys) > 1 else '' + if missing_keys: + self.module.fail_json(msg='Error: Missing required option%s for %s export policy rule: %s' % (plural, action, ', '.join(missing_keys))) + + def set_playbook_zapi_key_map(self): + self.na_helper.zapi_string_keys = { + 'anonymous_user_id': 'anonymous-user-id', + 'client_match': 'client-match', + 'name': 'policy-name', + 'ntfs_unix_security': 'export-ntfs-unix-security-ops', + 'chown_mode': 'export-chown-mode' + } + self.na_helper.zapi_list_keys = { + 'protocol': ('protocol', 'access-protocol'), + 'ro_rule': ('ro-rule', 'security-flavor'), + 'rw_rule': ('rw-rule', 'security-flavor'), + 'super_user_security': ('super-user-security', 'security-flavor'), + } + self.na_helper.zapi_bool_keys = { + 'allow_suid': 'is-allow-set-uid-enabled', + 'allow_device_creation': 'is-allow-dev-is-enabled' + } + self.na_helper.zapi_int_keys = { + 'rule_index': 'rule-index' + } + + @staticmethod + def set_dict_when_not_none(query, key, value): + if value is not None: + query[key] = value + + @staticmethod + def list_to_string(alist): + return ','.join(alist).replace(' ', '') if alist else '' + + def set_query_parameters(self, rule_index): + """ + 
Return dictionary of query parameters and + :return: + """ + query = { + 'policy-name': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + if rule_index is not None: + query['rule-index'] = rule_index + else: + for item_key, value in self.parameters.items(): + zapi_key = None + if item_key in self.na_helper.zapi_string_keys and item_key != 'client_match': + # ignore client_match as ZAPI query is string based and preserves order + zapi_key = self.na_helper.zapi_string_keys[item_key] + elif item_key in self.na_helper.zapi_bool_keys: + zapi_key = self.na_helper.zapi_bool_keys[item_key] + value = self.na_helper.get_value_for_bool(from_zapi=False, value=value) + # skipping int keys to not include rule index in query as we're matching on attributes + elif item_key in self.na_helper.zapi_list_keys: + zapi_key, child_key = self.na_helper.zapi_list_keys[item_key] + value = [{child_key: item} for item in value] if value else None + if zapi_key: + self.set_dict_when_not_none(query, zapi_key, value) + + return { + 'query': { + 'export-rule-info': query + } + } + + def get_export_policy_rule(self, rule_index): + """ + Return details about the export policy rule + If rule_index is None, fetch policy based on attributes + :param: + name : Name of the export_policy + :return: Details about the export_policy. None if not found. 
+ :rtype: dict + """ + if self.use_rest: + return self.get_export_policy_rule_rest(rule_index) + result = None + rule_iter = netapp_utils.zapi.NaElement('export-rule-get-iter') + query = self.set_query_parameters(rule_index) + rule_iter.translate_struct(query) + try: + result = self.server.invoke_successfully(rule_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting export policy rule %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if result is not None and result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + if rule_index is None: + return self.match_export_policy_rule_exactly(result.get_child_by_name('attributes-list').get_children(), query, is_rest=False) + return self.zapi_export_rule_info_to_dict(result.get_child_by_name('attributes-list').get_child_by_name('export-rule-info')) + return None + + def zapi_export_rule_info_to_dict(self, rule_info): + current = {} + for item_key, zapi_key in self.na_helper.zapi_string_keys.items(): + current[item_key] = rule_info.get_child_content(zapi_key) + if item_key == 'client_match' and current[item_key]: + current[item_key] = current[item_key].split(',') + for item_key, zapi_key in self.na_helper.zapi_bool_keys.items(): + current[item_key] = self.na_helper.get_value_for_bool(from_zapi=True, + value=rule_info[zapi_key]) + for item_key, zapi_key in self.na_helper.zapi_int_keys.items(): + current[item_key] = self.na_helper.get_value_for_int(from_zapi=True, + value=rule_info[zapi_key]) + for item_key, zapi_key in self.na_helper.zapi_list_keys.items(): + parent, dummy = zapi_key + current[item_key] = self.na_helper.get_value_for_list(from_zapi=True, + zapi_parent=rule_info.get_child_by_name(parent)) + return current + + def set_export_policy_id(self): + """ + Fetch export-policy id + :param: + name : Name of the export-policy + + :return: Set self.policy_id + """ + if self.policy_id is not 
None: + return + if self.use_rest: + return self.set_export_policy_id_rest() + export_policy_iter = netapp_utils.zapi.NaElement('export-policy-get-iter') + attributes = { + 'query': { + 'export-policy-info': { + 'policy-name': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + } + } + + export_policy_iter.translate_struct(attributes) + try: + result = self.server.invoke_successfully(export_policy_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting export policy %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1: + self.policy_id = self.na_helper.safe_get(result, ['attributes-list', 'export-policy-info', 'policy-id']) + if self.policy_id is None: + self.module.fail_json(msg='Error getting export policy id for %s: got: %s.' + % (self.parameters['name'], result.to_string())) + + def add_parameters_for_create_or_modify(self, na_element_object, params): + """ + Add children node for create or modify NaElement object + :param na_element_object: modify or create NaElement object + :param values: dictionary of cron values to be added + :return: None + """ + for key, value in params.items(): + if key in self.na_helper.zapi_string_keys: + zapi_key = self.na_helper.zapi_string_keys.get(key) + # convert client_match list to comma-separated string + if value and key == 'client_match': + value = self.list_to_string(value) + elif key in self.na_helper.zapi_list_keys: + parent_key, child_key = self.na_helper.zapi_list_keys.get(key) + value = self.na_helper.get_value_for_list(from_zapi=False, zapi_parent=parent_key, zapi_child=child_key, data=value) + elif key in self.na_helper.zapi_int_keys: + zapi_key = self.na_helper.zapi_int_keys.get(key) + value = self.na_helper.get_value_for_int(from_zapi=False, value=value) + elif key in self.na_helper.zapi_bool_keys: + zapi_key = 
self.na_helper.zapi_bool_keys.get(key) + value = self.na_helper.get_value_for_bool(from_zapi=False, value=value) + else: + # ignore options that are not relevant + value = None + + if value is not None: + if key in self.na_helper.zapi_list_keys: + na_element_object.add_child_elem(value) + else: + na_element_object[zapi_key] = value + + def create_export_policy_rule(self): + """ + create rule for the export policy. + """ + if self.use_rest: + return self.create_export_policy_rule_rest() + export_rule_create = netapp_utils.zapi.NaElement('export-rule-create') + self.add_parameters_for_create_or_modify(export_rule_create, self.parameters) + try: + self.server.invoke_successfully(export_rule_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating export policy rule %s: %s' + % (self.parameters['name'], to_native(error)), exception=traceback.format_exc()) + + def create_export_policy(self): + """ + Creates an export policy + """ + if self.use_rest: + return self.create_export_policy_rest() + export_policy_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'export-policy-create', **{'policy-name': self.parameters['name']}) + try: + self.server.invoke_successfully(export_policy_create, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating export policy %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_export_policy_rule(self, rule_index): + """ + delete rule for the export policy. 
+ """ + if self.use_rest: + return self.delete_export_policy_rule_rest(rule_index) + export_rule_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'export-rule-destroy', **{'policy-name': self.parameters['name'], + 'rule-index': str(rule_index)}) + + try: + self.server.invoke_successfully(export_rule_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting export policy rule %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_export_policy_rule(self, params, rule_index=None, rename=False): + ''' + Modify an existing export policy rule + :param params: dict() of attributes with desired values + :return: None + ''' + if self.use_rest: + return self.modify_export_policy_rule_rest(params, rule_index, rename) + params.pop('rule_index', None) + if params: + export_rule_modify = netapp_utils.zapi.NaElement.create_node_with_children( + 'export-rule-modify', **{'policy-name': self.parameters['name'], + 'rule-index': str(rule_index)}) + self.add_parameters_for_create_or_modify(export_rule_modify, params) + try: + self.server.invoke_successfully(export_rule_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying export policy rule index %s: %s' + % (rule_index, to_native(error)), + exception=traceback.format_exc()) + if rename: + export_rule_set_index = netapp_utils.zapi.NaElement.create_node_with_children( + 'export-rule-set-index', **{'policy-name': self.parameters['name'], + 'rule-index': str(self.parameters['from_rule_index']), + 'new-rule-index': str(self.parameters['rule_index'])}) + try: + self.server.invoke_successfully(export_rule_set_index, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error reindexing export policy rule index %s: %s' + % (self.parameters['from_rule_index'], to_native(error)), + 
exception=traceback.format_exc()) + + def set_export_policy_id_rest(self): + if self.policy_id is not None: + return + options = {'fields': 'name,id', + 'svm.name': self.parameters['vserver'], + 'name': self.parameters['name']} + api = 'protocols/nfs/export-policies' + record, error = rest_generic.get_one_record(self.rest_api, api, options) + if error: + self.module.fail_json(msg="Error on fetching export policy: %s" % error) + if record: + self.policy_id = record['id'] + + def get_export_policy_rule_exact_match(self, query): + """ fetch rules based on attributes + REST queries only allow for one value at a time in a list, so: + 1. get a short list of matches using a simple query + 2. then look for an exact match + """ + api = 'protocols/nfs/export-policies/%s/rules' % self.policy_id + query.update(self.create_query(self.parameters)) + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query) + if error: + # If no rule matches the query, return None + if "entry doesn't exist" in error: + return None + self.module.fail_json(msg="Error on fetching export policy rules: %s" % error) + return self.match_export_policy_rule_exactly(records, query, is_rest=True) + + def match_export_policy_rule_exactly(self, records, query, is_rest): + if not records: + return None + founds = [] + for record in records: + record = self.filter_get_results(record) if is_rest else self.zapi_export_rule_info_to_dict(record) + modify = self.na_helper.get_modified_attributes(record, self.parameters) + modify.pop('rule_index', None) + if not modify: + founds.append(record) + if founds and len(founds) > 1 and not (self.parameters['state'] == 'absent' and self.parameters['force_delete_on_first_match']): + self.module.fail_json(msg='Error multiple records exist for query: %s. Specify index to modify or delete a rule. 
Found: %s' + % (query, founds)) + return founds[0] if founds else None + + def get_export_policy_rule_rest(self, rule_index): + self.set_export_policy_id_rest() + if not self.policy_id: + return None + query = {'fields': 'anonymous_user,clients,index,protocols,ro_rule,rw_rule,superuser'} + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + query['fields'] += ',ntfs_unix_security,allow_suid,chown_mode,allow_device_creation' + if rule_index is None: + return self.get_export_policy_rule_exact_match(query) + api = 'protocols/nfs/export-policies/%s/rules/%s' % (self.policy_id, rule_index) + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + # If rule index passed in doesn't exist, return None + if "entry doesn't exist" in error: + return None + self.module.fail_json(msg="Error on fetching export policy rule: %s" % error) + return self.filter_get_results(record) if record else None + + def filter_get_results(self, record): + record['rule_index'] = record.pop('index') + record['anonymous_user_id'] = record.pop('anonymous_user') + record['protocol'] = record.pop('protocols') + record['super_user_security'] = record.pop('superuser') + record['client_match'] = [each['match'] for each in record['clients']] + record.pop('clients') + return record + + def create_export_policy_rest(self): + body = {'name': self.parameters['name'], 'svm.name': self.parameters['vserver']} + api = 'protocols/nfs/export-policies' + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error is not None: + self.module.fail_json(msg="Error on creating export policy: %s" % error) + + def create_export_policy_rule_rest(self): + api = 'protocols/nfs/export-policies/%s/rules?return_records=true' % self.policy_id + response, error = rest_generic.post_async(self.rest_api, api, self.create_body(self.parameters)) + if error: + self.module.fail_json(msg="Error on creating export policy rule: %s" % error) + # force a 'rename' to set the index 
+ rule_index = None + if response and response.get('num_records') == 1: + rule_index = self.na_helper.safe_get(response, ['records', 0, 'index']) + if rule_index is None: + self.module.fail_json(msg="Error on creating export policy rule, returned response is invalid: %s" % response) + if self.parameters.get('rule_index'): + self.modify_export_policy_rule_rest({}, rule_index, True) + + def client_match_format(self, client_match): + return [{'match': each} for each in client_match] + + def delete_export_policy_rule_rest(self, rule_index): + api = 'protocols/nfs/export-policies/%s/rules' % self.policy_id + dummy, error = rest_generic. delete_async(self.rest_api, api, rule_index) + if error: + self.module.fail_json(msg="Error on deleting export policy Rule: %s" % error) + + def create_body(self, params): + body = self.create_body_or_query_common(params) + # lists + if params.get('protocol'): + body['protocols'] = self.parameters['protocol'] + if params.get('super_user_security'): + body['superuser'] = self.parameters['super_user_security'] + if params.get('client_match'): + body['clients'] = self.client_match_format(self.parameters['client_match']) + if params.get('ro_rule'): + body['ro_rule'] = self.parameters['ro_rule'] + if params.get('rw_rule'): + body['rw_rule'] = self.parameters['rw_rule'] + return body + + def create_query(self, params): + query = self.create_body_or_query_common(params) + # for list, do an initial query based on first element + if params.get('protocol'): + query['protocols'] = self.parameters['protocol'][0] + if params.get('super_user_security'): + query['superuser'] = self.parameters['super_user_security'][0] + if params.get('client_match'): + query['clients.match'] = self.parameters['client_match'][0] + if params.get('ro_rule'): + query['ro_rule'] = self.parameters['ro_rule'][0] + if params.get('rw_rule'): + query['rw_rule'] = self.parameters['rw_rule'][0] + return query + + def create_body_or_query_common(self, params): + result = {} + if 
params.get('anonymous_user_id') is not None: + result['anonymous_user'] = self.parameters['anonymous_user_id'] + if params.get('ntfs_unix_security') is not None: + result['ntfs_unix_security'] = self.parameters['ntfs_unix_security'] + if params.get('allow_suid') is not None: + result['allow_suid'] = self.parameters['allow_suid'] + if params.get('chown_mode') is not None: + result['chown_mode'] = self.parameters['chown_mode'] + if params.get('allow_device_creation') is not None: + result['allow_device_creation'] = self.parameters['allow_device_creation'] + return result + + def modify_export_policy_rule_rest(self, params, rule_index, rename=False): + api = 'protocols/nfs/export-policies/%s/rules' % self.policy_id + query = {'new_index': self.parameters['rule_index']} if rename else None + dummy, error = rest_generic.patch_async(self.rest_api, api, rule_index, self.create_body(params), query) + + if error: + self.module.fail_json(msg="Error on modifying export policy Rule: %s" % error) + + def apply(self): + ''' Apply required action from the play''' + current = self.get_export_policy_rule(self.parameters.get('rule_index')) + cd_action, rename, modify = None, None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + # if rule_index is not None, see if we need to re-index an existing rule + # the existing rule may be indexed by from_rule_index or we can match the attributes + if cd_action == 'create' and self.parameters.get('rule_index'): + from_current = self.get_export_policy_rule(self.parameters.get('from_rule_index')) + rename = self.na_helper.is_rename_action(from_current, current) + if rename is None and self.parameters.get('from_rule_index') is not None: + self.module.fail_json( + msg="Error reindexing: export policy rule %s does not exist." 
% self.parameters['from_rule_index']) + if rename: + current = from_current + cd_action = None + self.parameters['from_rule_index'] = current['rule_index'] + + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed: + self.set_export_policy_id() + if cd_action == 'create': + self.fail_on_missing_required_params('creating') + + if self.na_helper.changed and not self.module.check_mode: + # create export policy (if policy doesn't exist) only when changed=True + if rename: + self.modify_export_policy_rule(modify, self.parameters['from_rule_index'], rename=True) + elif cd_action == 'create': + if not self.policy_id: + self.create_export_policy() + self.set_export_policy_id() + self.create_export_policy_rule() + elif cd_action == 'delete': + self.delete_export_policy_rule(current['rule_index']) + elif modify: + self.modify_export_policy_rule(modify, current['rule_index']) + + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + ''' Create object and call apply ''' + rule_obj = NetAppontapExportRule() + rule_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py new file mode 100644 index 000000000..7fee744cf --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fcp.py @@ -0,0 +1,275 @@ +#!/usr/bin/python + +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_fcp +short_description: NetApp ONTAP Start, Stop and Enable FCP services. 
+extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Start, Stop and Enable FCP services. +options: + state: + description: + - Whether the FCP should be enabled or not. + choices: ['present', 'absent'] + type: str + default: present + + status: + description: + - Whether the FCP should be up or down + choices: ['up', 'down'] + type: str + default: up + + vserver: + description: + - The name of the vserver to use. + required: true + type: str + +''' + +EXAMPLES = """ + - name: create FCP + na_ontap_fcp: + state: present + status: down + hostname: "{{hostname}}" + username: "{{username}}" + password: "{{password}}" + vserver: "{{vservername}}" +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapFCP: + """ + Enable and Disable FCP + """ + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + status=dict(required=False, type='str', choices=['up', 'down'], default='up') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + if 
self.rest_api.is_rest(): + self.use_rest = True + elif HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def create_fcp(self): + """ + Create's and Starts an FCP + :return: none + """ + try: + self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-create'), True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating FCP: %s' % + (to_native(error)), + exception=traceback.format_exc()) + + def start_fcp(self): + """ + Starts an existing FCP + :return: none + """ + try: + self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-start'), True) + except netapp_utils.zapi.NaApiError as error: + # Error 13013 denotes fcp service already started. + if to_native(error.code) == "13013": + return None + else: + self.module.fail_json(msg='Error starting FCP %s' % (to_native(error)), + exception=traceback.format_exc()) + + def stop_fcp(self): + """ + Steps an Existing FCP + :return: none + """ + try: + self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-stop'), True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error Stoping FCP %s' % + (to_native(error)), + exception=traceback.format_exc()) + + def destroy_fcp(self): + """ + Destroys an already stopped FCP + :return: + """ + try: + self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-destroy'), True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error destroying FCP %s' % + (to_native(error)), + exception=traceback.format_exc()) + + def get_fcp(self): + if self.use_rest: + return self.get_fcp_rest() + fcp_obj = netapp_utils.zapi.NaElement('fcp-service-get-iter') + fcp_info = netapp_utils.zapi.NaElement('fcp-service-info') + fcp_info.add_new_child('vserver', self.parameters['vserver']) + query 
= netapp_utils.zapi.NaElement('query') + query.add_child_elem(fcp_info) + fcp_obj.add_child_elem(query) + result = self.server.invoke_successfully(fcp_obj, True) + # There can only be 1 FCP per vserver. If true, one is set up, else one isn't set up + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) >= 1: + return True + else: + return False + + def current_status(self): + try: + status = self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-status'), True) + return status.get_child_content('is-available') == 'true' + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error destroying FCP: %s' % + (to_native(error)), + exception=traceback.format_exc()) + + def status_to_bool(self): + return self.parameters['status'] == 'up' + + def get_fcp_rest(self): + options = {'fields': 'enabled,svm.uuid', + 'svm.name': self.parameters['vserver']} + api = 'protocols/san/fcp/services' + record, error = rest_generic.get_one_record(self.rest_api, api, options) + if error: + self.module.fail_json(msg="Error on fetching fcp: %s" % error) + if record: + record['status'] = 'up' if record.pop('enabled') else 'down' + return record + + def create_fcp_rest(self): + params = {'svm.name': self.parameters['vserver'], + 'enabled': self.status_to_bool()} + api = 'protocols/san/fcp/services' + dummy, error = rest_generic.post_async(self.rest_api, api, params) + if error is not None: + self.module.fail_json(msg="Error on creating fcp: %s" % error) + + def destroy_fcp_rest(self, current): + api = 'protocols/san/fcp/services' + dummy, error = rest_generic.delete_async(self.rest_api, api, current['svm']['uuid']) + if error is not None: + self.module.fail_json(msg=" Error on deleting fcp policy: %s" % error) + + def start_stop_fcp_rest(self, enabled, current): + params = {'enabled': enabled} + api = 'protocols/san/fcp/services' + dummy, error = rest_generic.patch_async(self.rest_api, api, 
current['svm']['uuid'], params) + if error is not None: + self.module.fail_json(msg="Error on modifying fcp: %s" % error) + + def zapi_apply(self, current): + changed = False + # this is a mess i don't want to touch... + if self.parameters['state'] == 'present': + if current: + if self.parameters['status'] == 'up': + if not self.current_status(): + if not self.module.check_mode: + self.start_fcp() + changed = True + else: + if self.current_status(): + if not self.module.check_mode: + self.stop_fcp() + changed = True + else: + if not self.module.check_mode: + self.create_fcp() + if self.parameters['status'] == 'up': + self.start_fcp() + elif self.parameters['status'] == 'down': + self.stop_fcp() + changed = True + else: + if current: + if not self.module.check_mode: + if self.current_status(): + self.stop_fcp() + self.destroy_fcp() + changed = True + return changed + + def apply(self): + current = self.get_fcp() + if not self.use_rest: + changed = self.zapi_apply(current) + result = netapp_utils.generate_result(changed) + else: + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) + changed = self.na_helper.changed + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_fcp_rest() + elif modify: + if modify['status'] == 'up': + self.start_stop_fcp_rest(True, current) + else: + self.start_stop_fcp_rest(False, current) + elif cd_action == 'delete': + if current['status'] == 'up': + self.start_stop_fcp_rest(False, current) + self.destroy_fcp_rest(current) + result = netapp_utils.generate_result(changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Start, Stop and Enable FCP services. 
+ """ + obj = NetAppOntapFCP() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsd.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsd.py new file mode 100644 index 000000000..9cf442185 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsd.py @@ -0,0 +1,173 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_fdsd +short_description: NetApp ONTAP create or remove a File Directory security descriptor. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 21.8.0 +author: NetApp Ansible Team (@carchi8py) + +description: +- Create or remove a security descriptor. +options: + state: + description: + - Whether the specified security descriptor should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + name: + description: + - Specifies the name of the security descriptor. + required: true + type: str + + vserver: + description: + - Specifies the vserver. 
+ required: true + type: str +""" + +EXAMPLES = """ + - name: Create File Directory Security Descriptor + netapp.ontap.na_ontap_fdsd: + state: present + name: "ansible_sdl" + vserver: "svm1" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Delete File Directory Security Descriptor + netapp.ontap.na_ontap_fdsd: + state: absent + vserver: "svm1" + name: "ansible_sdl" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +class NetAppOntapFDSD(): + """ + Creates or removes a File Directory Security Descriptor + """ + def __init__(self): + """ + Initialize the ONTAP File Directory Security Descripter class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_fdsd', '9.6')) + + def get_fdsd(self): + """ + Get File Directory Security Descriptor + """ + api = "private/cli/vserver/security/file-directory/ntfs" + query = { + 
'ntfs-sd': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + + message, error = self.rest_api.get(api, query) + records, error = rrh.check_for_0_or_more_records(api, message, error) + + if error: + self.module.fail_json(msg=error) + + return records if records else None + + def add_fdsd(self): + """ + Adds a new File Directory Security Descriptor + """ + api = "private/cli/vserver/security/file-directory/ntfs" + body = { + 'ntfs-sd': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + + dummy, error = self.rest_api.post(api, body) + + if error: + self.module.fail_json(msg=error) + + def remove_fdsd(self): + """ + Deletes a File Directory Security Descriptor + """ + api = "private/cli/vserver/security/file-directory/ntfs" + body = { + 'ntfs-sd': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + + dummy, error = self.rest_api.delete(api, body) + + if error: + self.module.fail_json(msg=error) + + def apply(self): + current = self.get_fdsd() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if self.na_helper.changed: + if not self.module.check_mode: + if cd_action == 'create': + self.add_fdsd() + elif cd_action == 'delete': + self.remove_fdsd() + + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Creates and removes File Directory Security Descriptors + """ + obj = NetAppOntapFDSD() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsp.py new file mode 100644 index 000000000..4fac6fc90 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdsp.py @@ -0,0 +1,171 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_fdsp +short_description: NetApp ONTAP create or delete a file directory security policy +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 21.8.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create or delete a file directory security policy. +options: + state: + description: + - Whether the specified policy should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + name: + description: + - Specifies the name of the policy. + required: true + type: str + + vserver: + description: + - Specifies the vserver for the security policy. + required: true + type: str +""" + +EXAMPLES = """ + - name: Create File Directory Security Policy + netapp.ontap.na_ontap_fdsp: + state: present + name: "ansible_security_policyl" + vserver: "svm1" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Delete File Directory Security Policy + netapp.ontap.na_ontap_fdsp: + state: absent + vserver: "svm1" + name: "ansible_security_policyl" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +class NetAppOntapFDSP(): + """ + Creates or Destroys a File Directory Security Policy + """ + def __init__(self): + """ + Initialize the ONTAP File Directory Security Policy class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + 
state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_fdsp', '9.6')) + + def get_fdsp(self): + """ + Get File Directory Security Policy + """ + api = "private/cli/vserver/security/file-directory/policy" + query = { + 'policy-name': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + + message, error = self.rest_api.get(api, query) + records, error = rrh.check_for_0_or_more_records(api, message, error) + + if error: + self.module.fail_json(msg=error) + + return records if records else None + + def add_fdsp(self): + """ + Adds a new File Directory Security Policy + """ + api = "private/cli/vserver/security/file-directory/policy" + body = { + 'policy-name': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + + dummy, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg=error) + + def remove_fdsp(self): + """ + Deletes a File Directory Security Policy + """ + api = "private/cli/vserver/security/file-directory/policy" + body = { + 'policy-name': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + + dummy, error = self.rest_api.delete(api, body) + if error: + self.module.fail_json(msg=error) + + def apply(self): + current = self.get_fdsp() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if self.na_helper.changed: + if not self.module.check_mode: + if cd_action == 'create': + self.add_fdsp() + elif cd_action == 'delete': + self.remove_fdsp() + + 
result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Creates or removes File Directory Security Policy + """ + obj = NetAppOntapFDSP() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdspt.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdspt.py new file mode 100644 index 000000000..9e402d952 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fdspt.py @@ -0,0 +1,257 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_fdspt +short_description: NetApp ONTAP create, delete or modify File Directory security policy tasks +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 21.8.0 +author: NetApp Ansible Team (@carchi8py) + +description: +- Create, modify or remove file directory security policy tasks. + +options: + state: + description: + - Whether the specified Policy Task should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + name: + description: + - Specifies the name of the policy the task will be associated with. + required: true + type: str + + vserver: + description: + - Specifies the vserver for the File Directory security policy. + required: true + type: str + + access_control: + description: + - Specifies access control of the task. + choices: ['file_directory', 'slag'] + type: str + + ntfs_mode: + description: + - Specifies NTFS propagation mode. + choices: ['propagate', 'ignore', 'replace'] + type: str + + ntfs_sd: + description: + - Specifies the NTFS security descriptor name. + type: list + elements: str + + path: + description: + - Specifies the file or folder path of the task. 
In case of SLAG this path specify the volume or qtree mounted path. + required: true + type: str + + security_type: + description: + - Specifies the type of security. If not specified ONTAP will default to ntfs. + choices: ['ntfs', 'nfsv4'] + type: str + + index_num: + description: + - Specifies the index number of a task. Tasks are applied in order. A task with a larger index value is applied after a task with a lower \ + index number. If you do not specify this optional parameter, new tasks are applied to the end of the index list. + type: int + +notes: +- check_mode is supported for this module. +""" + +EXAMPLES = """ + - name: Create File Directory Security Policy Task + netapp.ontap.na_ontap_na_ontap_fdspt: + state: present + name: "ansible_pl" + access_control: "file_directory" + ntfs_sd: "ansible1_sd" + ntfs_mode: "replace" + security_type: "ntfs" + path: "/volume1" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Modify File Directory Security Policy Task + netapp.ontap.na_ontap_na_ontap_fdspt: + state: present + name: "ansible_pl" + access_control: "file_directory" + path: "/volume1" + ntfs_sd: "ansible1_sd" + ntfs_mode: "replace" + security_type: "ntfs" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Remove File Directory Security Policy Task + netapp.ontap.na_ontap_na_ontap_fdspt: + state: absent + vserver: "SVM1" + name: "ansible_pl" + access_control: "file_directory" + path: "/volume1" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import 
class NetAppOntapFDSPT():
    """
    Create, modify and remove File Directory Security Policy Tasks.
    """

    def __init__(self):
        """Set up the argument spec, module parameters and the REST client."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            path=dict(required=True, type='str'),
            access_control=dict(required=False, choices=['file_directory', 'slag'], type='str'),
            ntfs_sd=dict(required=False, type='list', elements='str'),
            ntfs_mode=dict(required=False, choices=['propagate', 'ignore', 'replace'], type='str'),
            security_type=dict(required=False, choices=['ntfs', 'nfsv4'], type='str'),
            index_num=dict(required=False, type='int')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        # Shared helper object and flattened module parameters.
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        if not self.use_rest:
            # This module is REST-only (private/cli passthrough endpoints).
            self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_fdspt', '9.6'))

    def get_fdspt(self):
        """Return the matching policy task record, or None when absent."""
        api = "private/cli/vserver/security/file-directory/policy/task"
        query = {
            'policy-name': self.parameters['name'],
            'path': self.parameters['path'],
            'fields': 'vserver,ntfs-mode,ntfs-sd,security-type,access-control,index-num'
        }

        message, error = self.rest_api.get(api, query)
        records, error = rrh.check_for_0_or_1_records(api, message, error)
        if error:
            self.module.fail_json(msg=error)
        if not records:
            return None
        # ntfs_sd is omitted from the response when it has no associated
        # value; normalize it to an empty list so modify comparisons work.
        records.setdefault('ntfs_sd', [])
        return records

    def add_fdspt(self):
        """Add a new File Directory Security Policy Task."""
        api = "private/cli/vserver/security/file-directory/policy/task/add"
        body = {
            'policy-name': self.parameters['name'],
            'vserver': self.parameters['vserver'],
            'path': self.parameters['path']
        }
        # Optional attributes are sent with CLI-style dashed keys.
        optional_keys = ('ntfs_mode', 'ntfs_sd', 'security_type', 'access_control', 'index_num')
        body.update({key.replace('_', '-'): self.parameters[key]
                     for key in optional_keys if key in self.parameters})

        dummy, error = self.rest_api.post(api, body)
        if error:
            self.module.fail_json(msg=error)

    def remove_fdspt(self):
        """Delete a File Directory Security Policy Task."""
        api = "private/cli/vserver/security/file-directory/policy/task/remove"
        body = {
            'policy-name': self.parameters['name'],
            'vserver': self.parameters['vserver'],
            'path': self.parameters['path']
        }

        dummy, error = self.rest_api.delete(api, body)
        if error:
            self.module.fail_json(msg=error)

    def modify_fdspt(self):
        """Modify a task by removing and re-adding it.

        The dedicated modify endpoint is not functional, so a remove
        followed by an add is used instead.
        """
        self.remove_fdspt()
        self.add_fdspt()

    def apply(self):
        """Determine the required action, run it, and exit with the result."""
        current = self.get_fdspt()
        modify = None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.add_fdspt()
            elif cd_action == 'delete':
                self.remove_fdspt()
            elif modify:
                self.modify_fdspt()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)


def main():
    """
    Create, delete and modify File Directory Security Policy Tasks.
    """
    obj = NetAppOntapFDSPT()
    obj.apply()


if __name__ == '__main__':
    main()
class NetAppOntapFDSS():
    """
    Apply a File Directory Security Policy to a vserver path.
    """

    def __init__(self):
        """Set up the argument spec, module parameters and the REST client."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present'], default='present'),
            name=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        # Shared helper object and flattened module parameters.
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        if not self.use_rest:
            # This module is REST-only (private/cli passthrough endpoint).
            self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_fdss', '9.6'))

    def set_fdss(self):
        """Apply the security policy; the POST returns a job that is checked for errors."""
        api = "private/cli/vserver/security/file-directory/apply"
        body = {
            'policy_name': self.parameters['name'],
            'vserver': self.parameters['vserver'],
        }

        # The response carries the job ID created by the POST; the helper
        # waits on the job and surfaces any job-level error.
        response, error = self.rest_api.post(api, body)
        response, error = rrh.check_for_error_and_job_results(api, response, error, self.rest_api)
        if error:
            self.module.fail_json(msg=error)

    def apply(self):
        # Applying a policy is not idempotent: it is always reapplied,
        # so the module always reports changed.
        self.set_fdss()
        self.module.exit_json(changed=True)


def main():
    """
    Apply a File Directory Security Policy.
    """
    obj = NetAppOntapFDSS()
    obj.apply()


if __name__ == '__main__':
    main()
+ required: true + type: str + + policy_name: + description: + - Specifies the name of the policy. + type: str + required: true + + access_control: + description: + - Specifies the access control of task to be applied. + choices: ['file_directory', 'slag'] + type: str + + ntfs_mode: + description: + - Specifies NTFS Propagation Mode. + choices: ['propagate', 'ignore', 'replace'] + type: str + + ntfs_sd: + description: + - Specifies NTFS security descriptor identifier. + type: list + elements: str + + path: + description: + - Specifies the file or folder path of the task. + - If path is specified and the policy which the task is adding to, does not exist, it will create the policy first then add the task to it. + - If path is specified, delete operation only removes task from policy. + type: str + + security_type: + description: + - Specifies the type of security. + type: str + choices: ['ntfs', 'nfsv4'] + + ignore_broken_symlinks: + description: + - Skip Broken Symlinks. + - Options used when applying the policy to vserver. + type: bool + +""" + +EXAMPLES = """ + + - name: create policy + na_ontap_file_directory_policy: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + state: present + vserver: ansible + policy_name: file_policy + ignore_broken_symlinks: false + + - name: add task to existing file_policy + na_ontap_file_directory_policy: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + state: present + vserver: ansible + policy_name: file_policy + path: /vol + ntfs_sd: ansible_sd + ntfs_mode: propagate + + - name: delete task from file_policy. + na_ontap_file_directory_policy: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + state: absent + vserver: ansible + policy_name: file_policy + path: /vol + + - name: delete file_policy along with the tasks. 
class NetAppOntapFilePolicy(object):
    """
    Create, modify or delete a vserver security file-directory policy and its tasks (ZAPI only).

    Whenever a policy or task is created or modified, the policy is re-applied
    to the vserver via file-directory-security-set.
    """

    def __init__(self):
        """Initialize module arguments and the ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            policy_name=dict(required=True, type='str'),
            access_control=dict(required=False, type='str', choices=['file_directory', 'slag']),
            ntfs_mode=dict(required=False, choices=['propagate', 'ignore', 'replace']),
            ntfs_sd=dict(required=False, type='list', elements='str'),
            path=dict(required=False, type='str'),
            security_type=dict(required=False, type='str', choices=['ntfs', 'nfsv4']),
            ignore_broken_symlinks=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
        )

        # Shared helper object and flattened module parameters.
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # This module is ZAPI-only and deprecated; emit the deprecation warning.
        self.na_helper.module_deprecated(self.module)

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg='The python NetApp-Lib module is required')
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def create_policy(self):
        """Create the file-directory security policy on the vserver."""
        policy_obj = netapp_utils.zapi.NaElement("file-directory-security-policy-create")
        policy_obj.add_new_child('policy-name', self.parameters['policy_name'])
        try:
            self.server.invoke_successfully(policy_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error creating file-directory policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
                exception=traceback.format_exc())

    def get_policy_iter(self):
        """Return the policy name if the policy exists on the vserver, else None."""
        policy_get_iter = netapp_utils.zapi.NaElement('file-directory-security-policy-get-iter')
        policy_info = netapp_utils.zapi.NaElement('file-directory-security-policy')
        policy_info.add_new_child('vserver', self.parameters['vserver'])
        policy_info.add_new_child('policy-name', self.parameters['policy_name'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(policy_info)
        policy_get_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(policy_get_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching file-directory policy %s: %s'
                                      % (self.parameters['policy_name'], to_native(error)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            attributes_list = result.get_child_by_name('attributes-list')
            policy = attributes_list.get_child_by_name('file-directory-security-policy')
            return policy.get_child_content('policy-name')
        return None

    def remove_policy(self):
        """Delete the file-directory security policy (and its tasks)."""
        remove_policy = netapp_utils.zapi.NaElement('file-directory-security-policy-delete')
        remove_policy.add_new_child('policy-name', self.parameters['policy_name'])
        try:
            self.server.invoke_successfully(remove_policy, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error removing file-directory policy %s: %s' % (self.parameters['policy_name'], to_native(error)),
                exception=traceback.format_exc())

    def get_task_iter(self):
        """Return a dict describing the task at self.parameters['path'], or None."""
        task_get_iter = netapp_utils.zapi.NaElement('file-directory-security-policy-task-get-iter')
        task_info = netapp_utils.zapi.NaElement('file-directory-security-policy-task')
        task_info.add_new_child('vserver', self.parameters['vserver'])
        task_info.add_new_child('policy-name', self.parameters['policy_name'])
        task_info.add_new_child('path', self.parameters['path'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(task_info)
        task_get_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(task_get_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching task from file-directory policy %s: %s'
                                      % (self.parameters['policy_name'], to_native(error)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            attributes_list = result.get_child_by_name('attributes-list')
            task = attributes_list.get_child_by_name('file-directory-security-policy-task')
            task_result = dict()
            task_result['path'] = task.get_child_content('path')
            # Optional attributes are only reported when present on the task.
            if task.get_child_by_name('ntfs-mode'):
                task_result['ntfs_mode'] = task.get_child_content('ntfs-mode')
            if task.get_child_by_name('security-type'):
                task_result['security_type'] = task.get_child_content('security-type')
            if task.get_child_by_name('ntfs-sd'):
                task_result['ntfs_sd'] = [ntfs_sd.get_content() for ntfs_sd in task.get_child_by_name('ntfs-sd').get_children()]
            return task_result
        return None

    def add_task_to_policy(self):
        """Add a task (path plus optional security attributes) to the policy."""
        policy_add_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-add')
        policy_add_task.add_new_child('path', self.parameters['path'])
        policy_add_task.add_new_child('policy-name', self.parameters['policy_name'])
        if self.parameters.get('access_control') is not None:
            policy_add_task.add_new_child('access-control', self.parameters['access_control'])
        if self.parameters.get('ntfs_mode') is not None:
            policy_add_task.add_new_child('ntfs-mode', self.parameters['ntfs_mode'])
        if self.parameters.get('ntfs_sd') is not None:
            # ntfs-sd is a list element of file-security-ntfs-sd children.
            ntfs_sds = netapp_utils.zapi.NaElement('ntfs-sd')
            for ntfs_sd in self.parameters['ntfs_sd']:
                ntfs_sds.add_new_child('file-security-ntfs-sd', ntfs_sd)
            policy_add_task.add_child_elem(ntfs_sds)
        if self.parameters.get('security_type') is not None:
            policy_add_task.add_new_child('security-type', self.parameters['security_type'])
        try:
            self.server.invoke_successfully(policy_add_task, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error adding task to file-directory policy %s: %s'
                                      % (self.parameters['policy_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def remove_task_from_policy(self):
        """Remove the task at self.parameters['path'] from the policy."""
        policy_remove_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-remove')
        policy_remove_task.add_new_child('path', self.parameters['path'])
        policy_remove_task.add_new_child('policy-name', self.parameters['policy_name'])
        try:
            self.server.invoke_successfully(policy_remove_task, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error removing task from file-directory policy %s: %s'
                                      % (self.parameters['policy_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_task(self, modify):
        """Modify the attributes listed in *modify* on the existing task."""
        policy_modify_task = netapp_utils.zapi.NaElement('file-directory-security-policy-task-modify')
        policy_modify_task.add_new_child('path', self.parameters['path'])
        policy_modify_task.add_new_child('policy-name', self.parameters['policy_name'])
        if modify.get('ntfs_mode') is not None:
            policy_modify_task.add_new_child('ntfs-mode', self.parameters['ntfs_mode'])
        if modify.get('ntfs_sd') is not None:
            ntfs_sds = netapp_utils.zapi.NaElement('ntfs-sd')
            for ntfs_sd in self.parameters['ntfs_sd']:
                ntfs_sds.add_new_child('file-security-ntfs-sd', ntfs_sd)
            policy_modify_task.add_child_elem(ntfs_sds)
        if modify.get('security_type') is not None:
            policy_modify_task.add_new_child('security-type', self.parameters['security_type'])
        try:
            self.server.invoke_successfully(policy_modify_task, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying task in file-directory policy %s: %s'
                                      % (self.parameters['policy_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def set_sd(self):
        """Apply the policy to the vserver (file-directory-security-set)."""
        set_sd = netapp_utils.zapi.NaElement('file-directory-security-set')
        set_sd.add_new_child('policy-name', self.parameters['policy_name'])
        # Bug fix: the parameter key uses underscores (ignore_broken_symlinks);
        # the previous dashed key 'ignore-broken-symlinks' never existed in
        # self.parameters, so the option was silently never sent to ONTAP.
        if self.parameters.get('ignore_broken_symlinks'):
            set_sd.add_new_child('ignore-broken-symlinks', str(self.parameters['ignore_broken_symlinks']))
        try:
            self.server.invoke_successfully(set_sd, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error applying file-directory policy %s: %s'
                                      % (self.parameters['policy_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Determine the required policy/task actions, run them, and exit."""
        current = self.get_policy_iter()
        cd_action, task_cd_action, task_modify = None, None, None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.parameters.get('path'):
            current_task = self.get_task_iter()
            task_cd_action = self.na_helper.get_cd_action(current_task, self.parameters)
            if task_cd_action is None and self.parameters['state'] == 'present':
                task_modify = self.na_helper.get_modified_attributes(current_task, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if self.parameters.get('path'):
                if task_cd_action == 'create':
                    # If the policy doesn't exist, create it before adding the task.
                    if cd_action == 'create':
                        self.create_policy()
                    self.add_task_to_policy()
                    self.set_sd()
                elif task_cd_action == 'delete':
                    # When a path is given, delete only the task, not the policy.
                    self.remove_task_from_policy()
                elif task_modify:
                    self.modify_task(task_modify)
                    self.set_sd()
            else:
                if cd_action == 'create':
                    self.create_policy()
                    self.set_sd()
                elif cd_action == 'delete':
                    self.remove_policy()
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, extra_responses={'task action': task_cd_action,
                                                                                                  'task modify': task_modify})
        self.module.exit_json(**result)


def main():
    """
    Create, delete and modify a file directory policy.
    """
    obj = NetAppOntapFilePolicy()
    obj.apply()


if __name__ == '__main__':
    main()
+ - See C(access_control) to only delete all SLAG ACLS, or only delete file-directory ACLs. + - Inherited ACLs are ignored, they can't be deleted or modified. + choices: ['present', 'absent'] + type: str + default: 'present' + + path: + description: + - The path of the file or directory on which to apply security permissions. + type: str + required: true + + vserver: + description: + - Name of the vserver to use. + type: str + required: true + + owner: + description: + - Specifies the owner of the NTFS security descriptor (SD). + - You can specify the owner using either a user name or security identifier (SID). + - The owner of the SD can modify the permissions on the file (or folder) or files (or folders) to which the SD is applied and + can give other users the right to take ownership of the object or objects to which the SD is applied. + type: str + + control_flags: + description: + - Specifies the control flags in the SD. It is a Hexadecimal Value. + type: str + + group: + description: + - Specifies the owner's primary group. + - Specify the owner group using either a group name or SID. + type: str + + ignore_paths: + description: + - For each file or directory in the list, specifies that permissions on this file or directory cannot be replaced. + type: list + elements: str + + propagation_mode: + description: + - Specifies how to propagate security settings to child subfolders and files. + - Defaults to propagate. + choices: ['propagate', 'replace'] + type: str + + access_control: + description: + - An Access Control Level specifies the access control of the task to be applied. + - Valid values are "file-directory" or "Storage-Level Access Guard (SLAG)". + - SLAG is used to apply the specified security descriptors with the task for the volume or qtree. + - Otherwise, the security descriptors are applied on files and directories at the specified path. + - The value slag is not supported on FlexGroups volumes. The default value is "file-directory". 
+ - This field requires ONTAP 9.10.1 or later. This defaults to "file_directory". + - When state is present, all ACLs not listed in C(acls) are deleted when this option is absent. + If this option is present, only ACLs matching its value are deleted. + - When state is absent, all ACLs are deleted when this option is absent. + If this option is present, only ACLs matching its value are deleted. + choices: ['file_directory', 'slag'] + type: str + + acls: + description: + - A discretionary access security list (DACL) identifies the trustees that are allowed or denied access to a securable object. + - When a process tries to access a securable object, the system checks the access control entries (ACEs) + in the object's DACL to determine whether to grant access to it. + type: list + elements: dict + suboptions: + access_control: + description: + - An Access Control Level specifies the access control of the task to be applied. + - Valid values are "file-directory" or "Storage-Level Access Guard (SLAG)". + - SLAG is used to apply the specified security descriptors with the task for the volume or qtree. + - Otherwise, the security descriptors are applied on files and directories at the specified path. + - The value slag is not supported on FlexGroups volumes. The default value is "file-directory". + - This field requires ONTAP 9.10.1 or later. This defaults to "file_directory". + choices: ['file_directory', 'slag'] + type: str + access: + description: + - Specifies whether the ACL is for DACL or SACL. + - Currently tested with access_allow, access_deny for DACL and audit_failure, audit_success for SACL. 
+ choices: [access_allow, access_deny, + access_allowed_callback, access_denied_callback, access_allowed_callback_object, access_denied_callback_object, + system_audit_callback, system_audit_callback_object, system_resource_attribute, system_scoped_policy_id, + audit_failure, audit_success, audit_success_and_failure] + type: str + required: true + user: + description: + - Specifies the account to which the ACE applies. Specify either name or SID. + - As of 21.24.0, the module is not idempotent when using a SID. + - To make it easier when also using C(na_ontap_file_security_permissions_acl), this is aliased to C(acl_user). + type: str + required: true + aliases: ['acl_user'] + rights: + description: + - Specifies the access right controlled by the ACE for the account specified. + - The "rights" parameter is mutually exclusive with the "advanced_rights" parameter. + - ONTAP translates rights into advanced_rights and this module is not idempotent when rights are used. + - Make sure to use C(advanced_rights) to maintain idempotency. C(rights) can be used to discover the mapping to C(advanced_rights). + choices: ['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'] + type: str + apply_to: + description: + - Specifies where to apply the DACL or SACL entries. + - At least one suboption must be set to true. Suboptions that are not set are assumed to be false. + - With SLAGs, ONTAP accepts the three suboptions to be set to true, but creates 2 ACLs. + This module requires the 2 ACLs to be present to preserve idempotency. + See also C(validate_changes). + type: dict + required: true + suboptions: + files: + description: + - Apply to Files. + type: bool + default: false + sub_folders: + description: + - Apply to all sub-folders. 
+ type: bool + default: false + this_folder: + description: + - Apply only to this folder + type: bool + default: false + advanced_rights: + description: + - Specifies the advanced access right controlled by the ACE for the account specified. + type: dict + suboptions: + append_data: + description: + - Append Data. + type: bool + delete: + description: + - Delete. + type: bool + delete_child: + description: + - Delete Child. + type: bool + execute_file: + description: + - Execute File. + type: bool + full_control: + description: + - Full Control. + type: bool + read_attr: + description: + - Read Attributes. + type: bool + read_data: + description: + - Read Data. + type: bool + read_ea: + description: + - Read Extended Attributes. + type: bool + read_perm: + description: + - Read Permissions. + type: bool + write_attr: + description: + - Write Attributes. + type: bool + write_data: + description: + - Write Data. + type: bool + write_ea: + description: + - Write Extended Attributes. + type: bool + write_owner: + description: + - Write Owner. + type: bool + write_perm: + description: + - Write Permission. + type: bool + ignore_paths: + description: + - For each file or directory in the list, specifies that permissions on this file or directory cannot be replaced. + type: list + elements: str + propagation_mode: + description: + - Specifies how to propagate security settings to child subfolders and files. + - Defaults to propagate. + - This option valid only in create ACL. + choices: ['propagate', 'replace'] + type: str + + validate_changes: + description: + - ACLs may not be applied as expected. + - For instance, if Everyone is inherited will all permissions, additional users will be granted all permissions, regardless of the request. + - For this specific example, you can either delete the top level Everyone, or create a new ACL for Everyone at a lower level. + - When using C(rights), ONTAP translates them into C(advanced_rights) so the validation will always fail. 
+ - Valid values are C(ignore), no checking; C(warn) to issue a warning; C(error) to fail the module. + - With SLAGS, ONTAP may split one ACL into two ACLs depending on the C(apply_to) settings. To maintain idempotency, please provide 2 ACLs as input. + choices: ['ignore', 'warn', 'error'] + type: str + default: error + +notes: + - Supports check_mode. + - Only supported with REST and requires ONTAP 9.9.1 or later.. + - SLAG requires ONTAP 9.10.1 or later. + - When state is present, if an ACL is inherited, and a desired ACL matches, a new ACL is created as the inherited cannot be modified. + - When state is absent, inherited ACLs are ignored. +''' + +EXAMPLES = """ + - name: Create file directory security permissions. + netapp.ontap.na_ontap_file_security_permissions: + state: present + vserver: svm1 + access_control: file_directory + path: /vol200/newfile.txt + owner: "{{ user }}" + # Note, wihout quotes, use a single backslash in AD user names + # with quotes, it needs to be escaped as a double backslash + # user: "ANSIBLE_CIFS\\user1" + # we can't show an example with a single backslash as this is a python file, but it works in YAML. + acls: + - access: access_deny + user: "{{ user }}" + apply_to: + files: true + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + + - name: Modify file directory security permissions. + netapp.ontap.na_ontap_file_security_permissions: + state: present + vserver: svm1 + access_control: file_directory + path: /vol200/newfile.txt + acls: + - access: access_deny + user: "{{ user }}" + apply_to: + files: true + - access: access_allow + user: "{{ user }}" + apply_to: + files: true + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + + - name: Delete file directory security ACLs. 
+ netapp.ontap.na_ontap_file_security_permissions: + state: absent + vserver: svm1 + access_control: file_directory + path: /vol200/newfile.txt + acls: + - access: access_deny + user: "{{ user }}" + apply_to: + files: true + - access: access_allow + user: "{{ user }}" + apply_to: + files: true + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver + + +class NetAppOntapFileSecurityPermissions: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + path=dict(required=True, type='str'), + owner=dict(required=False, type='str'), + control_flags=dict(required=False, type='str'), + group=dict(required=False, type='str'), + access_control=dict(required=False, type='str', choices=['file_directory', 'slag']), + ignore_paths=dict(required=False, type='list', elements='str'), + propagation_mode=dict(required=False, type='str', choices=['propagate', 'replace']), + acls=dict(type='list', elements='dict', options=dict( + access=dict(required=True, type='str', choices=[ + 'access_allow', 'access_deny', + 'access_allowed_callback', 'access_denied_callback', 'access_allowed_callback_object', 'access_denied_callback_object', + 'system_audit_callback', 'system_audit_callback_object', 'system_resource_attribute', 'system_scoped_policy_id', + 'audit_failure', 'audit_success', 
'audit_success_and_failure']), + access_control=dict(required=False, type='str', choices=['file_directory', 'slag']), + user=dict(required=True, type='str', aliases=['acl_user']), + rights=dict(required=False, + choices=['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'], + type='str'), + apply_to=dict(required=True, type='dict', options=dict( + files=dict(required=False, type='bool', default=False), + sub_folders=dict(required=False, type='bool', default=False), + this_folder=dict(required=False, type='bool', default=False), + )), + advanced_rights=dict(required=False, type='dict', options=dict( + append_data=dict(required=False, type='bool'), + delete=dict(required=False, type='bool'), + delete_child=dict(required=False, type='bool'), + execute_file=dict(required=False, type='bool'), + full_control=dict(required=False, type='bool'), + read_attr=dict(required=False, type='bool'), + read_data=dict(required=False, type='bool'), + read_ea=dict(required=False, type='bool'), + read_perm=dict(required=False, type='bool'), + write_attr=dict(required=False, type='bool'), + write_data=dict(required=False, type='bool'), + write_ea=dict(required=False, type='bool'), + write_owner=dict(required=False, type='bool'), + write_perm=dict(required=False, type='bool'), + )), + ignore_paths=dict(required=False, type='list', elements='str'), + propagation_mode=dict(required=False, type='str', choices=['propagate', 'replace']), + )), + validate_changes=dict(required=False, type='str', choices=['ignore', 'warn', 'error'], default='error'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.svm_uuid = None + self.na_helper = NetAppModule(self) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_file_security_permissions', 9, 9, 1) + dummy, error = 
self.rest_api.is_rest(partially_supported_rest_properties=[['access_control', (9, 10, 1)], ['acls.access_control', (9, 10, 1)]], + parameters=self.parameters) + if error: + self.module.fail_json(msg=error) + self.parameters = self.na_helper.filter_out_none_entries(self.parameters) + self.apply_to_keys = ['files', 'sub_folders', 'this_folder'] + # POST at SD level only expects a subset of keys in ACL + self.post_acl_keys = ['access', 'advanced_rights', 'apply_to', 'rights', 'user'] + if self.parameters['state'] == 'present': + self.validate_acls() + + def validate_acls(self): + if 'acls' not in self.parameters: + return + self.parameters['acls'] = self.na_helper.filter_out_none_entries(self.parameters['acls']) + for acl in self.parameters['acls']: + if 'rights' in acl: + if 'advanced_rights' in acl: + self.module.fail_json(msg="Error: suboptions 'rights' and 'advanced_rights' are mutually exclusive.") + self.module.warn('This module is not idempotent when "rights" is used, make sure to use "advanced_rights".') + # validate that at least one suboption is true + if not any(self.na_helper.safe_get(acl, ['apply_to', key]) for key in self.apply_to_keys): + self.module.fail_json(msg="Error: at least one suboption must be true for apply_to. Got: %s" % acl) + # error if identical acls are set. 
+ self.match_acl_with_acls(acl, self.parameters['acls']) + for option in ('access_control', 'ignore_paths', 'propagation_mode'): + value = self.parameters.get(option) + if value is not None: + for acl in self.parameters['acls']: + if acl.get(option) not in (None, value): + self.module.fail_json(msg="Error: mismatch between top level value and ACL value for %s: %s vs %s" + % (option, value, acl.get(option))) + # make sure options are set in each ACL, so we can match easily desired ACLs with current ACLs + acl[option] = value + + @staticmethod + def url_encode(url_fragment): + """ + replace special characters with URL encoding: + %2F for /, %5C for backslash + """ + # \ is the escape character in python, so \\ means \ + return url_fragment.replace("/", "%2F").replace("\\", "%5C") + + def get_svm_uuid(self): + self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + + def get_file_security_permissions(self): + api = 'protocols/file-security/permissions/%s/%s' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + fields = 'acls,control_flags,group,owner' + record, error = rest_generic.get_one_record(self.rest_api, api, {'fields': fields}) + # If we get 655865 the path we gave was not found, so we don't want to fail we want to return None + if error: + # if the path does not exist and state is absent, return None and changed is False.
+ if '655865' in error and self.parameters['state'] == 'absent': + return None + self.module.fail_json(msg="Error fetching file security permissions %s: %s" % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + return self.form_current(record) if record else None + + def form_current(self, record): + current = { + 'group': self.na_helper.safe_get(record, ['group']), + 'owner': self.na_helper.safe_get(record, ['owner']), + 'control_flags': self.na_helper.safe_get(record, ['control_flags']), + 'path': record['path'] + } + acls = [] + + def form_acl(acl): + advanced_rights_keys = ['append_data', 'delete', 'delete_child', 'execute_file', 'full_control', 'read_attr', + 'read_data', 'read_ea', 'read_perm', 'write_attr', 'write_data', 'write_ea', 'write_owner', 'write_perm'] + advanced_rights = {} + apply_to = {} + if 'advanced_rights' in acl: + for key in advanced_rights_keys: + # REST does not return the keys when the value is False + advanced_rights[key] = acl['advanced_rights'].get(key, False) + if 'apply_to' in acl: + for key in self.apply_to_keys: + # REST does not return the keys when the value is False + apply_to[key] = acl['apply_to'].get(key, False) + return { + 'advanced_rights': advanced_rights or None, + 'apply_to': apply_to or None + } + + for acl in record.get('acls', []): + each_acl = { + 'access': self.na_helper.safe_get(acl, ['access']), + 'access_control': self.na_helper.safe_get(acl, ['access_control']), + 'inherited': self.na_helper.safe_get(acl, ['inherited']), + 'rights': self.na_helper.safe_get(acl, ['rights']), + 'user': self.na_helper.safe_get(acl, ['user']), + } + each_acl.update(form_acl(acl)) + acls.append(each_acl) + current['acls'] = acls or None + return current + + @staticmethod + def has_acls(current): + return bool(current and current.get('acls')) + + def set_option(self, body, option): + if self.parameters.get(option) is not None: + body[option] = self.parameters[option] + return True + return False + + 
def sanitize_acl_for_post(self, acl): + ''' some fields like access_control, propagation_mode are not accepted for POST operation ''' + post_acl = dict(acl) + for key in acl: + if key not in self.post_acl_keys: + post_acl.pop(key) + return post_acl + + def sanitize_acls_for_post(self, acls): + ''' some fields like access_control, propagation_mode are not accepted for POST operation ''' + return [self.sanitize_acl_for_post(acl) for acl in acls] + + def create_file_security_permissions(self): + api = 'protocols/file-security/permissions/%s/%s' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + body = {} + for option in ('access_control', 'control_flags', 'group', 'owner', 'ignore_paths', 'propagation_mode'): + self.set_option(body, option) + body['acls'] = self.sanitize_acls_for_post(self.parameters.get('acls', [])) + dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120) + if error: + self.module.fail_json(msg='Error creating file security permissions %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def add_file_security_permissions_acl(self, acl): + api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + for option in ('access_control', 'propagation_mode'): + # we already verified these options are consistent when present, so it's OK to overrid + self.set_option(acl, option) + dummy, error = rest_generic.post_async(self.rest_api, api, acl, timeout=0) + if error: + self.module.fail_json(msg='Error adding file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def modify_file_security_permissions_acl(self, acl): + api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + acl = dict(acl) + user = acl.pop('user') + for option in ('access_control', 'propagation_mode'): + # we already verified these 
options are consistent when present, so it's OK to override + self.set_option(acl, option) + dummy, error = rest_generic.patch_async(self.rest_api, api, self.url_encode(user), acl, {'return_records': 'true'}) + if error: + self.module.fail_json(msg='Error modifying file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def delete_file_security_permissions_acl(self, acl): + api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + # some fields are set to None when not present + acl = self.na_helper.filter_out_none_entries(acl) + # drop keys not accepted in body + user = acl.pop('user') + acl.pop('advanced_rights', None) + acl.pop('rights', None) + acl.pop('inherited', None) + for option in ('access_control', 'propagation_mode'): + # we already verified these options are consistent when present, so it's OK to override + self.set_option(acl, option) + dummy, error = rest_generic.delete_async(self.rest_api, api, self.url_encode(user), {'return_records': 'true'}, acl, timeout=0) + if error: + self.module.fail_json(msg='Error deleting file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def modify_file_security_permissions(self, modify): + api = 'protocols/file-security/permissions/%s/%s' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + body = {} + for option in modify: + self.set_option(body, option) + dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, job_timeout=120) + if error: + self.module.fail_json(msg='Error modifying file security permissions %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def match_acl_with_acls(self, acl, acls): + """ return acl if user and access and apply_to are matched, otherwise None """ + matches = [] + for an_acl in acls: + # with 9.9.1, access_control is not
supported. It will be set to None in received ACLs, and omitted in desired ACLs + # but we can assume the user would like to see file_directory. + # We can't modify inherited ACLs. But we can create a new one at a lower scope. + inherited = an_acl['inherited'] if 'inherited' in an_acl else False and (acl['inherited'] if 'inherited' in acl else False) + if (acl['user'] == an_acl['user'] + and acl['access'] == an_acl['access'] + and acl.get('access_control', 'file_directory') == an_acl.get('access_control', 'file_directory') + and acl['apply_to'] == an_acl['apply_to'] + and not inherited): + matches.append(an_acl) + if len(matches) > 1: + self.module.fail_json(msg='Error: found more than one desired ACLs with same user, access, access_control and apply_to %s' % matches) + return matches[0] if matches else None + + def get_acl_actions_on_modify(self, modify, current): + acl_actions = {'patch-acls': [], 'post-acls': [], 'delete-acls': []} + if not self.has_acls(current): + acl_actions['post-acls'] = modify['acls'] + return acl_actions + for acl in modify['acls']: + current_acl = self.match_acl_with_acls(acl, current['acls']) + if current_acl: + # if exact match of 2 acl found, look for modify in that matched desired and current acl. 
+ if self.is_modify_acl_required(acl, current_acl): + acl_actions['patch-acls'].append(acl) + else: + acl_actions['post-acls'].append(acl) + # Ignore inherited ACLs + for acl in current['acls']: + desired_acl = self.match_acl_with_acls(acl, self.parameters['acls']) + if not desired_acl and not acl.get('inherited') and self.parameters.get('access_control') in (None, acl.get('access_control')): + # only delete ACLs that matches the desired access_control, or all ACLs if not set + acl_actions['delete-acls'].append(acl) + return acl_actions + + def is_modify_acl_required(self, desired_acl, current_acl): + current_acl_copy = current_acl.copy() + current_acl_copy.pop('user') + modify = self.na_helper.get_modified_attributes(current_acl_copy, desired_acl) + return bool(modify) + + def get_acl_actions_on_delete(self, current): + acl_actions = {'patch-acls': [], 'post-acls': [], 'delete-acls': []} + self.na_helper.changed = False + if current.get('acls'): + for acl in current['acls']: + # only delete ACLs that matches the desired access_control, or all ACLs if not set + if not acl.get('inherited') and self.parameters.get('access_control') in (None, acl.get('access_control')): + self.na_helper.changed = True + acl_actions['delete-acls'].append(acl) + return acl_actions + + def get_modify_actions(self, current): + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if 'path' in modify: + self.module.fail_json(msg='Error: mismatch on path values: desired: %s, received: %s' % (self.parameters['path'], current['path'])) + if 'acls' in modify: + acl_actions = self.get_acl_actions_on_modify(modify, current) + # validate_modify function will check a modify in acl is required or not. + # if neither patch-acls or post-acls required and modify None, set changed to False. 
+ del modify['acls'] + else: + acl_actions = {'patch-acls': [], 'post-acls': [], 'delete-acls': []} + if not any((acl_actions['patch-acls'], acl_actions['post-acls'], acl_actions['delete-acls'], modify)): + self.na_helper.changed = False + return modify, acl_actions + + def get_acl_actions_on_create(self): + """ + POST does not accept access_control and propagation_mode at the ACL level, these are global values for all ACLs. + Since the user could have a list of ACLs with mixed property we should use POST to create the SD and 1 group of ACLs + then loop over the remaining ACLs. + """ + # split ACLs into four categories + acls_groups = {} + preferred_group = (None, None) + special_accesses = [] + for acl in self.parameters.get('acls', []): + access_control = acl.get('access_control', 'file_directory') + propagation_mode = acl.get('propagation_mode', 'propagate') + if access_control not in acls_groups: + acls_groups[access_control] = {} + if propagation_mode not in acls_groups[access_control]: + acls_groups[access_control][propagation_mode] = [] + acls_groups[access_control][propagation_mode].append(acl) + access = acl.get('access') + if access not in ('access_allow', 'access_deny', 'audit_success', 'audit_failure'): + if preferred_group == (None, None): + preferred_group = (access_control, propagation_mode) + if preferred_group != (access_control, propagation_mode): + self.module.fail_json(msg="Error: acl %s with access %s conflicts with other ACLs using accesses: %s " + "with different access_control or propagation_mode: %s."
+ % (acl, access, special_accesses, preferred_group)) + special_accesses.append(access) + + if preferred_group == (None, None): + # find a non empty list of ACLs + # use sorted to make this deterministic + for acc_key, acc_value in sorted(acls_groups.items()): + for prop_key, prop_value in sorted(acc_value.items()): + if prop_value: + preferred_group = (acc_key, prop_key) + break + if preferred_group != (None, None): + break + + # keep one category for create, and move the remaining ACLs into post-acls + create_acls = [] + acl_actions = {'patch-acls': [], 'post-acls': [], 'delete-acls': []} + # use sorted to make this deterministic + for acc_key, acc_value in sorted(acls_groups.items()): + for prop_key, prop_value in sorted(acc_value.items()): + if (acc_key, prop_key) == preferred_group: + create_acls = prop_value + self.parameters['access_control'] = acc_key + self.parameters['propagation_mode'] = prop_key + elif prop_value: + acl_actions['post-acls'].extend(prop_value) + self.parameters['acls'] = create_acls + return acl_actions + + def get_actions(self): + current = self.get_file_security_permissions() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify, acl_actions = self.get_modify_actions(current) if cd_action is None else (None, {}) + if cd_action == 'create' and self.parameters.get('access_control') is None: + acl_actions = self.get_acl_actions_on_create() + if cd_action == 'delete': + # delete is not supported by the API, or rather a DELETE will only delete the SLAG ACLs and nothing else. 
+ # so we just loop through all the ACLs + acl_actions = self.get_acl_actions_on_delete(current) + cd_action = None + return cd_action, modify, acl_actions + + def apply(self): + + self.get_svm_uuid() + cd_action, modify, acl_actions = self.get_actions() + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_file_security_permissions() + if modify: + self.modify_file_security_permissions(modify) + # delete ACLs first, to avoid conflicts with new or modified rules + for delete_acl in acl_actions.get('delete-acls', []): + self.delete_file_security_permissions_acl(delete_acl) + # PATCH call succeeds, but its not working: changes are not reflecting + # modify before adding new rules to avoid conflicts + for patch_acl in acl_actions.get('patch-acls', []): + self.modify_file_security_permissions_acl(patch_acl) + for post_acl in acl_actions.get('post-acls', []): + self.add_file_security_permissions_acl(post_acl) + changed = self.na_helper.changed + self.validate_changes(cd_action, modify) + self.na_helper.changed = changed + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + def validate_changes(self, cd_action, modify): + if self.parameters['validate_changes'] == 'ignore': + return + new_cd_action, new_modify, acl_actions = self.get_actions() + errors = [] + if new_cd_action is not None: + errors.append('%s still required after %s (with modify: %s)' % (new_cd_action, cd_action, modify)) + if new_modify: + errors.append('modify: %s still required after %s' % (new_modify, modify)) + # changes in ACLs + errors.extend('%s still required for %s' % (key, value) for key, value in acl_actions.items() if value) + if errors: + msg = 'Error - %s' % ' - '.join(errors) + if self.parameters['validate_changes'] == 'error': + self.module.fail_json(msg=msg) + if self.parameters['validate_changes'] == 'warn': + self.module.warn(msg) + + +def main(): + """Apply volume 
operations from playbook""" + obj = NetAppOntapFileSecurityPermissions() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions_acl.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions_acl.py new file mode 100644 index 000000000..277986466 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_file_security_permissions_acl.py @@ -0,0 +1,495 @@ +#!/usr/bin/python + +# (c) 2022-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_file_security_permissions_acl +short_description: NetApp ONTAP file security permissions ACL +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 22.0.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Add, delete, or modify a file_security_permissions ACL on NetApp ONTAP. + - Note that ACLs are mached based on ('user', 'access', 'access_control', 'apply_to'). + To modify any of these 4 properties, you would need to delete the ACL and create a new one. + Or use C(netapp.ontap.na_ontap_file_security_permissions). + +options: + state: + description: + - Whether the specified file security permissions ACL should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + + path: + description: + - The path of the file or directory on which to apply security permissions. + type: str + required: true + + vserver: + description: + - Name of the vserver to use. + type: str + required: true + + access_control: + description: + - An Access Control Level specifies the access control of the task to be applied. + - Valid values are "file-directory" or "Storage-Level Access Guard (SLAG)". 
+ - SLAG is used to apply the specified security descriptors with the task for the volume or qtree. + - Otherwise, the security descriptors are applied on files and directories at the specified path. + - The value slag is not supported on FlexGroups volumes. The default value is "file-directory". + - This field requires ONTAP 9.10.1 or later. This defaults to "file_directory". + choices: ['file_directory', 'slag'] + type: str + access: + description: + - An ACE is an element in an access control list (ACL). An ACL can have zero or more ACEs. + - Each ACE controls or monitors access to an object by a specified trustee. + choices: ['access_allow', 'access_deny', 'audit_failure', 'audit_success'] + type: str + required: true + acl_user: + description: + - Specifies the account to which the ACE applies. Specify either name or SID. + - As of 22.0.0, the module is not idempotent when using a SID. + - Note - we cannot use C(user) as it conflicts with the option for the admin user. + type: str + required: true + rights: + description: + - Specifies the access right controlled by the ACE for the account specified. + - The "rights" parameter is mutually exclusive with the "advanced_rights" parameter. + - ONTAP translates rights into advanced_rights and this module is not idempotent when rights are used. + - Make sure to use C(advanced_rights) to maintain idempotency. C(rights) can be used to discover the mapping to C(advanced_rights). + choices: ['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'] + type: str + apply_to: + description: + - Specifies where to apply the DACL or SACL entries. + - With SLAGs, ONTAP accepts the three suboptions to be set to true, but creates 2 ACLs. + This module requires the 2 ACLs to be present to preserve idempotency. + See also C(validate_changes). + type: dict + required: true + suboptions: + files: + description: + - Apply to Files.
+ type: bool + default: false + sub_folders: + description: + - Apply to all sub-folders. + type: bool + default: false + this_folder: + description: + - Apply only to this folder + type: bool + default: false + advanced_rights: + description: + - Specifies the advanced access right controlled by the ACE for the account specified. + type: dict + suboptions: + append_data: + description: + - Append Data. + type: bool + required: false + delete: + description: + - Delete. + type: bool + required: false + delete_child: + description: + - Delete Child. + type: bool + required: false + execute_file: + description: + - Execute File. + type: bool + required: false + full_control: + description: + - Full Control. + type: bool + required: false + read_attr: + description: + - Read Attributes. + type: bool + required: false + read_data: + description: + - Read Data. + type: bool + required: false + read_ea: + description: + - Read Extended Attributes. + type: bool + required: false + read_perm: + description: + - Read Permissions. + type: bool + required: false + write_attr: + description: + - Write Attributes. + type: bool + required: false + write_data: + description: + - Write Data. + type: bool + required: false + write_ea: + description: + - Write Extended Attributes. + type: bool + required: false + write_owner: + description: + - Write Owner. + type: bool + required: false + write_perm: + description: + - Write Permission. + type: bool + required: false + ignore_paths: + description: + - For each file or directory in the list, specifies that permissions on this file or directory cannot be replaced. + type: list + elements: str + propagation_mode: + description: + - Specifies how to propagate security settings to child subfolders and files. + - Defaults to propagate. + - This option is valid in create, but cannot modify. + choices: ['propagate', 'replace'] + type: str + + validate_changes: + description: + - ACLs may not be applied as expected. 
+ - For instance, if Everyone is inherited with all permissions, additional users will be granted all permissions, regardless of the request. + - For this specific example, you can either delete the top level Everyone, or create a new ACL for Everyone at a lower level. + - When using C(rights), ONTAP translates them into C(advanced_rights) so the validation will always fail. + - Valid values are C(ignore), no checking; C(warn) to issue a warning; C(error) to fail the module. + - With SLAGS, ONTAP may split one ACL into two ACLs depending on the C(apply_to) settings. To maintain idempotency, please provide 2 ACLs as input. + choices: ['ignore', 'warn', 'error'] + type: str + default: error + +notes: + - Supports check_mode. + - Only supported with REST and requires ONTAP 9.9.1 or later. + - SLAG requires ONTAP 9.10.1 or later. +''' + +EXAMPLES = """ + - name: Add ACL for file or directory security permissions. + netapp.ontap.na_ontap_file_security_permissions_acl: + vserver: "{{ vserver_name }}" + access_control: file_directory + path: "{{ file_mount_path }}" + validate_changes: warn + access: access_allow + # Note, without quotes, use a single backslash in AD user names + # with quotes, it needs to be escaped as a double backslash + # user: "ANSIBLE_CIFS\\user1" + # we can't show an example with a single backslash as this is a python file, but it works in YAML. + acl_user: "user1" + apply_to: + this_folder: true + advanced_rights: + append_data: true + delete: false + + - name: Modify ACL for file or directory security permissions. + netapp.ontap.na_ontap_file_security_permissions_acl: + vserver: "{{ vserver_name }}" + access_control: file_directory + path: "{{ file_mount_path }}" + validate_changes: warn + access: access_allow + acl_user: "user1" + apply_to: + this_folder: true + advanced_rights: + append_data: false + delete: true + + - name: Delete ACL for file or directory security permissions.
+ netapp.ontap.na_ontap_file_security_permissions_acl: + vserver: "{{ vserver_name }}" + access_control: file_directory + path: "{{ file_mount_path }}" + validate_changes: warn + access: access_allow + acl_user: "user1" + apply_to: + this_folder: true + state: absent +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver + + +class NetAppOntapFileSecurityPermissionsACL: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + path=dict(required=True, type='str'), + access_control=dict(required=False, type='str', choices=['file_directory', 'slag']), + access=dict(required=True, choices=['access_allow', 'access_deny', 'audit_failure', 'audit_success'], type='str'), + apply_to=dict(required=True, type='dict', options=dict( + files=dict(required=False, type='bool', default=False), + sub_folders=dict(required=False, type='bool', default=False), + this_folder=dict(required=False, type='bool', default=False), + )), + advanced_rights=dict(required=False, type='dict', options=dict( + append_data=dict(required=False, type='bool'), + delete=dict(required=False, type='bool'), + delete_child=dict(required=False, type='bool'), + execute_file=dict(required=False, type='bool'), + full_control=dict(required=False, type='bool'), + read_attr=dict(required=False, type='bool'), + read_data=dict(required=False, type='bool'), + read_ea=dict(required=False, type='bool'), + read_perm=dict(required=False, type='bool'), + 
write_attr=dict(required=False, type='bool'), + write_data=dict(required=False, type='bool'), + write_ea=dict(required=False, type='bool'), + write_owner=dict(required=False, type='bool'), + write_perm=dict(required=False, type='bool'), + )), + ignore_paths=dict(required=False, type='list', elements='str'), + propagation_mode=dict(required=False, type='str', choices=['propagate', 'replace']), + rights=dict(required=False, + choices=['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'], + type='str'), + acl_user=dict(required=True, type='str'), + validate_changes=dict(required=False, type='str', choices=['ignore', 'warn', 'error'], default='error'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.svm_uuid = None + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_file_security_permissions_acl', 9, 9, 1) + dummy, error = self.rest_api.is_rest(partially_supported_rest_properties=[['access_control', (9, 10, 1)]], parameters=self.parameters) + self.apply_to_keys = ['files', 'sub_folders', 'this_folder'] + if self.parameters['state'] == 'present': + self.validate_acl() + self.parameters['user'] = self.parameters['acl_user'] + + def validate_acl(self): + self.parameters = self.na_helper.filter_out_none_entries(self.parameters) + if 'rights' in self.parameters: + if 'advanced_rights' in self.parameters: + self.module.fail_json(msg="Error: suboptions 'rights' and 'advanced_rights' are mutually exclusive.") + self.module.warn('This module is not idempotent when "rights" is used, make sure to use "advanced_rights".') + # validate that at least one suboption is true + if not any(self.na_helper.safe_get(self.parameters, ['apply_to', key]) for key in self.apply_to_keys): + self.module.fail_json(msg="Error: at least 
one suboption must be true for apply_to. Got: %s" % self.parameters.get('apply_to')) + + @staticmethod + def url_encode(url_fragment): + """ + replace special characters with URL encoding: + %2F for /, %5C for backslash + """ + # \ is the escape character in python, so \\ means \ + return url_fragment.replace("/", "%2F").replace("\\", "%5C") + + def get_svm_uuid(self): + self.svm_uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + + def get_file_security_permissions_acl(self): + """ we cannot get a single ACL - get a list, and find ours""" + api = 'protocols/file-security/permissions/%s/%s' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + fields = 'acls' + record, error = rest_generic.get_one_record(self.rest_api, api, fields=fields) + # If we get 655865 the path we gave was not found, so we don't want to fail we want to return None + if error: + if '655865' in error and self.parameters['state'] == 'absent': + return None + self.module.fail_json(msg="Error fetching file security permissions %s: %s" % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + if record and 'acls' in record: + record = self.form_current(record) + return self.match_acl_with_acls(self.parameters, record['acls']) + return None + + def form_current(self, record): + current = { + 'group': self.na_helper.safe_get(record, ['group']), + 'owner': self.na_helper.safe_get(record, ['owner']), + 'control_flags': self.na_helper.safe_get(record, ['control_flags']), + 'path': record['path'] + } + acls = [] + + def form_acl(acl): + advanced_rights_keys = ['append_data', 'delete', 'delete_child', 'execute_file', 'full_control', 'read_attr', + 'read_data', 'read_ea', 'read_perm', 'write_attr', 'write_data', 'write_ea', 'write_owner', 'write_perm'] + advanced_rights = {} + apply_to = {} + if 'advanced_rights' in acl: + for key in advanced_rights_keys: + # REST does not return the keys when the value is False + 
advanced_rights[key] = acl['advanced_rights'].get(key, False) + if 'apply_to' in acl: + for key in self.apply_to_keys: + # REST does not return the keys when the value is False + apply_to[key] = acl['apply_to'].get(key, False) + return { + 'advanced_rights': advanced_rights or None, + 'apply_to': apply_to or None + } + + for acl in record.get('acls', []): + each_acl = { + 'access': self.na_helper.safe_get(acl, ['access']), + 'access_control': self.na_helper.safe_get(acl, ['access_control']), + 'inherited': self.na_helper.safe_get(acl, ['inherited']), + 'rights': self.na_helper.safe_get(acl, ['rights']), + 'user': self.na_helper.safe_get(acl, ['user']), + } + each_acl.update(form_acl(acl)) + acls.append(each_acl) + current['acls'] = acls or None + return current + + def build_body(self, action): + keys = { + 'create': ['access', 'access_control', 'advanced_rights', 'apply_to', 'ignore_paths', 'propagation_mode', 'rights', 'user'], + 'modify': ['access', 'access_control', 'advanced_rights', 'apply_to', 'ignore_paths', 'rights'], + 'delete': ['access', 'access_control', 'apply_to', 'ignore_paths', 'propagation_mode'], + # 'delete': ['access', 'access_control', 'ignore_paths', 'propagation_mode'], + } + if action not in keys: + self.module.fail_json(msg='Internal error - unexpected action %s' % action) + body = {} + for key in keys[action]: + if key in self.parameters: + body[key] = self.parameters[key] + return body + + def create_file_security_permissions_acl(self): + api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + body = self.build_body('create') + dummy, error = rest_generic.post_async(self.rest_api, api, body, timeout=0) + if error: + self.module.fail_json(msg='Error creating file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def modify_file_security_permissions_acl(self): + api = 
'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + body = self.build_body('modify') + dummy, error = rest_generic.patch_async(self.rest_api, api, self.url_encode(self.parameters['user']), body) + if error: + self.module.fail_json(msg='Error modifying file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def delete_file_security_permissions_acl(self): + api = 'protocols/file-security/permissions/%s/%s/acl' % (self.svm_uuid, self.url_encode(self.parameters['path'])) + body = self.build_body('delete') + dummy, error = rest_generic.delete_async(self.rest_api, api, self.url_encode(self.parameters['user']), body=body, timeout=0) + if error: + self.module.fail_json(msg='Error deleting file security permissions acl %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def match_acl_with_acls(self, acl, acls): + """ return acl if user and access and apply_to are matched, otherwiese None """ + matches = [] + for an_acl in acls: + # with 9.9.1, access_control is not supported. It will be set to None in received ACLs, and omitted in desired ACLs + # but we can assume the user would like to see file_directory. + # We can't modify inherited ACLs. But we can create a new one at a lower scope. + inherited = an_acl['inherited'] if 'inherited' in an_acl else False and (acl['inherited'] if 'inherited' in acl else False) + if (acl['user'] == an_acl['user'] + and acl['access'] == an_acl['access'] + and acl.get('access_control', 'file_directory') == an_acl.get('access_control', 'file_directory') + and acl['apply_to'] == an_acl['apply_to'] + and not inherited): + matches.append(an_acl) + if len(matches) > 1: + self.module.fail_json(msg='Error matching ACLs, found more than one match. 
Found %s' % matches) + return matches[0] if matches else None + + def get_actions(self): + current = self.get_file_security_permissions_acl() + modify = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + return cd_action, modify + + def apply(self): + self.get_svm_uuid() + cd_action, modify = self.get_actions() + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_file_security_permissions_acl() + if cd_action == 'delete': + self.delete_file_security_permissions_acl() + if modify: + self.modify_file_security_permissions_acl() + self.validate_changes(cd_action, modify) + self.module.exit_json(changed=self.na_helper.changed) + + def validate_changes(self, cd_action, modify): + if self.parameters['validate_changes'] == 'ignore': + return + new_cd_action, new_modify = self.get_actions() + errors = [] + if new_cd_action is not None: + errors.append('%s still required after %s (with modify: %s)' % (new_cd_action, cd_action, modify)) + if new_modify: + errors.append('modify: %s still required after %s' % (new_modify, modify)) + if errors: + msg = 'Error - %s' % ' - '.join(errors) + if self.parameters['validate_changes'] == 'error': + self.module.fail_json(msg=msg) + if self.parameters['validate_changes'] == 'warn': + self.module.warn(msg) + + +def main(): + """Apply volume operations from playbook""" + obj = NetAppOntapFileSecurityPermissionsACL() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py new file mode 100644 index 000000000..7addf9c02 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firewall_policy.py @@ -0,0 +1,325 @@ +#!/usr/bin/python + +# (c) 
2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_firewall_policy +short_description: NetApp ONTAP Manage a firewall policy +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Configure firewall on an ONTAP node and manage firewall policy for an ONTAP SVM +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +requirements: + - Python package ipaddress. Install using 'pip install ipaddress' +options: + state: + description: + - Whether to set up a firewall policy or not + choices: ['present', 'absent'] + type: str + default: present + allow_list: + description: + - A list of IPs and masks to use. + - The host bits of the IP addresses used in this list must be set to 0. + type: list + elements: str + policy: + description: + - A policy name for the firewall policy + type: str + service: + description: + - The service to apply the policy to + - https and ssh are not supported starting with ONTAP 9.6 + - portmap is supported for ONTAP 9.4, 9.5 and 9.6 + - none is supported for ONTAP 9.8 onwards. + choices: ['dns', 'http', 'https', 'ndmp', 'ndmps', 'ntp', 'portmap', 'rsh', 'snmp', 'ssh', 'telnet', 'none'] + type: str + vserver: + description: + - The Vserver to apply the policy to. 
+ type: str + enable: + description: + - enable firewall on a node + choices: ['enable', 'disable'] + type: str + logging: + description: + - enable logging for firewall on a node + choices: ['enable', 'disable'] + type: str + node: + description: + - The node to run the firewall configuration on + type: str +''' + +EXAMPLES = """ + - name: create firewall Policy + netapp.ontap.na_ontap_firewall_policy: + state: present + allow_list: [1.2.3.0/24,1.3.0.0/16] + policy: pizza + service: http + vserver: ci_dev + hostname: "{{ netapp hostname }}" + username: "{{ netapp username }}" + password: "{{ netapp password }}" + + - name: Modify firewall Policy + netapp.ontap.na_ontap_firewall_policy: + state: present + allow_list: [1.5.3.0/24] + policy: pizza + service: http + vserver: ci_dev + hostname: "{{ netapp hostname }}" + username: "{{ netapp username }}" + password: "{{ netapp password }}" + + - name: Destory firewall Policy + netapp.ontap.na_ontap_firewall_policy: + state: absent + policy: pizza + service: http + vserver: ci_dev + hostname: "{{ netapp hostname }}" + username: "{{ netapp username }}" + password: "{{ netapp password }}" + + - name: Enable firewall and logging on a node + netapp.ontap.na_ontap_firewall_policy: + node: test-vsim1 + enable: enable + logging: enable + hostname: "{{ netapp hostname }}" + username: "{{ netapp username }}" + password: "{{ netapp password }}" + +""" + +RETURN = """ +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import netapp_ipaddress + + +class NetAppONTAPFirewallPolicy: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_zapi_only_spec() + self.argument_spec.update(dict( + 
state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + allow_list=dict(required=False, type='list', elements='str'), + policy=dict(required=False, type='str'), + service=dict(required=False, type='str', choices=['dns', 'http', 'https', 'ndmp', 'ndmps', + 'ntp', 'portmap', 'rsh', 'snmp', 'ssh', 'telnet', 'none']), + vserver=dict(required=False, type="str"), + enable=dict(required=False, type="str", choices=['enable', 'disable']), + logging=dict(required=False, type="str", choices=['enable', 'disable']), + node=dict(required=False, type="str") + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_together=(['policy', 'service', 'vserver'], + ['enable', 'node'] + ), + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.na_helper.module_replaces('na_ontap_service_policy', self.module) + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def validate_ip_addresses(self): + ''' + Validate if the given IP address is a network address (i.e. it's host bits are set to 0) + ONTAP doesn't validate if the host bits are set, + and hence doesn't add a new address unless the IP is from a different network. + So this validation allows the module to be idempotent. 
+ :return: None + ''' + for ip in self.parameters['allow_list']: + netapp_ipaddress.validate_ip_address_is_network_address(ip, self.module) + + def get_firewall_policy(self): + """ + Get a firewall policy + :return: returns a firewall policy object, or returns False if there are none + """ + net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-get-iter") + attributes = { + 'query': { + 'net-firewall-policy-info': self.firewall_policy_attributes() + } + } + net_firewall_policy_obj.translate_struct(attributes) + + try: + result = self.server.invoke_successfully(net_firewall_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error getting firewall policy %s:%s" % (self.parameters['policy'], + to_native(error)), + exception=traceback.format_exc()) + + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + attributes_list = result.get_child_by_name('attributes-list') + policy_info = attributes_list.get_child_by_name('net-firewall-policy-info') + ips = self.na_helper.get_value_for_list(from_zapi=True, + zapi_parent=policy_info.get_child_by_name('allow-list')) + return { + 'service': policy_info['service'], + 'allow_list': ips} + return None + + def create_firewall_policy(self): + """ + Create a firewall policy for given vserver + :return: None + """ + net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-create") + net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes()) + if self.parameters.get('allow_list'): + self.validate_ip_addresses() + net_firewall_policy_obj.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False, + zapi_parent='allow-list', + zapi_child='ip-and-mask', + data=self.parameters['allow_list']) + ) + try: + self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error creating Firewall Policy: 
%s" % (to_native(error)), exception=traceback.format_exc()) + + def destroy_firewall_policy(self): + """ + Destroy a Firewall Policy from a vserver + :return: None + """ + net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-destroy") + net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes()) + try: + self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error destroying Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc()) + + def modify_firewall_policy(self, modify): + """ + Modify a firewall Policy on a vserver + :return: none + """ + self.validate_ip_addresses() + net_firewall_policy_obj = netapp_utils.zapi.NaElement("net-firewall-policy-modify") + net_firewall_policy_obj.translate_struct(self.firewall_policy_attributes()) + net_firewall_policy_obj.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False, + zapi_parent='allow-list', + zapi_child='ip-and-mask', + data=modify['allow_list'])) + try: + self.server.invoke_successfully(net_firewall_policy_obj, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error modifying Firewall Policy: %s" % (to_native(error)), exception=traceback.format_exc()) + + def firewall_policy_attributes(self): + return { + 'policy': self.parameters['policy'], + 'service': self.parameters['service'], + 'vserver': self.parameters['vserver'], + } + + def get_firewall_config_for_node(self): + """ + Get firewall configuration on the node + :return: dict() with firewall config details + """ + if self.parameters.get('logging') and self.parameters.get('node') is None: + self.module.fail_json(msg='Error: Missing parameter \'node\' to modify firewall logging') + net_firewall_config_obj = netapp_utils.zapi.NaElement("net-firewall-config-get") + net_firewall_config_obj.add_new_child('node-name', self.parameters['node']) + try: 
+ result = self.server.invoke_successfully(net_firewall_config_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error getting Firewall Configuration: %s" % (to_native(error)), + exception=traceback.format_exc()) + if result.get_child_by_name('attributes'): + firewall_info = result['attributes'].get_child_by_name('net-firewall-config-info') + return {'enable': self.change_status_to_bool(firewall_info.get_child_content('is-enabled'), to_zapi=False), + 'logging': self.change_status_to_bool(firewall_info.get_child_content('is-logging'), to_zapi=False)} + return None + + def modify_firewall_config(self, modify): + """ + Modify the configuration of a firewall on node + :return: None + """ + net_firewall_config_obj = netapp_utils.zapi.NaElement("net-firewall-config-modify") + net_firewall_config_obj.add_new_child('node-name', self.parameters['node']) + if modify.get('enable'): + net_firewall_config_obj.add_new_child('is-enabled', self.change_status_to_bool(self.parameters['enable'])) + if modify.get('logging'): + net_firewall_config_obj.add_new_child('is-logging', self.change_status_to_bool(self.parameters['logging'])) + try: + self.server.invoke_successfully(net_firewall_config_obj, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error modifying Firewall Config: %s" % (to_native(error)), + exception=traceback.format_exc()) + + def change_status_to_bool(self, input, to_zapi=True): + if to_zapi: + return 'true' if input == 'enable' else 'false' + else: + return 'enable' if input == 'true' else 'disable' + + def apply(self): + cd_action, modify, modify_config = None, None, None + if self.parameters.get('policy'): + current = self.get_firewall_policy() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if 
self.parameters.get('node'): + current_config = self.get_firewall_config_for_node() + # firewall config for a node is always present, we cannot create or delete a firewall on a node + modify_config = self.na_helper.get_modified_attributes(current_config, self.parameters) + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_firewall_policy() + elif cd_action == 'delete': + self.destroy_firewall_policy() + else: + if modify: + self.modify_firewall_policy(modify) + if modify_config: + self.modify_firewall_config(modify_config) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, extra_responses={'modify_config': modify_config}) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + :return: nothing + """ + cg_obj = NetAppONTAPFirewallPolicy() + cg_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py new file mode 100644 index 000000000..63966d4e8 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_firmware_upgrade.py @@ -0,0 +1,873 @@ +#!/usr/bin/python + +# (c) 2019-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Update ONTAP service-processor firmware + - The recommended procedure is to + 1. download the firmware package from the NetApp Support site + 2. copy the package to a web server + 3. download the package from the web server using this module + - Once a disk qualification, disk, shelf, or ACP firmware package is downloaded, ONTAP will automatically update the related resources in background. 
+ - It may take some time to complete. + - For service processor, the update requires a node reboot to take effect. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_firmware_upgrade +options: + state: + description: + - Whether the specified ONTAP firmware should be upgraded or not. + default: present + type: str + node: + description: + - Node on which the device is located. + - Not required if package_url is present and force_disruptive_update is False. + - If this option is not given, the firmware will be downloaded on all nodes in the cluster, + - and the resources will be updated in background on all nodes, except for service processor. + - For service processor, the upgrade will happen automatically when each node is rebooted. + type: str + clear_logs: + description: + - Clear logs on the device after update. Default value is true. + - Not used if force_disruptive_update is False. + - Not supported with REST when set to false. + type: bool + default: true + package: + description: + - Name of the package file containing the firmware to be installed. Not required when -baseline is true. + - Not used if force_disruptive_update is False. + - Not supported with REST. + type: str + package_url: + description: + - URL of the package file containing the firmware to be downloaded. + - Once the package file is downloaded to a node, the firmware update will happen automatically in background. + - For SP, the upgrade will happen automatically when a node is rebooted. + - For SP, the upgrade will happen automatically if autoupdate is enabled (which is the recommended setting). + version_added: "20.5.0" + type: str + force_disruptive_update: + description: + - If set to C(False), and URL is given, the upgrade is non disruptive. If URL is not given, no operation is performed. + - Do not set this to C(True), unless directed by NetApp Tech Support. + - It will force an update even if the resource is not ready for it, and can be disruptive. 
+ - Not supported with REST when set to true. + type: bool + version_added: "20.5.0" + default: false + shelf_module_fw: + description: + - Shelf module firmware to be updated to. + - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware) + - Not supported with REST. + type: str + disk_fw: + description: + - disk firmware to be updated to. + - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware) + - Not supported with REST. + type: str + update_type: + description: + - Type of firmware update to be performed. Options include serial_full, serial_differential, network_full. + - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware) + - Not supported with REST. + type: str + install_baseline_image: + description: + - Install the version packaged with ONTAP if this parameter is set to true. Otherwise, package must be used to specify the package to install. + - Not used if force_disruptive_update is False (ONTAP will automatically select the firmware) + - Not supported with REST when set to true. + type: bool + default: false + firmware_type: + description: + - Type of firmware to be upgraded. Options include shelf, ACP, service-processor, and disk. + - For shelf firmware upgrade the operation is asynchronous, and therefore returns no errors that might occur during the download process. + - Shelf firmware upgrade is idempotent if shelf_module_fw is provided . + - disk firmware upgrade is idempotent if disk_fw is provided . + - With check mode, SP, ACP, disk, and shelf firmware upgrade is not idempotent. + - This operation will only update firmware on shelves/disk that do not have the latest firmware-revision. + - For normal operations, choose one of storage or service-processor. + - Type storage includes acp, shelf and disk and ONTAP will automatically determine what to do. 
+ - With REST, the module does not validate that the package matches the firmware type. ONTAP determines the type automatically. + - With REST, C(storage) downloads any firmware, including service-processor firmware. + - With REST, C(service-processor) unlocks SP reboot options. + choices: ['storage', 'service-processor', 'shelf', 'acp', 'disk'] + type: str + default: storage + fail_on_502_error: + description: + - The firmware download may take time if the web server is slow and if there are many nodes in the cluster. + - ONTAP will break the ZAPI connection after 5 minutes with a 502 Bad Gateway error, even though the download + is still happening. + - By default, this module ignores this error and assumes the download is progressing as ONTAP does not + provide a way to check the status. + - When setting this option to true, the module will report 502 as an error. + - Not supported with REST when set to true. + type: bool + default: false + version_added: "20.6.0" + rename_package: + description: + - Rename the package. + - Only available if 'firmware_type' is 'service-processor'. + - Not supported with REST. + type: str + version_added: "20.7.0" + replace_package: + description: + - Replace the local package. + - Only available if 'firmware_type' is 'service-processor'. + - Not supported with REST when set to false. + type: bool + version_added: "20.7.0" + reboot_sp: + description: + - Reboot service processor before downloading package. + - Only available if 'firmware_type' is 'service-processor'. + - Defaults to True if not set when 'firmware_type' is 'service-processor'. + - Set this explictly to true to avoid a warning, and to false to not reboot the SP. + - Rebooting the SP before download is strongly recommended. + type: bool + version_added: "20.7.0" + reboot_sp_after_download: + description: + - Reboot service processor after downloading package. + - Only available if 'firmware_type' is 'service-processor'. 
+ type: bool + version_added: "21.15.0" + server_username: + description: + - username to authenticate with the firmware package server. + - Ignored with ZAPI. + type: str + version_added: "21.15.0" + server_password: + description: + - password to authenticate with the firmware package server. + - Ignored with ZAPI. + type: str + version_added: "21.15.0" +short_description: NetApp ONTAP firmware upgrade for SP, shelf, ACP, and disk. +version_added: 2.9.0 +''' + +EXAMPLES = """ + + - name: firmware upgrade + netapp.ontap.na_ontap_firmware_upgrade: + state: present + package_url: "{{ web_link }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: firmware upgrade, confirm successful download + netapp.ontap.na_ontap_firmware_upgrade: + state: present + package_url: "{{ web_link }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + fail_on_502_error: true + - name: SP firmware upgrade + netapp.ontap.na_ontap_firmware_upgrade: + state: present + node: vsim1 + package: "{{ file name }}" + package_url: "{{ web_link }}" + clear_logs: True + install_baseline_image: False + update_type: serial_full + force_disruptive_update: False + firmware_type: service-processor + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: SP firmware download replace package + tags: + - sp_download + netapp.ontap.na_ontap_firmware_upgrade: + state: present + node: vsim1 + package_url: "{{ web_link }}" + firmware_type: service-processor + replace_package: true + reboot_sp: true + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + - name: SP firmware download rename package + tags: + - sp_download + netapp.ontap.na_ontap_firmware_upgrade: + state: present + node: vsim1 + package_url: "{{ web_link }}" + 
firmware_type: service-processor + rename_package: SP_FW.zip + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + - name: ACP firmware download and upgrade + netapp.ontap.na_ontap_firmware_upgrade: + state: present + node: vsim1 + firmware_type: acp + package_url: "{{ web_link }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: shelf firmware upgrade + netapp.ontap.na_ontap_firmware_upgrade: + state: present + firmware_type: shelf + package_url: "{{ web_link }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: disk firmware upgrade + netapp.ontap.na_ontap_firmware_upgrade: + state: present + firmware_type: disk + package_url: "{{ web_link }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: any firmware upgrade (REST) + netapp.ontap.na_ontap_firmware_upgrade: + state: present + package_url: "{{ web_link }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: SP firmware upgrade with reboots (REST) + netapp.ontap.na_ontap_firmware_upgrade: + state: present + package_url: "{{ web_link }}" + firmware_type: service-processor + reboot_sp: true + reboot_sp_after_download: true + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +msg: + description: Returns additional information in case of success. 
+ returned: always + type: str +""" + +import time +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +MSGS = dict( + no_action='No action taken.', + dl_completed='Firmware download completed.', + dl_completed_slowly='Firmware download completed, slowly.', + dl_in_progress='Firmware download still in progress.' +) + + +class NetAppONTAPFirmwareUpgrade: + """ + Class with ONTAP firmware upgrade methods + """ + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', default='present'), + node=dict(required=False, type='str'), + firmware_type=dict(type='str', choices=['storage', 'service-processor', 'shelf', 'acp', 'disk'], default='storage'), + clear_logs=dict(required=False, type='bool', default=True), + package=dict(required=False, type='str'), + install_baseline_image=dict(required=False, type='bool', default=False), + update_type=dict(required=False, type='str'), + shelf_module_fw=dict(required=False, type='str'), + disk_fw=dict(required=False, type='str'), + package_url=dict(required=False, type='str'), + force_disruptive_update=dict(required=False, type='bool', default=False), + fail_on_502_error=dict(required=False, type='bool', default=False), + rename_package=dict(required=False, type='str'), + replace_package=dict(required=False, type='bool'), + reboot_sp=dict(required=False, type='bool'), + reboot_sp_after_download=dict(required=False, type='bool'), + server_username=dict(required=False, type='str'), + server_password=dict(required=False, 
type='str', no_log=True), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('firmware_type', 'acp', ['node']), + ('firmware_type', 'disk', ['node']), + ('firmware_type', 'service-processor', ['node']), + ('force_disruptive_update', True, ['firmware_type']), + ('reboot_sp', True, ['node']), + ('reboot_sp_after_download', True, ['node']), + ], + required_together=[['server_username', 'server_password']], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self._node_uuid = None # to cache calls to get_node_uuid + + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = ['package', 'update_type', 'rename_package', 'shelf_module_fw', 'disk_fw'] + # only accept default value for these 5 options (2 True and 3 False) + # accept the default value (for replace_package, this is implicit for REST) but switch to ZAPI or error out if set to False + unsupported_rest_properties.extend(option for option in ('clear_logs', 'replace_package') if self.parameters.get(option) is False) + # accept the default value of False, but switch to ZAPI or error out if set to True + unsupported_rest_properties.extend(option for option in ('install_baseline_image', 'force_disruptive_update', 'fail_on_502_error') + if self.parameters[option]) + + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties) + + if self.parameters.get('firmware_type') == 'storage' and self.parameters.get('force_disruptive_update'): + self.module.fail_json(msg='Do not set force_disruptive_update to True, unless directed by NetApp Tech Support') + + for option in ('reboot_sp', 'reboot_sp_after_download'): + if self.parameters.get('firmware_type') != 'service-processor' and self.parameters.get(option): + self.module.warn('%s is ignored when firmware_type is not set to service-processor' % option) + if 
self.parameters.get('firmware_type') == 'service-processor' and self.parameters.get('reboot_sp') is None: + self.module.warn('Forcing a reboot of SP before download - set reboot_sp: true to disable this warning.') + self.parameters['reboot_sp'] = True + if not self.use_rest and self.parameters.get('firmware_type') == 'service-processor': + msg = 'With ZAPI and firmware_type set to service-processor: ' + if 'node' not in self.parameters: + self.module.fail_json(msg=msg + 'parameter node should be present.') + if self.parameters.get('install_baseline_image') and self.parameters.get('package') is not None: + self.module.fail_json(msg=msg + 'do not specify both package and install_baseline_image: true.') + if self.parameters.get('force_disruptive_update') \ + and self.parameters.get('install_baseline_image') is False \ + and self.parameters.get('package') is None: + self.module.fail_json(msg=msg + 'specify at least one of package or install_baseline_image: true.') + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True) + + def firmware_image_get_iter(self): + """ + Compose NaElement object to query current firmware version + :return: NaElement object for firmware_image_get_iter with query + """ + firmware_image_get = netapp_utils.zapi.NaElement('service-processor-get-iter') + query = netapp_utils.zapi.NaElement('query') + firmware_image_info = netapp_utils.zapi.NaElement('service-processor-info') + firmware_image_info.add_new_child('node', self.parameters['node']) + query.add_child_elem(firmware_image_info) + firmware_image_get.add_child_elem(query) + return firmware_image_get + + def firmware_image_get(self, node_name): + """ + Get current firmware image info + :return: True if query successful, else return None + """ + firmware_image_get_iter = self.firmware_image_get_iter() + try: + result = 
self.server.invoke_successfully(firmware_image_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching firmware image details: %s: %s' + % (self.parameters['node'], to_native(error)), + exception=traceback.format_exc()) + # return firmware image details + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + sp_info = result.get_child_by_name('attributes-list').get_child_by_name('service-processor-info') + return sp_info.get_child_content('firmware-version') + return None + + def acp_firmware_update_required(self): + """ + where acp firmware upgrade is required + :return: True is firmware upgrade is required else return None + """ + acp_firmware_get_iter = netapp_utils.zapi.NaElement('storage-shelf-acp-module-get-iter') + query = netapp_utils.zapi.NaElement('query') + acp_info = netapp_utils.zapi.NaElement('storage-shelf-acp-module') + query.add_child_elem(acp_info) + acp_firmware_get_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(acp_firmware_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching acp firmware details details: %s' + % (to_native(error)), exception=traceback.format_exc()) + acp_module_info = self.na_helper.safe_get(result, ['attributes-list', 'storage-shelf-acp-module']) + if acp_module_info: + state = acp_module_info.get_child_content('state') + if state == 'firmware_update_required': + # acp firmware version upgrade required + return True + return False + + def sp_firmware_image_update_progress_get(self, node_name): + """ + Get current firmware image update progress info + :return: Dictionary of firmware image update progress if query successful, else return None + """ + firmware_update_progress_get = netapp_utils.zapi.NaElement('service-processor-image-update-progress-get') + firmware_update_progress_get.add_new_child('node', 
self.parameters['node']) + + firmware_update_progress_info = {} + try: + result = self.server.invoke_successfully(firmware_update_progress_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching firmware image upgrade progress details: %s' + % (to_native(error)), exception=traceback.format_exc()) + # return firmware image update progress details + if result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info'): + update_progress_info = result.get_child_by_name('attributes').get_child_by_name('service-processor-image-update-progress-info') + firmware_update_progress_info['is-in-progress'] = update_progress_info.get_child_content('is-in-progress') + firmware_update_progress_info['node'] = update_progress_info.get_child_content('node') + return firmware_update_progress_info + + def shelf_firmware_info_get(self): + """ + Get the current firmware of shelf module + :return:dict with module id and firmware info + """ + shelf_id_fw_info = {} + shelf_firmware_info_get = netapp_utils.zapi.NaElement('storage-shelf-info-get-iter') + desired_attributes = netapp_utils.zapi.NaElement('desired-attributes') + storage_shelf_info = netapp_utils.zapi.NaElement('storage-shelf-info') + shelf_module = netapp_utils.zapi.NaElement('shelf-modules') + shelf_module_info = netapp_utils.zapi.NaElement('storage-shelf-module-info') + shelf_module.add_child_elem(shelf_module_info) + storage_shelf_info.add_child_elem(shelf_module) + desired_attributes.add_child_elem(storage_shelf_info) + shelf_firmware_info_get.add_child_elem(desired_attributes) + + try: + result = self.server.invoke_successfully(shelf_firmware_info_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching shelf module firmware details: %s' + % (to_native(error)), exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and 
int(result.get_child_content('num-records')) > 0: + shelf_info = result.get_child_by_name('attributes-list').get_child_by_name('storage-shelf-info') + if (shelf_info.get_child_by_name('shelf-modules') and + shelf_info.get_child_by_name('shelf-modules').get_child_by_name('storage-shelf-module-info')): + shelves = shelf_info['shelf-modules'].get_children() + for shelf in shelves: + shelf_id_fw_info[shelf.get_child_content('module-id')] = shelf.get_child_content('module-fw-revision') + return shelf_id_fw_info + + def disk_firmware_info_get(self): + """ + Get the current firmware of disks module + :return: + """ + disk_id_fw_info = {} + disk_firmware_info_get = netapp_utils.zapi.NaElement('storage-disk-get-iter') + desired_attributes = netapp_utils.zapi.NaElement('desired-attributes') + storage_disk_info = netapp_utils.zapi.NaElement('storage-disk-info') + disk_inv = netapp_utils.zapi.NaElement('disk-inventory-info') + storage_disk_info.add_child_elem(disk_inv) + desired_attributes.add_child_elem(storage_disk_info) + disk_firmware_info_get.add_child_elem(desired_attributes) + try: + result = self.server.invoke_successfully(disk_firmware_info_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching disk module firmware details: %s' + % (to_native(error)), exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + disk_info = result.get_child_by_name('attributes-list') + disks = disk_info.get_children() + for disk in disks: + disk_id_fw_info[disk.get_child_content('disk-uid')] = disk.get_child_by_name('disk-inventory-info').get_child_content('firmware-revision') + return disk_id_fw_info + + def disk_firmware_update_required(self): + """ + Check weather disk firmware upgrade is required or not + :return: True if the firmware upgrade is required + """ + disk_firmware_info = self.disk_firmware_info_get() + return any( + 
disk_firmware_info[disk] != self.parameters['disk_fw'] + for disk in disk_firmware_info + ) + + def shelf_firmware_update_required(self): + """ + Check weather shelf firmware upgrade is required or not + :return: True if the firmware upgrade is required + """ + shelf_firmware_info = self.shelf_firmware_info_get() + return any( + shelf_firmware_info[module] != self.parameters['shelf_module_fw'] + for module in shelf_firmware_info + ) + + def sp_firmware_image_update(self): + """ + Update current firmware image + """ + firmware_update_info = netapp_utils.zapi.NaElement('service-processor-image-update') + if self.parameters.get('package') is not None: + firmware_update_info.add_new_child('package', self.parameters['package']) + if self.parameters.get('clear_logs') is not None: + firmware_update_info.add_new_child('clear-logs', str(self.parameters['clear_logs'])) + if self.parameters.get('install_baseline_image') is not None: + firmware_update_info.add_new_child('install-baseline-image', str(self.parameters['install_baseline_image'])) + firmware_update_info.add_new_child('node', self.parameters['node']) + firmware_update_info.add_new_child('update-type', self.parameters['update_type']) + + try: + self.server.invoke_successfully(firmware_update_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + # Current firmware version matches the version to be installed + if to_native(error.code) == '13001' and (error.message.startswith('Service Processor update skipped')): + return False + self.module.fail_json(msg='Error updating firmware image for %s: %s' + % (self.parameters['node'], to_native(error)), + exception=traceback.format_exc()) + return True + + def shelf_firmware_upgrade(self): + """ + Upgrade shelf firmware image + """ + shelf_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-firmware-update') + try: + self.server.invoke_successfully(shelf_firmware_update_info, enable_tunneling=True) + return True + except 
netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error updating shelf firmware image : %s' + % (to_native(error)), exception=traceback.format_exc()) + + def acp_firmware_upgrade(self): + + """ + Upgrade shelf firmware image + """ + acp_firmware_update_info = netapp_utils.zapi.NaElement('storage-shelf-acp-firmware-update') + acp_firmware_update_info.add_new_child('node-name', self.parameters['node']) + try: + self.server.invoke_successfully(acp_firmware_update_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error updating acp firmware image : %s' + % (to_native(error)), exception=traceback.format_exc()) + + def disk_firmware_upgrade(self): + + """ + Upgrade disk firmware + """ + disk_firmware_update_info = netapp_utils.zapi.NaElement('disk-update-disk-fw') + disk_firmware_update_info.add_new_child('node-name', self.parameters['node']) + try: + self.server.invoke_successfully(disk_firmware_update_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error updating disk firmware image : %s' + % (to_native(error)), exception=traceback.format_exc()) + return True + + def download_firmware(self): + if self.use_rest: + return self.download_software_rest() + + ''' calls the system-cli ZAPI as there is no ZAPI for this feature ''' + msg = MSGS['dl_completed'] + command = ['storage', 'firmware', 'download', '-node', self.parameters['node'] if self.parameters.get('node') else '*', + '-package-url', self.parameters['package_url']] + command_obj = netapp_utils.zapi.NaElement("system-cli") + + args_obj = netapp_utils.zapi.NaElement("args") + for arg in command: + args_obj.add_new_child('arg', arg) + command_obj.add_child_elem(args_obj) + command_obj.add_new_child('priv', 'advanced') + + output = None + try: + output = self.server.invoke_successfully(command_obj, True) + + except netapp_utils.zapi.NaApiError as error: + # with netapp_lib, error.code may 
be a number or a string + try: + err_num = int(error.code) + except ValueError: + err_num = -1 + if err_num == 60: # API did not finish on time + # even if the ZAPI reports a timeout error, it does it after the command completed + msg = MSGS['dl_completed_slowly'] + elif err_num == 502 and not self.parameters['fail_on_502_error']: # Bad Gateway + # ONTAP proxy breaks the connection after 5 minutes, we can assume the download is progressing slowly + msg = MSGS['dl_in_progress'] + else: + self.module.fail_json(msg='Error running command %s: %s' % (command, to_native(error)), + exception=traceback.format_exc()) + except netapp_utils.zapi.etree.XMLSyntaxError as error: + self.module.fail_json(msg='Error decoding output from command %s: %s' % (command, to_native(error)), + exception=traceback.format_exc()) + + if output is not None: + # command completed, check for success + status = output.get_attr('status') + cli_output = output.get_child_content('cli-output') + if status is None or status != 'passed' or cli_output is None or cli_output == "": + if status is None: + extra_info = "'status' attribute missing" + elif status != 'passed': + extra_info = "check 'status' value" + else: + extra_info = 'check console permissions' + self.module.fail_json(msg='unable to download package from %s: %s. 
Received: %s' % + (self.parameters['package_url'], extra_info, output.to_string())) + + if cli_output is not None: + if cli_output.startswith('Error:') or \ + 'Failed to download package from' in cli_output: + self.module.fail_json(msg='failed to download package from %s: %s' % (self.parameters['package_url'], cli_output)) + msg += " Extra info: %s" % cli_output + + return msg + + def download_sp_image(self): + fetch_package = netapp_utils.zapi.NaElement('system-image-fetch-package') + fetch_package.add_new_child('node', self.parameters['node']) + fetch_package.add_new_child('package', self.parameters['package_url']) + if self.parameters.get('rename_package'): + fetch_package.add_new_child('rename-package', self.parameters['rename_package']) + if self.parameters.get('replace_package'): + fetch_package.add_new_child('replace-package', str(self.parameters['replace_package'])) + try: + self.server.invoke_successfully(fetch_package, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching system image package from %s: %s' + % (self.parameters['package_url'], to_native(error)), + exception=traceback.format_exc()) + + def download_sp_image_progress(self): + progress = netapp_utils.zapi.NaElement('system-image-update-progress-get') + progress.add_new_child('node', self.parameters['node']) + progress_info = {} + try: + result = self.server.invoke_successfully(progress, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching system image package download progress: %s' + % (to_native(error)), exception=traceback.format_exc()) + if result.get_child_by_name('phase'): + progress_info['phase'] = result.get_child_content('phase') + else: + progress_info['phase'] = None + if result.get_child_by_name('exit-message'): + progress_info['exit_message'] = result.get_child_content('exit-message') + else: + progress_info['exit_message'] = None + if 
    def reboot_sp(self):
        """Reboot the service processor on the target node (REST when available, else ZAPI)."""
        if self.use_rest:
            return self.reboot_sp_rest()
        reboot = netapp_utils.zapi.NaElement('service-processor-reboot')
        reboot.add_new_child('node', self.parameters['node'])
        try:
            self.server.invoke_successfully(reboot, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error rebooting service processor: %s'
                                      % (to_native(error)),
                                  exception=traceback.format_exc())

    def get_node_uuid(self):
        """Return the UUID for self.parameters['node'], caching the result in self._node_uuid."""
        if self._node_uuid is not None:
            return self._node_uuid
        api = 'cluster/nodes'
        query = {'name': self.parameters['node']}
        node, error = rest_generic.get_one_record(self.rest_api, api, query, fields='uuid')
        if error:
            self.module.fail_json(msg='Error reading node UUID: %s' % error)
        if not node:
            # include the list of known node names to help diagnose a typo
            self.module.fail_json(msg='Error: node not found %s, current nodes: %s.'
                                      % (self.parameters['node'], ', '.join(self.get_node_names())))
        self._node_uuid = node['uuid']
        return node['uuid']

    def get_node_names(self):
        """Return the names of all cluster nodes (used for error reporting)."""
        api = 'cluster/nodes'
        nodes, error = rest_generic.get_0_or_more_records(self.rest_api, api, fields='name')
        if error:
            self.module.fail_json(msg='Error reading nodes: %s' % error)
        return [node['name'] for node in nodes]

    def reboot_sp_rest_cli(self):
        """ for older versions of ONTAP, use the REST CLI passthrough """
        api = 'private/cli/sp/reboot-sp'
        query = {'node': self.parameters['node']}
        dummy, error = rest_generic.patch_async(self.rest_api, api, None, None, query)
        return error

    def get_sp_state(self):
        """Return the SP state of the target node, or None if the node record is missing."""
        api = 'cluster/nodes/%s' % self.get_node_uuid()
        node, error = rest_generic.get_one_record(self.rest_api, api, fields='service_processor.state')
        if error:
            self.module.fail_json(msg='Error getting node SP state: %s' % error)
        if node:
            return self.na_helper.safe_get(node, ['service_processor', 'state'])

    def wait_for_sp_reboot(self):
        """Poll the SP state every 15s, up to 20 times (5 minutes), until it leaves 'rebooting'."""
        for dummy in range(20):
            time.sleep(15)
            state = self.get_sp_state()
            if state != 'rebooting':
                break
        else:
            # for/else: the loop completed without break, i.e. the SP is still rebooting
            self.module.warn('node did not finish up booting in 5 minutes!')

    def reboot_sp_rest(self):
        """Reboot the SP through REST; fall back to the CLI passthrough on pre-9.10.1 ONTAP."""
        uuid = self.get_node_uuid()
        api = 'cluster/nodes'
        body = {'service_processor.action': 'reboot'}
        dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body)
        if error and 'Unexpected argument "service_processor.action"' in error:
            # service_processor.action requires ONTAP 9.10.1 - try the private CLI instead
            error = self.reboot_sp_rest_cli()
            if error:
                error = 'reboot_sp requires ONTAP 9.10.1 or newer, falling back to CLI passthrough failed: ' + error
        if error:
            self.module.fail_json(msg='Error rebooting node SP: %s' % error)
update/install operation. + if progress['phase'] == 'Download': + while progress['run_status'] is not None and progress['run_status'] != 'Exited': + time.sleep(10) + progress = self.download_sp_image_progress() + if progress['exit_status'] != 'Success': + self.module.fail_json(msg=progress['exit_message'], exception=traceback.format_exc()) + return MSGS['dl_completed'] + return MSGS['no_action'] + + def download_software_rest(self): + body = {'url': self.parameters['package_url']} + for attr in ('username', 'password'): + value = self.parameters.get('server_%s' % attr) + if value: + body[attr] = value + api = 'cluster/software/download' + # burt 1442080 - when timeout is 30, the API may return a 500 error, though the job says download completed! + message, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=self.parameters.get('time_out', 180), timeout=0) + if error: + self.module.fail_json(msg='Error downloading software: %s' % error) + return message + + def apply(self): + """ + Apply action to upgrade firmware + """ + changed = False + msg = MSGS['no_action'] + if self.parameters.get('package_url'): + if not self.module.check_mode: + if self.parameters.get('firmware_type') == 'service-processor': + msg = self.download_sp_firmware() + if self.parameters.get('reboot_sp') and self.use_rest: + self.wait_for_sp_reboot() + else: + msg = self.download_firmware() + changed = True + if not self.parameters['force_disruptive_update'] and not self.parameters.get('reboot_sp_after update'): + # disk_qual, disk, shelf, and ACP are automatically updated in background + # The SP firmware is automatically updated on reboot + self.module.exit_json(changed=changed, msg=msg) + if msg == MSGS['dl_in_progress']: + # can't force an update if the software is still downloading + self.module.fail_json(msg="Cannot force update: %s" % msg) + self.disruptive_update(changed) + + def disruptive_update(self, changed): + if self.parameters.get('firmware_type') == 
'service-processor': + if self.parameters.get('reboot_sp_after update'): + self.reboot_sp() + if not self.parameters['force_disruptive_update']: + return + # service-processor firmware upgrade + current = self.firmware_image_get(self.parameters['node']) + + if self.parameters.get('state') == 'present' and current: + if not self.module.check_mode: + if self.sp_firmware_image_update(): + changed = True + firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node']) + while firmware_update_progress.get('is-in-progress') == 'true': + time.sleep(25) + firmware_update_progress = self.sp_firmware_image_update_progress_get(self.parameters['node']) + else: + # we don't know until we try the upgrade + changed = True + + elif self.parameters.get('firmware_type') == 'shelf': + # shelf firmware upgrade + if self.parameters.get('shelf_module_fw'): + if self.shelf_firmware_update_required(): + changed = True if self.module.check_mode else self.shelf_firmware_upgrade() + else: + # with check_mode, we don't know until we try the upgrade -- assuming the worst + changed = True if self.module.check_mode else self.shelf_firmware_upgrade() + elif self.parameters.get('firmware_type') == 'acp' and self.acp_firmware_update_required(): + # acp firmware upgrade + if not self.module.check_mode: + self.acp_firmware_upgrade() + changed = True + elif self.parameters.get('firmware_type') == 'disk': + # Disk firmware upgrade + if self.parameters.get('disk_fw'): + if self.disk_firmware_update_required(): + changed = True if self.module.check_mode else self.disk_firmware_upgrade() + else: + # with check_mode, we don't know until we try the upgrade -- assuming the worst + changed = True if self.module.check_mode else self.disk_firmware_upgrade() + self.module.exit_json(changed=changed, msg='forced update for %s' % self.parameters.get('firmware_type')) + + +def main(): + """Execute action""" + fwupgrade_obj = NetAppONTAPFirmwareUpgrade() + fwupgrade_obj.apply() + + +if 
  - When using REST, a prepopulate can be started on an existing FlexCache volume.
+ required: true + type: str + aliases: ['volume'] + version_added: 21.3.0 + junction_path: + description: + - Junction path of the cache volume. + type: str + aliases: ['path'] + auto_provision_as: + description: + - Use this parameter to automatically select existing aggregates for volume provisioning. Eg flexgroup + - Note that the fastest aggregate type with at least one aggregate on each node of the cluster will be selected. + - Ignored when using REST - omit aggr_list for automatic selection. + type: str + size: + description: + - Size of cache volume. + type: int + size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + type: str + default: gb + vserver: + description: + - Name of the target vserver for the FlexCache. + - Note that hostname, username, password are intended for the target vserver. + required: true + type: str + aggr_list: + description: + - List of aggregates to host target FlexCache volume. + type: list + elements: str + aliases: ['aggregates'] + aggr_list_multiplier: + description: + - Aggregate list repeat count. + - REST - Number of FlexCache constituents per aggregate when the C(aggregates) field is mentioned. + type: int + aliases: ['constituents_per_aggregate'] + force_unmount: + description: + - Unmount FlexCache volume. Delete the junction path at which the volume is mounted before deleting the FlexCache relationship. + type: bool + default: false + force_offline: + description: + - Offline FlexCache volume before deleting the FlexCache relationship. + - The volume will be destroyed and data can be lost. + type: bool + default: false + time_out: + description: + - time to wait for flexcache creation or deletion in seconds + - if 0, the request is asynchronous + - default is set to 3 minutes + type: int + default: 180 + prepopulate: + version_added: 21.3.0 + description: + - prepopulate FlexCache with data from origin volume. 
+ - requires ONTAP 9.8 or later, and REST support. + - dir_paths must be set for this option to be effective. + type: dict + suboptions: + dir_paths: + description: + - List of directory paths in the owning SVM's namespace at which the FlexCache volume is mounted. + - Path must begin with '/'. + type: list + elements: str + required: true + exclude_dir_paths: + description: + - Directory path which needs to be excluded from prepopulation. + - Path must begin with '/'. + - Requires ONTAP 9.9 or later. + type: list + elements: str + recurse: + description: + - Specifies whether or not the prepopulate action should search through the directory-path recursively. + - If not set, the default value 'true' is used. + type: bool + force_prepopulate_if_already_created: + description: + - by default, this module will start a prepopulate task each time it is called, and is not idempotent. + - if set to false, the prepopulate task is not started if the FlexCache already exists. + type: bool + default: true +''' + +EXAMPLES = """ + + - name: Create FlexCache + netapp.ontap.na_ontap_flexcache: + state: present + origin_volume: test_src + name: test_dest + origin_vserver: ansible_src + vserver: ansible_dest + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete FlexCache + netapp.ontap.na_ontap_flexcache: + state: absent + name: test_dest + vserver: ansible_dest + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ +""" + +import time +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_volume + + +class 
    def __init__(self):
        """Set up the module arguments, validate REST/ZAPI support and version-gated options."""

        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'],
                       default='present'),
            origin_volume=dict(required=False, type='str'),  # origins[0]
            origin_vserver=dict(required=False, type='str'),  # origins[0]
            origin_cluster=dict(required=False, type='str'),  # origins[0]
            auto_provision_as=dict(required=False, type='str'),  # ignored with REST
            name=dict(required=True, type='str', aliases=['volume']),
            junction_path=dict(required=False, type='str', aliases=['path']),
            size=dict(required=False, type='int'),
            size_unit=dict(default='gb',
                           choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
                                    'pb', 'eb', 'zb', 'yb'], type='str'),
            vserver=dict(required=True, type='str'),
            aggr_list=dict(required=False, type='list', elements='str', aliases=['aggregates']),
            aggr_list_multiplier=dict(required=False, type='int', aliases=['constituents_per_aggregate']),
            force_offline=dict(required=False, type='bool', default=False),
            force_unmount=dict(required=False, type='bool', default=False),
            time_out=dict(required=False, type='int', default=180),
            prepopulate=dict(required=False, type='dict', options=dict(
                dir_paths=dict(required=True, type='list', elements='str'),
                exclude_dir_paths=dict(required=False, type='list', elements='str'),
                recurse=dict(required=False, type='bool'),
                force_prepopulate_if_already_created=dict(required=False, type='bool', default=True),
            ))
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[
                ('aggr_list', 'auto_provision_as'),
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # normalize size to bytes using the requested unit
        if self.parameters.get('size'):
            self.parameters['size'] = self.parameters['size'] * netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']]
        # setup later if required
        self.origin_server = None

        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        # prepopulate requires ONTAP 9.8 with REST
        ontap_98_options = ['prepopulate']
        if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8) and any(x in self.parameters for x in ontap_98_options):
            self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_98_options, version='9.8'))

        if 'prepopulate' in self.parameters:
            # sanitize the dictionary, as Ansible fills everything with None values
            self.parameters['prepopulate'] = self.na_helper.filter_out_none_entries(self.parameters['prepopulate'])
            # exclude_dir_paths requires ONTAP 9.9
            ontap_99_options = ['exclude_dir_paths']
            if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9) and any(x in self.parameters['prepopulate'] for x in ontap_99_options):
                options = ['prepopulate: ' + x for x in ontap_99_options]
                self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(options, version='9.9'))

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def add_parameter_to_dict(self, adict, name, key, tostr=False):
        ''' add defined parameter (not None) to adict using key '''
        value = self.parameters.get(name)
        if value is not None:
            adict[key] = str(value) if tostr else value

    def get_job(self, jobid, server):
        """
        Get job details by id
        """
        job_get = netapp_utils.zapi.NaElement('job-get')
        job_get.add_new_child('job-id', jobid)
        try:
            result = server.invoke_successfully(job_get, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) == "15661":
                # Not found
                return None
            self.module.fail_json(msg='Error fetching job info: %s' % to_native(error),
                                  exception=traceback.format_exc())
        job_info = result.get_child_by_name('attributes').get_child_by_name('job-info')
        # job-completion is only present once the job is done
        return {
            'job-progress': job_info['job-progress'],
            'job-state': job_info['job-state'],
            'job-completion': job_info['job-completion'] if job_info.get_child_by_name('job-completion') is not None else None
        }
exception=traceback.format_exc()) + job_info = result.get_child_by_name('attributes').get_child_by_name('job-info') + return { + 'job-progress': job_info['job-progress'], + 'job-state': job_info['job-state'], + 'job-completion': job_info['job-completion'] if job_info.get_child_by_name('job-completion') is not None else None + } + + def check_job_status(self, jobid): + """ + Loop until job is complete + """ + server = self.server + sleep_time = 5 + time_out = self.parameters['time_out'] + while time_out > 0: + results = self.get_job(jobid, server) + # If running as cluster admin, the job is owned by cluster vserver + # rather than the target vserver. + if results is None and server == self.server: + results = netapp_utils.get_cserver(self.server) + server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results) + continue + if results is None: + error = 'cannot locate job with id: %s' % jobid + break + if results['job-state'] in ('queued', 'running'): + time.sleep(sleep_time) + time_out -= sleep_time + continue + if results['job-state'] in ('success', 'failure'): + break + else: + self.module.fail_json(msg='Unexpected job status in: %s' % repr(results)) + + if results is not None: + if results['job-state'] == 'success': + error = None + elif results['job-state'] in ('queued', 'running'): + error = 'job completion exceeded expected timer of: %s seconds' % self.parameters['time_out'] + elif results['job-completion'] is not None: + error = results['job-completion'] + else: + error = results['job-progress'] + return error + + def flexcache_get_iter(self): + """ + Compose NaElement object to query current FlexCache relation + """ + options = {'volume': self.parameters['name']} + self.add_parameter_to_dict(options, 'origin_volume', 'origin-volume') + self.add_parameter_to_dict(options, 'origin_vserver', 'origin-vserver') + self.add_parameter_to_dict(options, 'origin_cluster', 'origin-cluster') + flexcache_info = 
netapp_utils.zapi.NaElement.create_node_with_children( + 'flexcache-info', **options) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(flexcache_info) + flexcache_get_iter = netapp_utils.zapi.NaElement('flexcache-get-iter') + flexcache_get_iter.add_child_elem(query) + return flexcache_get_iter + + def flexcache_get(self): + """ + Get current FlexCache relations + :return: Dictionary of current FlexCache details if query successful, else None + """ + if self.use_rest: + api = 'storage/flexcache/flexcaches' + query = { + 'name': self.parameters['name'], + 'svm.name': self.parameters['vserver'] + } + if 'origin_cluster' in self.parameters: + query['origin.cluster.name'] = self.parameters['origin_cluster'] + fields = 'svm,name,uuid,path' + flexcache, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + self.na_helper.fail_on_error(error) + if flexcache is None: + return None + return dict( + vserver=flexcache['svm']['name'], + name=flexcache['name'], + uuid=flexcache['uuid'], + junction_path=flexcache.get('path') + ) + + flexcache_get_iter = self.flexcache_get_iter() + flex_info = {} + try: + result = self.server.invoke_successfully(flexcache_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching FlexCache info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + flexcache_info = result.get_child_by_name('attributes-list') \ + .get_child_by_name('flexcache-info') + flex_info['origin_cluster'] = flexcache_info.get_child_content('origin-cluster') + flex_info['origin_volume'] = flexcache_info.get_child_content('origin-volume') + flex_info['origin_vserver'] = flexcache_info.get_child_content('origin-vserver') + flex_info['size'] = flexcache_info.get_child_content('size') + flex_info['name'] = flexcache_info.get_child_content('volume') + 
flex_info['vserver'] = flexcache_info.get_child_content('vserver') + + return flex_info + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) > 1: + msg = 'Multiple records found for %s:' % self.parameters['name'] + self.module.fail_json(msg='Error fetching FlexCache info: %s' % msg) + return None + + def flexcache_rest_create_body(self, mappings): + ''' maps self.parameters to REST API body attributes, using mappings to identify fields to add ''' + body = {} + for key, value in mappings.items(): + if key in self.parameters: + if key == 'aggr_list': + body[value] = [dict(name=aggr) for aggr in self.parameters[key]] + else: + body[value] = self.parameters[key] + elif key == 'origins': + # this is an artificial key, to match the REST list of dict structure + origin = dict( + volume=dict(name=self.parameters['origin_volume']), + svm=dict(name=self.parameters['origin_vserver']) + ) + body[value] = [origin] + return body + + def flexcache_rest_create(self): + ''' use POST to create a FlexCache ''' + mappings = dict( + name='name', + vserver='svm.name', + junction_path='path', + size='size', + aggr_list='aggregates', + aggr_list_multiplier='constituents_per_aggregate', + origins='origins', + prepopulate='prepopulate' + ) + body = self.flexcache_rest_create_body(mappings) + api = 'storage/flexcache/flexcaches' + response, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=self.parameters['time_out']) + self.na_helper.fail_on_error(error) + return response + + def flexcache_rest_modify(self, uuid): + ''' use PATCH to start prepopulating a FlexCache ''' + mappings = dict( # name cannot be set, though swagger example shows it + prepopulate='prepopulate' + ) + body = self.flexcache_rest_create_body(mappings) + api = 'storage/flexcache/flexcaches' + response, error = rest_generic.patch_async(self.rest_api, api, uuid, body, job_timeout=self.parameters['time_out']) + self.na_helper.fail_on_error(error) + return 
response + + def flexcache_create_async(self): + """ + Create a FlexCache relationship + """ + options = {'origin-volume': self.parameters['origin_volume'], + 'origin-vserver': self.parameters['origin_vserver'], + 'volume': self.parameters['name']} + self.add_parameter_to_dict(options, 'junction_path', 'junction-path') + self.add_parameter_to_dict(options, 'auto_provision_as', 'auto-provision-as') + self.add_parameter_to_dict(options, 'size', 'size', tostr=True) + if self.parameters.get('aggr_list') and self.parameters.get('aggr_list_multiplier'): + self.add_parameter_to_dict(options, 'aggr_list_multiplier', 'aggr-list-multiplier', tostr=True) + flexcache_create = netapp_utils.zapi.NaElement.create_node_with_children('flexcache-create-async', **options) + if self.parameters.get('aggr_list'): + aggregates = netapp_utils.zapi.NaElement('aggr-list') + for aggregate in self.parameters['aggr_list']: + aggregates.add_new_child('aggr-name', aggregate) + flexcache_create.add_child_elem(aggregates) + try: + result = self.server.invoke_successfully(flexcache_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating FlexCache: %s' % to_native(error), + exception=traceback.format_exc()) + results = {} + for key in ('result-status', 'result-jobid'): + if result.get_child_by_name(key): + results[key] = result[key] + return results + + def flexcache_create(self): + """ + Create a FlexCache relationship + Check job status + """ + if self.use_rest: + return self.flexcache_rest_create() + + results = self.flexcache_create_async() + status = results.get('result-status') + if status == 'in_progress' and 'result-jobid' in results: + if self.parameters['time_out'] == 0: + # asynchronous call, assuming success! 
+ return + error = self.check_job_status(results['result-jobid']) + if error is None: + return + else: + self.module.fail_json(msg='Error when creating flexcache: %s' % error) + self.module.fail_json(msg='Unexpected error when creating flexcache: results is: %s' % repr(results)) + + def flexcache_delete_async(self): + """ + Delete FlexCache relationship at destination cluster + """ + options = {'volume': self.parameters['name']} + flexcache_delete = netapp_utils.zapi.NaElement.create_node_with_children('flexcache-destroy-async', **options) + try: + result = self.server.invoke_successfully(flexcache_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting FlexCache: %s' % (to_native(error)), + exception=traceback.format_exc()) + results = {} + for key in ('result-status', 'result-jobid'): + if result.get_child_by_name(key): + results[key] = result[key] + return results + + def rest_offline_volume(self, current): + """ + Offline the volume using REST PATCH method. + """ + uuid = current.get('uuid') + if uuid is None: + error = 'Error, no uuid in current: %s' % str(current) + self.na_helper.fail_on_error(error) + body = dict(state='offline') + return self.patch_volume_rest(uuid, body) + + def volume_offline(self, current): + """ + Offline FlexCache volume at destination cluster + """ + if self.use_rest: + self.rest_offline_volume(current) + else: + options = {'name': self.parameters['name']} + xml = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-offline', **options) + try: + self.server.invoke_successfully(xml, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error offlining FlexCache volume: %s' + % (to_native(error)), + exception=traceback.format_exc()) + + def rest_mount_volume(self, current, path): + """ + Mount the volume using REST PATCH method. + If path is empty string, unmount the volume. 
+ """ + uuid = current.get('uuid') + if uuid is None: + error = 'Error, no uuid in current: %s' % str(current) + self.na_helper.fail_on_error(error) + body = dict(nas=dict(path=path)) + return self.patch_volume_rest(uuid, body) + + def rest_unmount_volume(self, current): + """ + Unmount the volume using REST PATCH method. + """ + self.rest_mount_volume(current, '') if current.get('junction_path') else None + + def volume_unmount(self, current): + """ + Unmount FlexCache volume at destination cluster + """ + if self.use_rest: + self.rest_unmount_volume(current) + else: + options = {'volume-name': self.parameters['name']} + xml = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-unmount', **options) + try: + self.server.invoke_successfully(xml, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error unmounting FlexCache volume: %s' + % (to_native(error)), + exception=traceback.format_exc()) + + def patch_volume_rest(self, uuid, body): + response, error = rest_volume.patch_volume(self.rest_api, uuid, body) + self.na_helper.fail_on_error(error) + return response + + def flexcache_rest_delete(self, current): + """ + Delete the flexcache using REST DELETE method. + """ + response = None + uuid = current.get('uuid') + if uuid is None: + error = 'Error, no uuid in current: %s' % str(current) + self.na_helper.fail_on_error(error) + api = 'storage/flexcache/flexcaches' + # There may be a bug in ONTAP. If return_timeout is >= 15, the call fails with uuid not found! + # With 5, a job is queued, and completes with success. With a big enough value, no job is + # queued, and the API returns in around 15 seconds with a not found error. 
+ rto = netapp_utils.get_feature(self.module, 'flexcache_delete_return_timeout') + response, error = rest_generic.delete_async(self.rest_api, api, uuid, timeout=rto, job_timeout=self.parameters['time_out']) + self.na_helper.fail_on_error(error) + return response + + def flexcache_delete(self, current): + """ + Delete FlexCache relationship at destination cluster + Check job status + """ + if self.parameters['force_unmount']: + self.volume_unmount(current) + if self.parameters['force_offline']: + self.volume_offline(current) + if self.use_rest: + return self.flexcache_rest_delete(current) + results = self.flexcache_delete_async() + status = results.get('result-status') + if status == 'in_progress' and 'result-jobid' in results: + if self.parameters['time_out'] == 0: + # asynchronous call, assuming success! + return None + error = self.check_job_status(results['result-jobid']) + if error is not None: + self.module.fail_json(msg='Error when deleting flexcache: %s' % error) + return None + self.module.fail_json(msg='Unexpected error when deleting flexcache: results is: %s' % repr(results)) + + def check_parameters(self, cd_action): + """ + Validate parameters and fail if one or more required params are missing + """ + if cd_action != 'create': + return + if self.parameters['state'] == 'present': + expected = 'origin_volume', 'origin_vserver' + missings = [param for param in expected if not self.parameters.get(param)] + if missings: + plural = 's' if len(missings) > 1 else '' + msg = 'Missing parameter%s: %s' % (plural, ', '.join(missings)) + self.module.fail_json(msg=msg) + + def apply(self): + """ + Apply action to FlexCache + """ + current = self.flexcache_get() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify, mount_unmount = None, None + prepopulate_if_already_created = None + + if self.parameters['state'] == 'present' and 'prepopulate' in self.parameters: + prepopulate_if_already_created = 
self.parameters['prepopulate'].pop('force_prepopulate_if_already_created') + + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if modify and self.use_rest: + mount_unmount = modify.pop('junction_path', None) + if modify: + self.module.fail_json(msg='FlexCache properties cannot be modified by this module. modify: %s' % str(modify)) + if current and prepopulate_if_already_created: + # force a prepopulate action + modify = dict(prepopulate=self.parameters['prepopulate']) + self.na_helper.changed = True + self.module.warn('na_ontap_flexcache is not idempotent when prepopulate is present and force_prepopulate_if_already_created=true') + if mount_unmount == '' or current['junction_path'] == '': + self.module.warn('prepopulate requires the FlexCache volume to be mounted') + self.check_parameters(cd_action) + response = None + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + response = self.flexcache_create() + elif cd_action == 'delete': + response = self.flexcache_delete(current) + else: + if mount_unmount is not None: + # mount first, as this is required for prepopulate to succeed (or fail for unmount) + self.rest_mount_volume(current, mount_unmount) + if modify: + response = self.flexcache_rest_modify(current['uuid']) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, response) + self.module.exit_json(**result) + + +def main(): + """Execute action""" + my_obj = NetAppONTAPFlexCache() + my_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_event.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_event.py new file mode 100644 index 000000000..fa7629fdb --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_event.py @@ -0,0 +1,444 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see 
#!/usr/bin/python

# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
module: na_ontap_fpolicy_event
short_description: NetApp ONTAP FPolicy policy event configuration
extends_documentation_fragment:
  - netapp.ontap.netapp.na_ontap
version_added: '21.4.0'
author: NetApp Ansible Team (@carchi8py)
description:
- Create, delete or modify an FPolicy policy event.
options:
  state:
    description:
    - Whether the FPolicy policy event is present or not.
    choices: ['present', 'absent']
    default: present
    type: str

  vserver:
    description:
    - The name of the vserver to create the event on.
    required: true
    type: str

  name:
    description:
    - Name of the Event.
    required: true
    type: str

  file_operations:
    description:
    - Name of file operations to be applied to the event. By default no operations are monitored.
    type: list
    elements: 'str'
    choices: ['close', 'create', 'create_dir', 'delete', 'delete_dir', 'getattr', 'link', 'lookup',
      'open', 'read', 'write', 'rename', 'rename_dir', 'setattr', 'symlink']

  filters:
    description:
    - Name of filters to be applied to the event. It is notification filtering parameters. By default no filters are selected.
    type: list
    elements: 'str'
    choices: ['monitor_ads', 'close_with_modification', 'close_without_modification', 'first_read', 'first_write', 'offline_bit', 'open_with_delete_intent',
      'open_with_write_intent', 'write_with_size_change', 'close_with_read', 'setattr_with_owner_change', 'setattr_with_group_change',
      'setattr_with_sacl_change', 'setattr_with_dacl_change', 'setattr_with_modify_time_change', 'setattr_with_access_time_change',
      'setattr_with_creation_time_change', 'setattr_with_mode_change', 'setattr_with_size_change', 'setattr_with_allocation_size_change', 'exclude_directory']

  protocol:
    description:
    - Name of protocol for which event is created. By default no protocol is selected.
    choices: ['cifs', 'nfsv3', 'nfsv4']
    type: str

  volume_monitoring:
    description:
    - Indicator if the volume operation required for the event. If not specified the default Value is false.
    type: bool

notes:
- Support check_mode.
'''

EXAMPLES = """
- name: Create FPolicy Event
  na_ontap_fpolicy_event:
    state: present
    vserver: svm1
    name: fpolicy_event
    file_operations: ['create', 'create_dir', 'delete', 'delete_dir', 'read', 'close', 'rename', 'rename_dir']
    filters: ['first_read', 'close_with_modification']
    protocol: cifs
    volume_monitoring: false
    username: "{{ username }}"
    password: "{{ password }}"
    hostname: "{{ hostname }}"

- name: Modify FPolicy Event
  na_ontap_fpolicy_event:
    state: present
    vserver: svm1
    name: fpolicy_event
    volume_monitoring: true
    username: "{{ username }}"
    password: "{{ password }}"
    hostname: "{{ hostname }}"

- name: Delete FPolicy Event
  na_ontap_fpolicy_event:
    state: absent
    vserver: svm1
    name: fpolicy_event
    username: "{{ username }}"
    password: "{{ password }}"
    hostname: "{{ hostname }}"

"""

RETURN = """ # """

import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh


class NetAppOntapFpolicyEvent():
    """Create, modify or delete an FPolicy policy event via REST or ZAPI."""

    def __init__(self):
        """Set up the argument spec, parse parameters and select REST vs ZAPI."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(choices=['present', 'absent'], type='str', default='present'),
            vserver=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            file_operations=dict(
                required=False,
                type='list',
                elements='str',
                choices=['close', 'create', 'create_dir', 'delete', 'delete_dir', 'getattr', 'link',
                         'lookup', 'open', 'read', 'write', 'rename', 'rename_dir', 'setattr', 'symlink']),
            filters=dict(
                required=False,
                type='list',
                elements='str',
                choices=['monitor_ads', 'close_with_modification', 'close_without_modification', 'first_read',
                         'first_write', 'offline_bit', 'open_with_delete_intent', 'open_with_write_intent', 'write_with_size_change', 'close_with_read',
                         'setattr_with_owner_change', 'setattr_with_group_change', 'setattr_with_sacl_change', 'setattr_with_dacl_change',
                         'setattr_with_modify_time_change', 'setattr_with_access_time_change', 'setattr_with_creation_time_change', 'setattr_with_mode_change',
                         'setattr_with_size_change', 'setattr_with_allocation_size_change', 'exclude_directory']),
            protocol=dict(required=False, type='str', choices=['cifs', 'nfsv3', 'nfsv4']),
            volume_monitoring=dict(required=False, type='bool')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            required_together=[
                ('protocol', 'file_operations')]
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.vserver_uuid = None

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                # fixed: was fail_json(msg=netapp_utils.netapp) which raises
                # AttributeError instead of reporting the missing library
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_vserver_uuid(self):
        """
        Get vserver uuid, used for REST API calls.
        """
        api = "/svm/svms"
        query = {
            'name': self.parameters['vserver']
        }
        message, error = self.rest_api.get(api, query)
        if error:
            self.module.fail_json(msg=error)
        # if vserver does not exist we expect message to be a dict containing 'records': []
        if not message['records']:
            self.module.fail_json(msg="vserver does not exist")

        return message['records'][0]['uuid']

    def list_to_dict(self, params):
        """
        Converts a list of entries to a dictionary with the key as the parameter name and the value as True as expected by the REST API.
        """
        return dict((param, True) for param in params)

    def get_fpolicy_event(self):
        """
        Get FPolicy event configuration if an event matching the parameters exists.

        Returns a dict of the event's settings, or None when not found.
        """
        return_value = None
        if self.use_rest:
            api = "/protocols/fpolicy/%s/events" % (self.vserver_uuid)
            query = {
                'fields': 'protocol,filters,file_operations,volume_monitoring'
            }
            message, error = self.rest_api.get(api, query)
            records, error = rrh.check_for_0_or_more_records(api, message, error)
            if error:
                self.module.fail_json(msg=error)
            if records is not None:
                for record in records:
                    if record['name'] == self.parameters['name']:
                        return_value = {}
                        for parameter in ('protocol', 'volume_monitoring'):
                            return_value[parameter] = []
                            if parameter in record:
                                return_value[parameter] = record[parameter]
                        # file_operations and filters contain a dict of all possible choices
                        # as the keys and True/False as the values.
                        # Return a list of the choices that are True.
                        return_value['file_operations'] = []
                        if 'file_operations' in record:
                            file_operation_list = [file_operation for file_operation, enabled in record['file_operations'].items() if enabled]
                            return_value['file_operations'] = file_operation_list

                        return_value['filters'] = []
                        if 'filters' in record:
                            # renamed loop variable: avoid shadowing builtin 'filter'
                            filters_list = [filter_name for filter_name, enabled in record['filters'].items() if enabled]
                            return_value['filters'] = filters_list

            return return_value

        else:
            fpolicy_event_obj = netapp_utils.zapi.NaElement('fpolicy-policy-event-get-iter')
            fpolicy_event_config = netapp_utils.zapi.NaElement('fpolicy-event-options-config')
            fpolicy_event_config.add_new_child('event-name', self.parameters['name'])
            fpolicy_event_config.add_new_child('vserver', self.parameters['vserver'])
            query = netapp_utils.zapi.NaElement('query')
            query.add_child_elem(fpolicy_event_config)
            fpolicy_event_obj.add_child_elem(query)

            try:
                result = self.server.invoke_successfully(fpolicy_event_obj, True)

            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error searching for FPolicy policy event %s on vserver %s: %s' % (
                    self.parameters['name'], self.parameters['vserver'], to_native(error)),
                    exception=traceback.format_exc()
                )

            if result.get_child_by_name('attributes-list'):
                fpolicy_event_attributes = result['attributes-list']['fpolicy-event-options-config']

                # get file operations as list
                file_operations = []
                if fpolicy_event_attributes.get_child_by_name('file-operations'):
                    for file_operation in fpolicy_event_attributes.get_child_by_name('file-operations').get_children():
                        file_operations.append(file_operation.get_content())

                # get filter string as list
                filters = []
                if fpolicy_event_attributes.get_child_by_name('filter-string'):
                    # renamed loop variable: avoid shadowing builtin 'filter'
                    for event_filter in fpolicy_event_attributes.get_child_by_name('filter-string').get_children():
                        filters.append(event_filter.get_content())

                protocol = ""
                if fpolicy_event_attributes.get_child_by_name('protocol'):
                    protocol = fpolicy_event_attributes.get_child_content('protocol')

                return_value = {
                    'vserver': fpolicy_event_attributes.get_child_content('vserver'),
                    'name': fpolicy_event_attributes.get_child_content('event-name'),
                    'file_operations': file_operations,
                    'filters': filters,
                    'protocol': protocol,
                    'volume_monitoring': self.na_helper.get_value_for_bool(
                        from_zapi=True, value=fpolicy_event_attributes.get_child_content('volume-operation')
                    )
                }

            return return_value

    def create_fpolicy_event(self):
        """
        Create an FPolicy policy event
        :return: nothing
        """
        if self.use_rest:
            api = "/protocols/fpolicy/%s/events" % (self.vserver_uuid)
            body = {
                'name': self.parameters['name']
            }

            if 'protocol' in self.parameters:
                body['protocol'] = self.parameters['protocol']
            if 'volume_monitoring' in self.parameters:
                body['volume_monitoring'] = self.parameters['volume_monitoring']

            if 'filters' in self.parameters:
                body['filters'] = self.list_to_dict(self.parameters['filters'])
            if 'file_operations' in self.parameters:
                body['file_operations'] = self.list_to_dict(self.parameters['file_operations'])

            dummy, error = self.rest_api.post(api, body)

            if error:
                self.module.fail_json(msg=error)

        else:
            fpolicy_event_obj = netapp_utils.zapi.NaElement('fpolicy-policy-event-create')
            fpolicy_event_obj.add_new_child('event-name', self.parameters['name'])

            if 'file_operations' in self.parameters:
                file_operation_obj = netapp_utils.zapi.NaElement('file-operations')
                for file_operation in self.parameters['file_operations']:
                    file_operation_obj.add_new_child('fpolicy-operation', file_operation)
                fpolicy_event_obj.add_child_elem(file_operation_obj)

            if 'filters' in self.parameters:
                filter_string_obj = netapp_utils.zapi.NaElement('filter-string')
                # renamed loop variable: avoid shadowing builtin 'filter'
                for event_filter in self.parameters['filters']:
                    filter_string_obj.add_new_child('fpolicy-filter', event_filter)
                fpolicy_event_obj.add_child_elem(filter_string_obj)

            if 'protocol' in self.parameters:
                fpolicy_event_obj.add_new_child('protocol', self.parameters['protocol'])

            if 'volume_monitoring' in self.parameters:
                fpolicy_event_obj.add_new_child(
                    'volume-operation', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['volume_monitoring'])
                )

            try:
                self.server.invoke_successfully(fpolicy_event_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error creating fPolicy policy event %s on vserver %s: %s' % (
                    self.parameters['name'], self.parameters['vserver'], to_native(error)),
                    exception=traceback.format_exc())

    def modify_fpolicy_event(self, modify):
        """
        Modify an FPolicy policy event
        :return: nothing
        """
        if self.use_rest:
            api = "/private/cli/vserver/fpolicy/policy/event"
            query = {
                'vserver': self.parameters['vserver'],
                'event-name': self.parameters['name']
            }
            body = {}
            # protocol and file_operations must be parsed into the API together
            # if filters exists filters, protocol and file_operations must be parsed together.
            for parameter in 'protocol', 'filters', 'file_operations':
                if parameter in modify:
                    body[parameter] = modify[parameter]
                elif parameter in self.parameters:
                    body[parameter] = self.parameters[parameter]
            if 'volume_monitoring' in modify:
                body['volume-operation'] = modify['volume_monitoring']

            dummy, error = self.rest_api.patch(api, body, query)
            if error:
                self.module.fail_json(msg=error)

        else:
            fpolicy_event_obj = netapp_utils.zapi.NaElement('fpolicy-policy-event-modify')
            fpolicy_event_obj.add_new_child('event-name', self.parameters['name'])

            if 'file_operations' in self.parameters:
                file_operation_obj = netapp_utils.zapi.NaElement('file-operations')
                for file_operation in self.parameters['file_operations']:
                    file_operation_obj.add_new_child('fpolicy-operation', file_operation)
                fpolicy_event_obj.add_child_elem(file_operation_obj)

            if 'filters' in self.parameters:
                filter_string_obj = netapp_utils.zapi.NaElement('filter-string')
                # renamed loop variable: avoid shadowing builtin 'filter'
                for event_filter in self.parameters['filters']:
                    filter_string_obj.add_new_child('fpolicy-filter', event_filter)
                fpolicy_event_obj.add_child_elem(filter_string_obj)

            if 'protocol' in self.parameters:
                fpolicy_event_obj.add_new_child('protocol', self.parameters['protocol'])

            if 'volume_monitoring' in self.parameters:
                fpolicy_event_obj.add_new_child(
                    'volume-operation', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['volume_monitoring'])
                )

            try:
                self.server.invoke_successfully(fpolicy_event_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying fPolicy policy event %s on vserver %s: %s' % (
                    self.parameters['name'], self.parameters['vserver'], to_native(error)),
                    exception=traceback.format_exc())

    def delete_fpolicy_event(self):
        """
        Delete an FPolicy policy event
        :return: nothing
        """
        if self.use_rest:
            api = "/protocols/fpolicy/%s/events/%s" % (self.vserver_uuid, self.parameters['name'])

            dummy, error = self.rest_api.delete(api)
            if error:
                self.module.fail_json(msg=error)
        else:
            fpolicy_event_obj = netapp_utils.zapi.NaElement('fpolicy-policy-event-delete')
            fpolicy_event_obj.add_new_child('event-name', self.parameters['name'])

            try:
                self.server.invoke_successfully(fpolicy_event_obj, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error deleting fPolicy policy event %s on vserver %s: %s' % (
                    self.parameters['name'], self.parameters['vserver'],
                    to_native(error)), exception=traceback.format_exc()
                )

    def apply(self):
        """Apply the requested state: create, modify or delete the event."""
        if self.use_rest:
            self.vserver_uuid = self.get_vserver_uuid()

        current, modify = self.get_fpolicy_event(), None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)

        if cd_action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_fpolicy_event()
            elif cd_action == 'delete':
                self.delete_fpolicy_event()
            elif modify:
                self.modify_fpolicy_event(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)


def main():
    """
    Execute action from playbook
    """
    command = NetAppOntapFpolicyEvent()
    command.apply()


if __name__ == '__main__':
    main()
+DOCUMENTATION = ''' +module: na_ontap_fpolicy_ext_engine +short_description: NetApp ONTAP fPolicy external engine configuration. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.4.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Create, delete or modify fpolicy external engine. +options: + state: + description: + - Whether the fPolicy external engine is present or not + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - the name of the vserver to create the external engine on + required: true + type: str + + name: + description: + - Name of the external engine. + required: true + type: str + + certificate_ca: + description: + - Certificate authority name. No default value is set for this field. + type: str + + certificate_common_name: + description: + - FQDN or custom common name of certificate. No default value is set for this field. + type: str + + certificate_serial: + description: + - Serial number of certificate. No default value is set for this field. + type: str + + extern_engine_type: + description: + - External engine type. If the engine is asynchronous, no reply is sent from FPolicy servers. Default value set for this field is synchronous. + choices: ['synchronous', 'asynchronous'] + type: str + + is_resiliency_enabled: + description: + - Indicates if the resiliency with this engine is required. + - If set to true, the notifications will be stored in a path as resiliency_directory_path + - If it is false, the notifications will not be stored. Default value is false. + type: bool + + max_connection_retries: + description: + - Number of times storage appliance will attempt to establish a broken connection to FPolicy server. Default value set for this field is 5. + type: int + + max_server_reqs: + description: + - Maximum number of outstanding screen requests that will be queued for an FPolicy Server. Default value set for this field is 50. 
+ type: int + + port: + description: + - Port number of the FPolicy server application. + type: int + + primary_servers: + description: + - Primary FPolicy servers. + type: list + elements: str + + recv_buffer_size: + description: + - Receive buffer size of connected socket for FPolicy Server. Default value set for this field is 256 kilobytes (256Kb). + type: int + + resiliency_directory_path: + description: + - Directory path under Vserver for storing file access notifications. File access notifications will be stored in a generated file during the outage time. + - The path is the full, user visible path relative to the Vserver root, and it might be crossing junction mount points. + type: str + + secondary_servers: + description: + - Secondary FPolicy servers. No default value is set for this field. + type: list + elements: str + + send_buffer_size: + description: + - Send buffer size of connected socket for FPolicy Server. Default value set for this field is 256 kilobytes (256Kb). + type: int + + ssl_option: + description: + - SSL option for external communication. 
No default value is set for this field + choices: ['no_auth', 'server_auth', 'mutual_auth'] + type: str + +''' + +EXAMPLES = """ +- name: Create fPolicy external engine + na_ontap_fpolicy_ext_engine: + state: present + vserver: svm1 + name: fpolicy_ext_engine + port: 8787 + extern_engine_type: asynchronous + primary_servers: ['10.11.12.13', '10.11.12.14'] + ssl_option: no_auth + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + +- name: Modify fPolicy external engine + na_ontap_fpolicy_ext_engine: + state: present + vserver: svm1 + name: fpolicy_ext_engine + port: 7878 + extern_engine_type: synchronous + primary_servers: ['10.11.12.15', '10.11.12.16'] + ssl_option: server_auth + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + +- name: Delete fPolicy external engine + na_ontap_fpolicy_ext_engine: + state: absent + vserver: svm1 + name: fpolicy_engine + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +class NetAppOntapFpolicyExtEngine(): + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + certificate_ca=dict(required=False, type='str'), + certificate_common_name=dict(required=False, type='str'), + 
certificate_serial=dict(required=False, type='str'), + extern_engine_type=dict(required=False, type='str', choices=['synchronous', 'asynchronous']), + is_resiliency_enabled=dict(required=False, type='bool'), + max_connection_retries=dict(required=False, type='int'), + max_server_reqs=dict(required=False, type='int'), + port=dict(required=False, type='int'), + primary_servers=dict(required=False, type='list', elements='str'), + recv_buffer_size=dict(required=False, type='int'), + resiliency_directory_path=dict(required=False, type='str'), + secondary_servers=dict(required=False, type='list', elements='str'), + send_buffer_size=dict(required=False, type='int'), + ssl_option=dict(required=False, type='str', choices=['no_auth', 'server_auth', 'mutual_auth']), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[('state', 'present', ['ssl_option', 'primary_servers', 'port'])], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def create_rest_body(self): + """ + Create an fPolicy body for a create operation + :return: body as dict + """ + + body = { + 'vserver': self.parameters['vserver'], + 'engine-name': self.parameters['name'], + 'primary_servers': self.parameters['primary_servers'], + 'port': self.parameters['port'], + 'ssl_option': self.parameters['ssl_option'] + } + + list_of_options = ['secondary_servers', 'is_resiliency_enabled', 'resiliency_directory_path', + 'max_connection_retries', 'max_server_reqs', 'recv_buffer_size', 'send_buffer_size', + 'certificate_ca', 'certificate_common_name', 'certificate_serial', 
'extern_engine_type'] + + for option in list_of_options: + if option in self.parameters: + body[option] = self.parameters[option] + + return body + + def create_zapi_api(self, api): + """ + Create an the ZAPI API request for fpolicy modify and create + :return: ZAPI API object + """ + fpolicy_ext_engine_obj = netapp_utils.zapi.NaElement(api) + fpolicy_ext_engine_obj.add_new_child('engine-name', self.parameters['name']) + fpolicy_ext_engine_obj.add_new_child('port-number', self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters['port'])) + fpolicy_ext_engine_obj.add_new_child('ssl-option', self.parameters['ssl_option']) + + primary_servers_obj = netapp_utils.zapi.NaElement('primary-servers') + + for primary_server in self.parameters['primary_servers']: + primary_servers_obj.add_new_child('ip-address', primary_server) + fpolicy_ext_engine_obj.add_child_elem(primary_servers_obj) + + if 'secondary_servers' in self.parameters: + secondary_servers_obj = netapp_utils.zapi.NaElement('secondary-servers') + + for secondary_server in self.parameters['secondary_servers']: + primary_servers_obj.add_new_child('ip-address', secondary_server) + fpolicy_ext_engine_obj.add_child_elem(secondary_servers_obj) + + if 'is_resiliency_enabled' in self.parameters: + fpolicy_ext_engine_obj.add_new_child( + 'is-resiliency-enabled', + self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_resiliency_enabled']) + ) + if 'resiliency_directory_path' in self.parameters: + fpolicy_ext_engine_obj.add_new_child('resiliency-directory-path', self.parameters['resiliency_directory_path']) + if 'max_connection_retries' in self.parameters: + fpolicy_ext_engine_obj.add_new_child( + 'max-connection-retries', + self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters['max_connection_retries']) + ) + if 'max_server_reqs' in self.parameters: + fpolicy_ext_engine_obj.add_new_child( + 'max-server-requests', + self.na_helper.get_value_for_int(from_zapi=False, 
value=self.parameters['max_server_reqs']) + ) + if 'recv_buffer_size' in self.parameters: + fpolicy_ext_engine_obj.add_new_child( + 'recv-buffer-size', + self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters['recv_buffer_size']) + ) + if 'send_buffer_size' in self.parameters: + fpolicy_ext_engine_obj.add_new_child( + 'send-buffer-size', + self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters['send_buffer_size']) + ) + if 'certificate_ca' in self.parameters: + fpolicy_ext_engine_obj.add_new_child('certificate-ca', self.parameters['certificate_ca']) + if 'certificate_common_name' in self.parameters: + fpolicy_ext_engine_obj.add_new_child('certificate-common-name', self.parameters['certificate_common_name']) + if 'certificate_serial' in self.parameters: + fpolicy_ext_engine_obj.add_new_child('certificate-serial', self.parameters['certificate_serial']) + if 'extern_engine_type' in self.parameters: + fpolicy_ext_engine_obj.add_new_child('extern-engine-type', self.parameters['extern_engine_type']) + + return fpolicy_ext_engine_obj + + def create_fpolicy_ext_engine(self): + """ + Create an fPolicy external engine + :return: nothing + """ + + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy/external-engine" + body = self.create_rest_body() + + dummy, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg=error) + else: + fpolicy_ext_engine_obj = self.create_zapi_api('fpolicy-policy-external-engine-create') + + try: + self.server.invoke_successfully(fpolicy_ext_engine_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error creating fPolicy external engine %s on vserver %s: %s' % + (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + def modify_fpolicy_ext_engine(self, modify): + """ + Modify an fPolicy external engine + :return: nothing + """ + + if self.use_rest: + api = 
"/private/cli/vserver/fpolicy/policy/external-engine" + query = { + 'vserver': self.parameters['vserver'], + 'engine-name': self.parameters['name'] + } + + dummy, error = self.rest_api.patch(api, modify, query) + if error: + self.module.fail_json(msg=error) + else: + fpolicy_ext_engine_obj = self.create_zapi_api('fpolicy-policy-external-engine-modify') + + try: + self.server.invoke_successfully(fpolicy_ext_engine_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error modifying fPolicy external engine %s on vserver %s: %s' % + (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + def delete_fpolicy_ext_engine(self): + """ + Delete an fPolicy external engine + :return: nothing + """ + + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy/external-engine" + query = { + 'vserver': self.parameters['vserver'], + 'engine-name': self.parameters['name'] + } + + dummy, error = self.rest_api.delete(api, query) + + if error: + self.module.fail_json(msg=error) + else: + + fpolicy_ext_engine_obj = netapp_utils.zapi.NaElement('fpolicy-policy-external-engine-delete') + fpolicy_ext_engine_obj.add_new_child('engine-name', self.parameters['name']) + + try: + self.server.invoke_successfully(fpolicy_ext_engine_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error deleting fPolicy external engine %s on vserver %s: %s' % + (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + def get_fpolicy_ext_engine(self): + """ + Check to see if the fPolicy external engine exists or not + :return: dict of engine properties if exist, None if not + """ + return_value = None + + if self.use_rest: + fields = [ + "vserver", + "engine-name", + "primary-servers", + "port", + "secondary-servers", + "extern-engine-type", + "ssl-option", + "max-connection-retries", + "max-server-reqs", + 
"certificate-common-name", + "certificate-serial", + "certificate-ca", + "recv-buffer-size", + "send-buffer-size", + "is-resiliency-enabled", + "resiliency-directory-path" + ] + + api = "private/cli/vserver/fpolicy/policy/external-engine" + query = { + 'fields': ','.join(fields), + 'engine-name': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + message, error = self.rest_api.get(api, query) + + return_info, error = rrh.check_for_0_or_1_records(api, message, error) + if return_info is None: + return None + + return_value = message['records'][0] + return return_value + else: + + fpolicy_ext_engine_obj = netapp_utils.zapi.NaElement('fpolicy-policy-external-engine-get-iter') + fpolicy_ext_engine_config = netapp_utils.zapi.NaElement('fpolicy-external-engine-info') + fpolicy_ext_engine_config.add_new_child('engine-name', self.parameters['name']) + fpolicy_ext_engine_config.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(fpolicy_ext_engine_config) + fpolicy_ext_engine_obj.add_child_elem(query) + + try: + result = self.server.invoke_successfully(fpolicy_ext_engine_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error searching for fPolicy engine %s on vserver %s: %s' % + (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + if result.get_child_by_name('attributes-list'): + fpolicy_ext_engine_attributes = result['attributes-list']['fpolicy-external-engine-info'] + + primary_servers = [] + primary_servers_elem = fpolicy_ext_engine_attributes.get_child_by_name('primary-servers') + for primary_server in primary_servers_elem.get_children(): + primary_servers.append(primary_server.get_content()) + + secondary_servers = [] + if fpolicy_ext_engine_attributes.get_child_by_name('secondary-servers'): + secondary_servers_elem = 
fpolicy_ext_engine_attributes.get_child_by_name('secondary-servers') + + for secondary_server in secondary_servers_elem.get_children(): + secondary_servers.append(secondary_server.get_content()) + + return_value = { + 'vserver': fpolicy_ext_engine_attributes.get_child_content('vserver'), + 'name': fpolicy_ext_engine_attributes.get_child_content('engine-name'), + 'certificate_ca': fpolicy_ext_engine_attributes.get_child_content('certificate-ca'), + 'certificate_common_name': fpolicy_ext_engine_attributes.get_child_content('certificate-common-name'), + 'certificate_serial': fpolicy_ext_engine_attributes.get_child_content('certificate-serial'), + 'extern_engine_type': fpolicy_ext_engine_attributes.get_child_content('extern-engine-type'), + 'is_resiliency_enabled': self.na_helper.get_value_for_bool( + from_zapi=True, + value=fpolicy_ext_engine_attributes.get_child_content('is-resiliency-enabled') + ), + 'max_connection_retries': self.na_helper.get_value_for_int( + from_zapi=True, + value=fpolicy_ext_engine_attributes.get_child_content('max-connection-retries') + ), + 'max_server_reqs': self.na_helper.get_value_for_int( + from_zapi=True, + value=fpolicy_ext_engine_attributes.get_child_content('max-server-requests') + ), + 'port': self.na_helper.get_value_for_int( + from_zapi=True, + value=fpolicy_ext_engine_attributes.get_child_content('port-number') + ), + 'primary_servers': primary_servers, + 'secondary_servers': secondary_servers, + 'recv_buffer_size': self.na_helper.get_value_for_int( + from_zapi=True, + value=fpolicy_ext_engine_attributes.get_child_content('recv-buffer-size') + ), + 'resiliency_directory_path': fpolicy_ext_engine_attributes.get_child_content('resiliency-directory-path'), + 'send_buffer_size': self.na_helper.get_value_for_int( + from_zapi=True, + value=fpolicy_ext_engine_attributes.get_child_content('send-buffer-size') + ), + 'ssl_option': fpolicy_ext_engine_attributes.get_child_content('ssl-option'), + } + + return return_value + + def apply(self): 
+ current, modify = self.get_fpolicy_ext_engine(), None + + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed: + if not self.module.check_mode: + if cd_action == 'create': + self.create_fpolicy_ext_engine() + elif cd_action == 'delete': + self.delete_fpolicy_ext_engine() + elif modify: + self.modify_fpolicy_ext_engine(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapFpolicyExtEngine() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_policy.py new file mode 100644 index 000000000..47d9143cb --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_policy.py @@ -0,0 +1,378 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: na_ontap_fpolicy_policy +short_description: NetApp ONTAP - Create, delete or modify an FPolicy policy. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete or modify an FPolicy policy. Fpolicy scope must exist before running this module. 
+- FPolicy is a file access notification framework that enables an administrator to monitor file and directory access in storage configured for CIFS and NFS. +options: + state: + description: + - Whether the fPolicy policy should exist or not + choices: ['present', 'absent'] + type: str + default: present + + vserver: + description: + - the name of the vserver to create the policy on + type: str + required: True + + name: + description: + - Name of the policy. + type: str + required: True + + allow_privileged_access: + description: + - Specifies if privileged access should be given to FPolicy servers registered for the policy. + type: bool + + engine: + description: + - Name of the Engine. External engines must be created prior to running this task. + type: str + + events: + description: + - Events for file access monitoring. + type: list + elements: str + required: True + + is_mandatory: + description: + - Specifies the action to take on a file access event in the case when all primary and secondary servers are down or no response is received from the + - FPolicy servers within a given timeout period. When True, file access events will be denied under these circumstances + type: bool + + is_passthrough_read_enabled: + description: + - Specifies if passthrough-read should be allowed to FPolicy servers registered for the policy. + type: bool + + privileged_user_name: + description: + - User name for privileged access. 
+ type: str + +''' + +EXAMPLES = """ +- name: Create FPolicy policy + na_ontap_fpolicy_policy: + state: present + vserver: svm1 + name: fpolicy_policy + events: fcpolicy_event + engine: fpolicy_ext_engine + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + +- name: Modify FPolicy policy + na_ontap_fpolicy_policy: + state: present + vserver: svm1 + name: fpolicy_policy + events: fcpolicy_event + is_mandatory: false + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + +- name: Delete FPolicy policy + na_ontap_fpolicy_policy: + state: absent + vserver: svm1 + name: fpolicy_policy + events: fcpolicy_event + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI + + +class NetAppOntapFpolicyPolicy(): + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + allow_privileged_access=dict(required=False, type='bool'), + engine=dict(required=False, type='str'), + events=dict(required=True, type='list', elements='str'), + is_mandatory=dict(required=False, type='bool'), + is_passthrough_read_enabled=dict(required=False, type='bool'), + privileged_user_name=dict(required=False, type='str') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = 
self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_fpolicy_policy(self): + """ + Check if FPolicy policy exists, if it exists get the current state of the policy. + """ + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy" + query = { + 'vserver': self.parameters['vserver'], + 'policy-name': self.parameters['name'], + 'fields': 'events,engine,allow-privileged-access,is-mandatory,is-passthrough-read-enabled,privileged-user-name' + } + + message, error = self.rest_api.get(api, query) + if error: + self.module.fail_json(msg=error) + if len(message.keys()) == 0: + return None + if 'records' in message and len(message['records']) == 0: + return None + if 'records' not in message: + error = "Unexpected response in get_fpolicy_policy from %s: %s" % (api, repr(message)) + self.module.fail_json(msg=error) + return_value = { + 'vserver': message['records'][0]['vserver'], + 'name': message['records'][0]['policy_name'], + 'events': message['records'][0]['events'], + 'allow_privileged_access': message['records'][0]['allow_privileged_access'], + 'engine': message['records'][0]['engine'], + 'is_mandatory': message['records'][0]['is_mandatory'], + 'is_passthrough_read_enabled': message['records'][0]['is_passthrough_read_enabled'] + } + if 'privileged_user_name' in message['records'][0]: + return_value['privileged_user_name'] = message['records'][0]['privileged_user_name'] + + return return_value + + else: + return_value = None + + fpolicy_policy_obj = netapp_utils.zapi.NaElement('fpolicy-policy-get-iter') + fpolicy_policy_config = netapp_utils.zapi.NaElement('fpolicy-policy-info') + 
fpolicy_policy_config.add_new_child('policy-name', self.parameters['name']) + fpolicy_policy_config.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(fpolicy_policy_config) + fpolicy_policy_obj.add_child_elem(query) + + try: + result = self.server.invoke_successfully(fpolicy_policy_obj, True) + + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error searching for fPolicy policy %s on vserver %s: %s' % (self.parameters['name'], self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + if result.get_child_by_name('attributes-list'): + fpolicy_policy_attributes = result['attributes-list']['fpolicy-policy-info'] + events = [] + if fpolicy_policy_attributes.get_child_by_name('events'): + for event in fpolicy_policy_attributes.get_child_by_name('events').get_children(): + events.append(event.get_content()) + + return_value = { + 'vserver': fpolicy_policy_attributes.get_child_content('vserver'), + 'name': fpolicy_policy_attributes.get_child_content('policy-name'), + 'events': events, + 'allow_privileged_access': self.na_helper.get_value_for_bool( + from_zapi=True, value=fpolicy_policy_attributes.get_child_content('allow-privileged-access')), + 'engine': fpolicy_policy_attributes.get_child_content('engine-name'), + 'is_mandatory': self.na_helper.get_value_for_bool( + from_zapi=True, value=fpolicy_policy_attributes.get_child_content('is-mandatory')), + 'is_passthrough_read_enabled': self.na_helper.get_value_for_bool( + from_zapi=True, value=fpolicy_policy_attributes.get_child_content('is-passthrough-read-enabled')), + 'privileged_user_name': fpolicy_policy_attributes.get_child_content('privileged-user-name') + } + + return return_value + + def create_fpolicy_policy(self): + """ + Create an FPolicy policy. 
+ """ + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy" + body = { + 'vserver': self.parameters['vserver'], + 'policy-name': self.parameters['name'], + 'events': self.parameters['events'] + } + for parameter in ('engine', 'allow_privileged_access', 'is_mandatory', 'is_passthrough_read_enabled', 'privileged_user_name'): + if parameter in self.parameters: + body[parameter.replace('_', '-')] = self.parameters[parameter] + + dummy, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg=error) + + else: + fpolicy_policy_obj = netapp_utils.zapi.NaElement('fpolicy-policy-create') + fpolicy_policy_obj.add_new_child('policy-name', self.parameters['name']) + if 'is_mandatory' in self.parameters: + fpolicy_policy_obj.add_new_child('is-mandatory', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_mandatory'])) + if 'engine' in self.parameters: + fpolicy_policy_obj.add_new_child('engine-name', self.parameters['engine']) + if 'allow_privileged_access' in self.parameters: + fpolicy_policy_obj.add_new_child( + 'allow-privileged-access', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['allow_privileged_access']) + ) + if 'is_passthrough_read_enabled' in self.parameters: + fpolicy_policy_obj.add_new_child( + 'is-passthrough-read-enabled', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_passthrough_read_enabled']) + ) + events_obj = netapp_utils.zapi.NaElement('events') + for event in self.parameters['events']: + events_obj.add_new_child('event-name', event) + fpolicy_policy_obj.add_child_elem(events_obj) + + if 'privileged_user_name' in self.parameters: + fpolicy_policy_obj.add_new_child('privileged-user-name', self.parameters['privileged_user_name']) + try: + self.server.invoke_successfully(fpolicy_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error creating fPolicy policy %s on vserver %s: %s' % 
(self.parameters['name'], self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc() + ) + + def modify_fpolicy_policy(self, modify): + """ + Modify an FPolicy policy. + """ + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy" + query = {'vserver': self.parameters['vserver']} + query['policy-name'] = self.parameters['name'] + dummy, error = self.rest_api.patch(api, modify, query) + if error: + self.module.fail_json(msg=error) + else: + fpolicy_policy_obj = netapp_utils.zapi.NaElement('fpolicy-policy-modify') + fpolicy_policy_obj.add_new_child('policy-name', self.parameters['name']) + if 'is_mandatory' in self.parameters: + fpolicy_policy_obj.add_new_child('is-mandatory', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_mandatory'])) + if 'engine' in self.parameters: + fpolicy_policy_obj.add_new_child('engine-name', self.parameters['engine']) + if 'allow_privileged_access' in self.parameters: + fpolicy_policy_obj.add_new_child( + 'allow-privileged-access', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['allow_privileged_access']) + ) + if 'is_passthrough_read_enabled' in self.parameters: + fpolicy_policy_obj.add_new_child( + 'is-passthrough-read-enabled', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_passthrough_read_enabled']) + ) + events_obj = netapp_utils.zapi.NaElement('events') + for event in self.parameters['events']: + events_obj.add_new_child('event-name', event) + fpolicy_policy_obj.add_child_elem(events_obj) + + if 'privileged_user_name' in self.parameters: + fpolicy_policy_obj.add_new_child('privileged-user-name', self.parameters['privileged_user_name']) + try: + self.server.invoke_successfully(fpolicy_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error modifying fPolicy policy %s on vserver %s: %s' % + (self.parameters['name'], self.parameters['vserver'], to_native(error)), 
exception=traceback.format_exc() + ) + + def delete_fpolicy_policy(self): + """ + Delete an FPolicy policy. + """ + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy" + body = { + 'vserver': self.parameters['vserver'], + 'policy-name': self.parameters['name'] + } + dummy, error = self.rest_api.delete(api, body) + if error: + self.module.fail_json(msg=error) + + else: + fpolicy_policy_obj = netapp_utils.zapi.NaElement('fpolicy-policy-delete') + fpolicy_policy_obj.add_new_child('policy-name', self.parameters['name']) + + try: + self.server.invoke_successfully(fpolicy_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error deleting fPolicy policy %s on vserver %s: %s' % + (self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + def apply(self): + current = self.get_fpolicy_policy() + modify = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed: + if not self.module.check_mode: + if cd_action == 'create': + self.create_fpolicy_policy() + elif cd_action == 'delete': + self.delete_fpolicy_policy() + elif modify: + self.modify_fpolicy_policy(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapFpolicyPolicy() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_scope.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_scope.py new file mode 100644 index 000000000..a547282a3 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_scope.py @@ -0,0 +1,516 @@ +#!/usr/bin/python + +# (c) 
2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_fpolicy_scope +short_description: NetApp ONTAP - Create, delete or modify an FPolicy policy scope configuration. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.4.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete or modify an FPolicy policy scope. +options: + state: + description: + - Whether the FPolicy policy scope is present or not + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - the name of the vserver to create the scope on + required: true + type: str + + name: + description: + - Name of the policy. The FPolicy policy must exist for the scope to be created. + required: true + type: str + + check_extensions_on_directories: + description: + - Indicates whether directory names are also subjected to extensions check, similar to file names. + - By default, the value is true if policy is configured with Native engine, false otherwise. + type: bool + + export_policies_to_exclude: + description: + - Export Policies to exclude for file access monitoring. By default no export policy is selected. + type: list + elements: str + + export_policies_to_include: + description: + - Export policies to include for file access monitoring. By default no export policy is selected. + type: list + elements: str + + file_extensions_to_exclude: + description: + - File extensions excluded for screening. By default no file extension is selected. + type: list + elements: str + + file_extensions_to_include: + description: + - File extensions included for screening. By default no file extension is selected. 
+ type: list + elements: str + + is_monitoring_of_objects_with_no_extension_enabled: + description: + - Indicates whether monitoring of objects with no extension is required. By default, the value is false. + type: bool + + shares_to_exclude: + description: + - Shares to exclude for file access monitoring. By default no share is selected. + type: list + elements: str + + shares_to_include: + description: + - Shares to include for file access monitoring. By default no share is selected. + type: list + elements: str + + volumes_to_exclude: + description: + - Volumes that are inactive for the file policy. The list can include items which are regular expressions, such as 'vol*' or 'user?'. + - Note that if a policy has both an exclude list and an include list, the include list is ignored by the filer when processing user requests. + - By default no volume is selected. + type: list + elements: str + + volumes_to_include: + description: + - Volumes that are active for the file policy. The list can include items which are regular expressions, such as 'vol*' or 'user?'. + - By default no volume is selected. 
+ type: list + elements: str + +''' + +EXAMPLES = """ + - name: Create FPolicy scope + na_ontap_fpolicy_scope: + state: present + vserver: GBSMNAS80LD + name: policy1 + export_policies_to_include: export1 + shares_to_include: share1 + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + use_rest: "{{ use_rest }}" + + - name: Modify FPolicy scope + na_ontap_fpolicy_scope: + state: present + vserver: GBSMNAS80LD + name: policy1 + export_policies_to_include: export1,export2 + shares_to_include: share1,share2 + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + use_rest: "{{ use_rest }}" + + - name: Delete FPolicy scope + na_ontap_fpolicy_scope: + state: absent + vserver: GBSMNAS80LD + name: policy1 + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + use_rest: "{{ use_rest }}" + +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +class NetAppOntapFpolicyScope(): + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + check_extensions_on_directories=dict(required=False, type='bool'), + export_policies_to_exclude=dict(required=False, type='list', elements='str'), + export_policies_to_include=dict(required=False, type='list', elements='str'), + file_extensions_to_exclude=dict(required=False, 
type='list', elements='str'), + file_extensions_to_include=dict(required=False, type='list', elements='str'), + is_monitoring_of_objects_with_no_extension_enabled=dict(required=False, type='bool'), + shares_to_exclude=dict(required=False, type='list', elements='str'), + shares_to_include=dict(required=False, type='list', elements='str'), + volumes_to_exclude=dict(required=False, type='list', elements='str'), + volumes_to_include=dict(required=False, type='list', elements='str') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_fpolicy_scope(self): + """ + Check to see if the fPolicy scope exists or not + :return: dict of scope properties if exist, None if not + """ + return_value = None + + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy/scope" + query = { + 'vserver': self.parameters['vserver'], + 'policy-name': self.parameters['name'], + 'fields': 'shares-to-include,shares-to-exclude,volumes-to-include,volumes-to-exclude,export-policies-to-include,\ +export-policies-to-exclude,file-extensions-to-include,file-extensions-to-exclude,\ +is-file-extension-check-on-directories-enabled,is-monitoring-of-objects-with-no-extension-enabled' + } + message, error = self.rest_api.get(api, query) + records, error = rrh.check_for_0_or_more_records(api, message, error) + if error: + self.module.fail_json(msg=error) + + if records is not None: + return_value = { + 'name': records[0]['policy_name'], + 'check_extensions_on_directories': 
records[0]['is_file_extension_check_on_directories_enabled'], + 'is_monitoring_of_objects_with_no_extension_enabled': records[0]['is_monitoring_of_objects_with_no_extension_enabled'] + } + + for field in ( + 'export_policies_to_exclude', 'export_policies_to_include', 'export_policies_to_include', 'file_extensions_to_exclude', + 'file_extensions_to_include', 'shares_to_exclude', 'shares_to_include', 'volumes_to_exclude', 'volumes_to_include' + ): + return_value[field] = [] + if field in records[0]: + return_value[field] = records[0][field] + + return return_value + + else: + fpolicy_scope_obj = netapp_utils.zapi.NaElement('fpolicy-policy-scope-get-iter') + fpolicy_scope_config = netapp_utils.zapi.NaElement('fpolicy-scope-config') + fpolicy_scope_config.add_new_child('policy-name', self.parameters['name']) + fpolicy_scope_config.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(fpolicy_scope_config) + fpolicy_scope_obj.add_child_elem(query) + + try: + result = self.server.invoke_successfully(fpolicy_scope_obj, True) + + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error searching for FPolicy policy scope %s on vserver %s: %s' % ( + self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + if result.get_child_by_name('attributes-list'): + fpolicy_scope_attributes = result['attributes-list']['fpolicy-scope-config'] + param_dict = { + 'export_policies_to_exclude': [], + 'export_policies_to_include': [], + 'file_extensions_to_exclude': [], + 'file_extensions_to_include': [], + 'shares_to_exclude': [], + 'shares_to_include': [], + 'volumes_to_exclude': [], + 'volumes_to_include': [] + } + + for param in param_dict.keys(): + if fpolicy_scope_attributes.get_child_by_name(param.replace('_', '-')): + param_dict[param] = [ + child_name.get_content() for child_name in 
fpolicy_scope_attributes.get_child_by_name((param.replace('_', '-'))).get_children() + ] + + return_value = { + 'name': fpolicy_scope_attributes.get_child_content('policy-name'), + 'check_extensions_on_directories': self.na_helper.get_value_for_bool( + from_zapi=True, value=fpolicy_scope_attributes.get_child_content('check-extensions-on-directories')), + 'is_monitoring_of_objects_with_no_extension_enabled': self.na_helper.get_value_for_bool( + from_zapi=True, value=fpolicy_scope_attributes.get_child_content('is-monitoring-of-objects-with-no-extension-enabled')), + } + return_value.update(param_dict) + return return_value + + def create_fpolicy_scope(self): + """ + Create an FPolicy policy scope + :return: nothing + """ + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy/scope" + body = { + 'vserver': self.parameters['vserver'], + 'policy-name': self.parameters['name'] + } + for parameter in ( + 'export_policies_to_exclude', 'export_policies_to_include', 'export_policies_to_include', 'file_extensions_to_exclude', + 'file_extensions_to_include', 'shares_to_exclude', 'shares_to_include', 'volumes_to_exclude', 'volumes_to_include', + 'is-file-extension-check-on-directories-enabled', 'is-monitoring-of-objects-with-no-extension-enabled' + ): + if parameter in self.parameters: + body[parameter.replace('_', '-')] = self.parameters[parameter] + + dummy, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg=error) + else: + fpolicy_scope_obj = netapp_utils.zapi.NaElement('fpolicy-policy-scope-create') + fpolicy_scope_obj.add_new_child('policy-name', self.parameters['name']) + + if 'check_extensions_on_directories' in self.parameters: + fpolicy_scope_obj.add_new_child( + 'check-extensions-on-directories', self.na_helper.get_value_for_bool( + from_zapi=False, value=self.parameters['check_extensions_on_directories'] + ) + ) + + if 'is_monitoring_of_objects_with_no_extension_enabled' in self.parameters: + fpolicy_scope_obj.add_new_child( + 
'is-monitoring-of-objects-with-no-extension-enabled', self.na_helper.get_value_for_bool( + from_zapi=False, value=self.parameters['is_monitoring_of_objects_with_no_extension_enabled'] + ) + ) + + if 'export_policies_to_exclude' in self.parameters: + export_policies_to_exclude_obj = netapp_utils.zapi.NaElement('export-policies-to-exclude') + for export_policies_to_exclude in self.parameters['export_policies_to_exclude']: + export_policies_to_exclude_obj.add_new_child('string', export_policies_to_exclude) + fpolicy_scope_obj.add_child_elem(export_policies_to_exclude_obj) + + if 'export_policies_to_include' in self.parameters: + export_policies_to_include_obj = netapp_utils.zapi.NaElement('export-policies-to-include') + for export_policies_to_include in self.parameters['export_policies_to_include']: + export_policies_to_include_obj.add_new_child('string', export_policies_to_include) + fpolicy_scope_obj.add_child_elem(export_policies_to_include_obj) + + if 'file_extensions_to_exclude' in self.parameters: + file_extensions_to_exclude_obj = netapp_utils.zapi.NaElement('file-extensions-to-exclude') + for file_extensions_to_exclude in self.parameters['file_extensions_to_exclude']: + file_extensions_to_exclude_obj.add_new_child('string', file_extensions_to_exclude) + fpolicy_scope_obj.add_child_elem(file_extensions_to_exclude_obj) + + if 'file_extensions_to_include' in self.parameters: + file_extensions_to_include_obj = netapp_utils.zapi.NaElement('file-extensions-to-include') + for file_extensions_to_include in self.parameters['file_extensions_to_include']: + file_extensions_to_include_obj.add_new_child('string', file_extensions_to_include) + fpolicy_scope_obj.add_child_elem(file_extensions_to_include_obj) + + if 'shares_to_exclude' in self.parameters: + shares_to_exclude_obj = netapp_utils.zapi.NaElement('shares-to-exclude') + for shares_to_exclude in self.parameters['shares_to_exclude']: + shares_to_exclude_obj.add_new_child('string', shares_to_exclude) + 
fpolicy_scope_obj.add_child_elem(shares_to_exclude_obj) + + if 'volumes_to_exclude' in self.parameters: + volumes_to_exclude_obj = netapp_utils.zapi.NaElement('volumes-to-exclude') + for volumes_to_exclude in self.parameters['volumes_to_exclude']: + volumes_to_exclude_obj.add_new_child('string', volumes_to_exclude) + fpolicy_scope_obj.add_child_elem(volumes_to_exclude_obj) + + if 'shares_to_include' in self.parameters: + shares_to_include_obj = netapp_utils.zapi.NaElement('shares-to-include') + for shares_to_include in self.parameters['shares_to_include']: + shares_to_include_obj.add_new_child('string', shares_to_include) + fpolicy_scope_obj.add_child_elem(shares_to_include_obj) + + if 'volumes_to_include' in self.parameters: + volumes_to_include_obj = netapp_utils.zapi.NaElement('volumes-to-include') + for volumes_to_include in self.parameters['volumes_to_include']: + volumes_to_include_obj.add_new_child('string', volumes_to_include) + fpolicy_scope_obj.add_child_elem(volumes_to_include_obj) + + try: + self.server.invoke_successfully(fpolicy_scope_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error creating fPolicy policy scope %s on vserver %s: %s' % ( + self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + def modify_fpolicy_scope(self, modify): + """ + Modify an FPolicy policy scope + :return: nothing + """ + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy/scope" + query = {'vserver': self.parameters['vserver']} + query['policy-name'] = self.parameters['name'] + dummy, error = self.rest_api.patch(api, modify, query) + if error: + self.module.fail_json(msg=error) + + else: + fpolicy_scope_obj = netapp_utils.zapi.NaElement('fpolicy-policy-scope-modify') + fpolicy_scope_obj.add_new_child('policy-name', self.parameters['name']) + + if 'check_extensions_on_directories' in self.parameters: + fpolicy_scope_obj.add_new_child( + 
'check-extensions-on-directories', self.na_helper.get_value_for_bool( + from_zapi=False, value=self.parameters['check_extensions_on_directories'] + ) + ) + + if 'is_monitoring_of_objects_with_no_extension_enabled' in self.parameters: + fpolicy_scope_obj.add_new_child( + 'is-monitoring-of-objects-with-no-extension-enabled', + self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_monitoring_of_objects_with_no_extension_enabled']) + ) + + if 'export_policies_to_exclude' in self.parameters: + export_policies_to_exclude_obj = netapp_utils.zapi.NaElement('export-policies-to-exclude') + for export_policies_to_exclude in self.parameters['export_policies_to_exclude']: + export_policies_to_exclude_obj.add_new_child('string', export_policies_to_exclude) + fpolicy_scope_obj.add_child_elem(export_policies_to_exclude_obj) + + if 'export_policies_to_include' in self.parameters: + export_policies_to_include_obj = netapp_utils.zapi.NaElement('export-policies-to-include') + + for export_policies_to_include in self.parameters['export_policies_to_include']: + export_policies_to_include_obj.add_new_child('string', export_policies_to_include) + fpolicy_scope_obj.add_child_elem(export_policies_to_include_obj) + + if 'file_extensions_to_exclude' in self.parameters: + file_extensions_to_exclude_obj = netapp_utils.zapi.NaElement('file-extensions-to-exclude') + + for file_extensions_to_exclude in self.parameters['file_extensions_to_exclude']: + file_extensions_to_exclude_obj.add_new_child('string', file_extensions_to_exclude) + fpolicy_scope_obj.add_child_elem(file_extensions_to_exclude_obj) + + if 'file_extensions_to_include' in self.parameters: + file_extensions_to_include_obj = netapp_utils.zapi.NaElement('file-extensions-to-include') + + for file_extensions_to_include in self.parameters['file_extensions_to_include']: + file_extensions_to_include_obj.add_new_child('string', file_extensions_to_include) + 
fpolicy_scope_obj.add_child_elem(file_extensions_to_include_obj) + + if 'shares_to_exclude' in self.parameters: + shares_to_exclude_obj = netapp_utils.zapi.NaElement('shares-to-exclude') + + for shares_to_exclude in self.parameters['shares_to_exclude']: + shares_to_exclude_obj.add_new_child('string', shares_to_exclude) + fpolicy_scope_obj.add_child_elem(shares_to_exclude_obj) + + if 'volumes_to_exclude' in self.parameters: + volumes_to_exclude_obj = netapp_utils.zapi.NaElement('volumes-to-exclude') + + for volumes_to_exclude in self.parameters['volumes_to_exclude']: + volumes_to_exclude_obj.add_new_child('string', volumes_to_exclude) + fpolicy_scope_obj.add_child_elem(volumes_to_exclude_obj) + + if 'shares_to_include' in self.parameters: + shares_to_include_obj = netapp_utils.zapi.NaElement('shares-to-include') + + for shares_to_include in self.parameters['shares_to_include']: + shares_to_include_obj.add_new_child('string', shares_to_include) + fpolicy_scope_obj.add_child_elem(shares_to_include_obj) + + if 'volumes_to_include' in self.parameters: + volumes_to_include_obj = netapp_utils.zapi.NaElement('volumes-to-include') + + for volumes_to_include in self.parameters['volumes_to_include']: + volumes_to_include_obj.add_new_child('string', volumes_to_include) + fpolicy_scope_obj.add_child_elem(volumes_to_include_obj) + + try: + self.server.invoke_successfully(fpolicy_scope_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying fPolicy policy scope %s on vserver %s: %s' % ( + self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + def delete_fpolicy_scope(self): + """ + Delete an FPolicy policy scope + :return: nothing + """ + + if self.use_rest: + api = "/private/cli/vserver/fpolicy/policy/scope" + body = { + 'vserver': self.parameters['vserver'], + 'policy-name': self.parameters['name'] + } + dummy, error = self.rest_api.delete(api, body) + if error: + 
self.module.fail_json(msg=error) + else: + fpolicy_scope_obj = netapp_utils.zapi.NaElement('fpolicy-policy-scope-delete') + fpolicy_scope_obj.add_new_child('policy-name', self.parameters['name']) + + try: + self.server.invoke_successfully(fpolicy_scope_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error deleting fPolicy policy scope %s on vserver %s: %s' % ( + self.parameters['name'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + + def apply(self): + current, modify = self.get_fpolicy_scope(), None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_fpolicy_scope() + elif cd_action == 'delete': + self.delete_fpolicy_scope() + elif modify: + self.modify_fpolicy_scope(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapFpolicyScope() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_status.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_status.py new file mode 100644 index 000000000..8d55683ba --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_fpolicy_status.py @@ -0,0 +1,283 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_fpolicy_status +short_description: NetApp ONTAP - Enables or disables the specified fPolicy 
policy +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.4.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Enable or disable fPolicy policy. +options: + state: + description: + - Whether the fPolicy policy is enabled or disabled. + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - Name of the vserver to enable fPolicy on. + type: str + required: true + + policy_name: + description: + - Name of the policy. + type: str + required: true + + sequence_number: + description: + - Policy Sequence Number. + type: int + +notes: +- Not support check_mode. +""" + +EXAMPLES = """ + - name: Enable fPolicy policy + na_ontap_fpolicy_status: + state: present + vserver: svm1 + policy_name: fpolicy_policy + sequence_number: 10 + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + https: true + validate_certs: false + + - name: Disable fPolicy policy + na_ontap_fpolicy_status: + state: absent + vserver: svm1 + policy_name: fpolicy_policy + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + https: true + validate_certs: false + +""" + +RETURN = """ # """ + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapFpolicyStatus(object): + """ + Enables or disabled NetApp ONTAP fPolicy + """ + def __init__(self): + """ + Initialize the ONTAP fPolicy status class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + 
self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + policy_name=dict(required=True, type='str'), + sequence_number=dict(required=False, type='int') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[('state', 'present', ['sequence_number'])], + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + if self.parameters['state'] == 'present': + self.parameters['status'] = True + else: + self.parameters['status'] = False + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_fpolicy_policy_status(self): + """ + Check to see the status of the fPolicy policy + :return: dict of status properties + """ + return_value = None + + if self.use_rest: + api = '/protocols/fpolicy' + query = { + 'svm.name': self.parameters['vserver'], + 'fields': 'policies' + } + + message, error = self.rest_api.get(api, query) + if error: + self.module.fail_json(msg=error) + records, error = rrh.check_for_0_or_more_records(api, message, error) + if records is not None: + for policy in records[0]['policies']: + if policy['name'] == self.parameters['policy_name']: + return_value = {} + return_value['vserver'] = records[0]['svm']['name'] + return_value['policy_name'] = policy['name'] + return_value['status'] = policy['enabled'] + break + if not return_value: + self.module.fail_json(msg='Error getting fPolicy policy %s for vserver %s as policy does not exist' % + (self.parameters['policy_name'], self.parameters['vserver'])) + return return_value + + else: + + fpolicy_status_obj 
= netapp_utils.zapi.NaElement('fpolicy-policy-status-get-iter') + fpolicy_status_info = netapp_utils.zapi.NaElement('fpolicy-policy-status-info') + fpolicy_status_info.add_new_child('policy-name', self.parameters['policy_name']) + fpolicy_status_info.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(fpolicy_status_info) + fpolicy_status_obj.add_child_elem(query) + + try: + result = self.server.invoke_successfully(fpolicy_status_obj, True) + + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting status for fPolicy policy %s for vserver %s: %s' % + (self.parameters['policy_name'], self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + if result.get_child_by_name('attributes-list'): + fpolicy_status_attributes = result['attributes-list']['fpolicy-policy-status-info'] + + return_value = { + 'vserver': fpolicy_status_attributes.get_child_content('vserver'), + 'policy_name': fpolicy_status_attributes.get_child_content('policy-name'), + 'status': self.na_helper.get_value_for_bool(True, fpolicy_status_attributes.get_child_content('status')), + } + return return_value + + def get_svm_uuid(self): + """ + Gets SVM uuid based on name + :return: string of uuid + """ + api = '/svm/svms' + query = { + 'name': self.parameters['vserver'] + } + message, error = self.rest_api.get(api, query) + + if error: + self.module.fail_json(msg=error) + + return message['records'][0]['uuid'] + + def enable_fpolicy_policy(self): + """ + Enables fPolicy policy + :return: nothing + """ + + if self.use_rest: + api = '/protocols/fpolicy/%s/policies/%s' % (self.svm_uuid, self.parameters['policy_name']) + body = { + 'enabled': self.parameters['status'], + 'priority': self.parameters['sequence_number'] + } + + dummy, error = self.rest_api.patch(api, body) + if error: + self.module.fail_json(msg=error) + + else: + fpolicy_enable_obj = 
netapp_utils.zapi.NaElement('fpolicy-enable-policy') + fpolicy_enable_obj.add_new_child('policy-name', self.parameters['policy_name']) + fpolicy_enable_obj.add_new_child('sequence-number', self.na_helper.get_value_for_int(False, self.parameters['sequence_number'])) + try: + self.server.invoke_successfully(fpolicy_enable_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error enabling fPolicy policy %s on vserver %s: %s' % + (self.parameters['policy_name'], self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def disable_fpolicy_policy(self): + """ + Disables fPolicy policy + :return: nothing + """ + + if self.use_rest: + api = '/protocols/fpolicy/%s/policies/%s' % (self.svm_uuid, self.parameters['policy_name']) + body = { + 'enabled': self.parameters['status'] + } + + dummy, error = self.rest_api.patch(api, body) + if error: + self.module.fail_json(msg=error) + + else: + + fpolicy_disable_obj = netapp_utils.zapi.NaElement('fpolicy-disable-policy') + fpolicy_disable_obj.add_new_child('policy-name', self.parameters['policy_name']) + + try: + self.server.invoke_successfully(fpolicy_disable_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error disabling fPolicy policy %s on vserver %s: %s' % + (self.parameters['policy_name'], self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + if self.use_rest: + self.svm_uuid = self.get_svm_uuid() + + current = self.get_fpolicy_policy_status() + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed: + if not self.module.check_mode: + if modify['status']: + self.enable_fpolicy_policy() + elif not modify['status']: + self.disable_fpolicy_policy() + result = netapp_utils.generate_result(self.na_helper.changed, modify=modify) + self.module.exit_json(**result) + + +def main(): + """ + Enables or disables NetApp ONTAP fPolicy + 
""" + command = NetAppOntapFpolicyStatus() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py new file mode 100644 index 000000000..3093537ef --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup.py @@ -0,0 +1,697 @@ +#!/usr/bin/python +''' na_ontap_igroup + + (c) 2018-2022, NetApp, Inc + # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_igroup +short_description: NetApp ONTAP iSCSI or FC igroup configuration +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create/Delete/Rename Igroups and Modify initiators belonging to an igroup + +options: + state: + description: + - Whether the specified Igroup should exist or not. + choices: ['present', 'absent'] + type: str + default: present + + name: + description: + - The name of the igroup to manage. + required: true + type: str + + initiator_group_type: + description: + - Type of the initiator group. + - Required when C(state=present). + choices: ['fcp', 'iscsi', 'mixed'] + type: str + aliases: ['protocol'] + + from_name: + description: + - Name of igroup to rename to name. + version_added: 2.7.0 + type: str + + os_type: + description: + - OS type of the initiators within the group. + type: str + aliases: ['ostype'] + + igroups: + description: + - List of igroups to be mapped to the igroup. + - For a modify operation, this list replaces the existing igroups, or existing initiators. + - This module does not add or remove specific igroup(s) in an igroup. + - Mutually exclusive with initiator_names (initiators) and initiator_objects. 
+ - Requires ONTAP 9.9 or newer. + type: list + elements: str + version_added: 21.3.0 + + initiator_names: + description: + - List of initiators to be mapped to the igroup. + - WWPN, WWPN Alias, or iSCSI name of Initiator to add or remove. + - For a modify operation, this list replaces the existing initiators, or existing igroups. + - This module does not add or remove specific initiator(s) in an igroup. + - Mutually exclusive with igroups and initiator_objects. + - This serves the same purpose as initiator_objects, but without the comment suboption. + aliases: + - initiator + - initiators + type: list + elements: str + version_added: 21.4.0 + + initiator_objects: + description: + - List of initiators to be mapped to the igroup, with an optional comment field. + - WWPN, WWPN Alias, or iSCSI name of Initiator to add or remove. + - For a modify operation, this list replaces the existing initiators, or existing igroups. + - This module does not add or remove specific initiator(s) in an igroup. + - Mutually exclusive with initiator_names (initiators) and igroups. + - Requires ONTAP 9.9 with REST support. + type: list + elements: dict + suboptions: + name: + description: name of the initiator. + type: str + required: true + comment: + description: a more descriptive comment as the WWPN can be quite opaque. + type: str + version_added: 21.4.0 + + bind_portset: + description: + - Name of a current portset to bind to the newly created igroup. + type: str + + force_remove_initiator: + description: + - Forcibly remove the initiator even if there are existing LUNs mapped to this initiator group. + - This parameter should be used with caution. + type: bool + default: false + aliases: ['allow_delete_while_mapped'] + + vserver: + description: + - The name of the vserver to use. + required: true + type: str + +notes: + - supports check mode. + - supports ZAPI and REST. 
+''' + +EXAMPLES = ''' + - name: Create iSCSI Igroup + netapp.ontap.na_ontap_igroup: + state: present + name: ansibleIgroup3 + initiator_group_type: iscsi + os_type: linux + initiator_names: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com,abc.com:redhat.com + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create iSCSI Igroup - ONTAP 9.9 + netapp.ontap.na_ontap_igroup: + state: present + name: ansibleIgroup3 + initiator_group_type: iscsi + os_type: linux + initiator_objects: + - name: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com + comment: for test only + - name: abc.com:redhat.com + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create FC Igroup + netapp.ontap.na_ontap_igroup: + state: present + name: ansibleIgroup4 + initiator_group_type: fcp + os_type: linux + initiator_names: 20:00:00:50:56:9f:19:82 + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: rename Igroup + netapp.ontap.na_ontap_igroup: + state: present + from_name: ansibleIgroup3 + name: testexamplenewname + initiator_group_type: iscsi + os_type: linux + initiator_names: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify Igroup Initiators (replaces exisiting initiator_names) + netapp.ontap.na_ontap_igroup: + state: present + name: ansibleIgroup3 + initiator_group_type: iscsi + os_type: linux + initiator: iqn.1994-05.com.redhat:scspa0395855001.rtp.openenglab.netapp.com + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete 
Igroup + netapp.ontap.na_ontap_igroup: + state: absent + name: ansibleIgroup3 + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +''' + +RETURN = ''' +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapIgroup: + """Create/Delete/Rename Igroups and Modify initiators list""" + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str', default=None), + os_type=dict(required=False, type='str', aliases=['ostype']), + igroups=dict(required=False, type='list', elements='str'), + initiator_group_type=dict(required=False, type='str', + choices=['fcp', 'iscsi', 'mixed'], + aliases=['protocol']), + initiator_names=dict(required=False, type='list', elements='str', aliases=['initiator', 'initiators']), + initiator_objects=dict(required=False, type='list', elements='dict', options=dict( + name=dict(required=True, type='str'), + comment=dict(type='str'), + )), + vserver=dict(required=True, type='str'), + force_remove_initiator=dict(required=False, type='bool', default=False, aliases=['allow_delete_while_mapped']), + bind_portset=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[('igroups', 'initiator_names'), 
('igroups', 'initiator_objects'), ('initiator_objects', 'initiator_names'), ] + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_modify_zapi_to_rest = dict( + # initiator_group_type (protocol) cannot be changed after create + bind_portset='portset', + name='name', + os_type='os_type' + ) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + # use the new structure, a list of dict with name/comment as keys. + if self.parameters.get('initiator_names') is not None: + self.parameters['initiator_objects'] = [ + dict(name=initiator, comment=None) + for initiator in self.parameters['initiator_names']] + + if self.parameters.get('initiator_objects') is not None: + ontap_99_option = 'comment' + if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9) and\ + any(x[ontap_99_option] is not None for x in self.parameters['initiator_objects']): + msg = 'Error: in initiator_objects: %s' % self.rest_api.options_require_ontap_version(ontap_99_option, version='9.9', use_rest=self.use_rest) + self.module.fail_json(msg=msg) + # sanitize WWNs and convert to lowercase for idempotency + self.parameters['initiator_objects'] = [ + dict(name=self.na_helper.sanitize_wwn(initiator['name']), comment=initiator['comment']) + for initiator in self.parameters['initiator_objects']] + # keep a list of names as it is convenient for add and remove computations + self.parameters['initiator_names'] = [initiator['name'] for initiator in self.parameters['initiator_objects']] + + def too_old_for_rest(minimum_generation, minimum_major): + return self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, minimum_generation, minimum_major, 0) + + ontap_99_options = ['bind_portset'] + if too_old_for_rest(9, 9) and any(x in self.parameters for x in ontap_99_options): + self.module.warn('Warning: falling back to ZAPI: %s' % 
self.rest_api.options_require_ontap_version(ontap_99_options, version='9.9')) + self.use_rest = False + + ontap_99_options = ['igroups'] + if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1) and any(x in self.parameters for x in ontap_99_options): + self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_99_options, version='9.9.1')) + + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + if 'igroups' in self.parameters: + # we may need to remove existing initiators + self.parameters['initiator_names'] = list() + elif 'initiator_names' in self.parameters: + # we may need to remove existing igroups + self.parameters['igroups'] = list() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def fail_on_error(self, error, stack=False): + if error is None: + return + elements = dict(msg="Error: %s" % error) + if stack: + elements['stack'] = traceback.format_stack() + self.module.fail_json(**elements) + + def get_igroup_rest(self, name): + api = "protocols/san/igroups" + fields = 'name,uuid,svm,initiators,os_type,protocol' + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + fields += ',igroups' + query = dict(name=name, fields=fields) + query['svm.name'] = self.parameters['vserver'] + record, error = rest_generic.get_one_record(self.rest_api, api, query) + self.fail_on_error(error) + if record: + try: + igroup_details = dict( + name=record['name'], + uuid=record['uuid'], + vserver=record['svm']['name'], + os_type=record['os_type'], + initiator_group_type=record['protocol'], + name_to_uuid=dict() + ) + except KeyError as exc: + self.module.fail_json(msg='Error: unexpected igroup body: %s, KeyError on %s' % (str(record), str(exc))) + igroup_details['name_to_key'] = {} + for attr in ('igroups', 
'initiators'): + option = 'initiator_names' if attr == 'initiators' else attr + if attr in record: + igroup_details[option] = [item['name'] for item in record[attr]] + if attr == 'initiators': + igroup_details['initiator_objects'] = [dict(name=item['name'], comment=item.get('comment')) for item in record[attr]] + # for initiators, there is no uuid, so we're using name as the key + igroup_details['name_to_uuid'][option] = dict((item['name'], item.get('uuid', item['name'])) for item in record[attr]) + else: + igroup_details[option] = [] + igroup_details['name_to_uuid'][option] = {} + return igroup_details + return None + + def get_igroup(self, name): + """ + Return details about the igroup + :param: + name : Name of the igroup + + :return: Details about the igroup. None if not found. + :rtype: dict + """ + if self.use_rest: + return self.get_igroup_rest(name) + + igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter') + attributes = dict(query={'initiator-group-info': {'initiator-group-name': name, + 'vserver': self.parameters['vserver']}}) + igroup_info.translate_struct(attributes) + current = None + + try: + result = self.server.invoke_successfully(igroup_info, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + igroup_info = result.get_child_by_name('attributes-list') + initiator_group_info = igroup_info.get_child_by_name('initiator-group-info') + initiator_names = [] + initiator_objects = [] + if initiator_group_info.get_child_by_name('initiators'): + current_initiators = initiator_group_info['initiators'].get_children() + initiator_names = [initiator['initiator-name'] for initiator in current_initiators] + initiator_objects = [dict(name=initiator['initiator-name'], comment=None) for initiator in 
current_initiators] + current = { + 'initiator_names': initiator_names, + 'initiator_objects': initiator_objects, + # place holder, not used for ZAPI + 'name_to_uuid': dict(initiator_names=dict()) + } + zapi_to_params = { + 'vserver': 'vserver', + 'initiator-group-os-type': 'os_type', + 'initiator-group-portset-name': 'bind_portset', + 'initiator-group-type': 'initiator_group_type' + } + for attr in zapi_to_params: + value = igroup_info.get_child_content(attr) + if value is not None: + current[zapi_to_params[attr]] = value + return current + + def check_option_is_valid(self, option): + if self.use_rest and option in ('igroups', 'initiator_names'): + return + if option == 'initiator_names': + return + raise KeyError('check_option_is_valid: option=%s' % option) + + @staticmethod + def get_rest_name_for_option(option): + if option == 'initiator_names': + return 'initiators' + if option == 'igroups': + return option + raise KeyError('get_rest_name_for_option: option=%s' % option) + + def add_initiators_or_igroups_rest(self, uuid, option, names): + self.check_option_is_valid(option) + api = "protocols/san/igroups/%s/%s" % (uuid, self.get_rest_name_for_option(option)) + if option == 'initiator_names' and self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + in_objects = self.parameters['initiator_objects'] + records = [self.na_helper.filter_out_none_entries(item) for item in in_objects if item['name'] in names] + else: + records = [dict(name=name) for name in names] + body = dict(records=records) + dummy, error = rest_generic.post_async(self.rest_api, api, body) + self.fail_on_error(error) + + def modify_initiators_rest(self, uuid, initiator_objects): + for initiator in initiator_objects: + if 'comment' in initiator: + api = "protocols/san/igroups/%s/initiators" % uuid + body = dict(comment=initiator['comment']) + dummy, error = rest_generic.patch_async(self.rest_api, api, initiator['name'], body) + self.fail_on_error(error) + + def 
add_initiators_or_igroups(self, uuid, option, current_names): + """ + Add the list of desired initiators to igroup unless they are already set + :return: None + """ + self.check_option_is_valid(option) + # don't add if initiator_names/igroups is empty string + if self.parameters.get(option) == [''] or self.parameters.get(option) is None: + return + names_to_add = [name for name in self.parameters[option] if name not in current_names] + if self.use_rest and names_to_add: + self.add_initiators_or_igroups_rest(uuid, option, names_to_add) + else: + for name in names_to_add: + self.modify_initiator(name, 'igroup-add') + + def delete_initiator_or_igroup_rest(self, uuid, option, name_or_uuid): + self.check_option_is_valid(option) + api = "protocols/san/igroups/%s/%s" % (uuid, self.get_rest_name_for_option(option)) + query = {'allow_delete_while_mapped': True} if self.parameters['force_remove_initiator'] else None + dummy, error = rest_generic.delete_async(self.rest_api, api, name_or_uuid, query=query) + self.fail_on_error(error) + + def remove_initiators_or_igroups(self, uuid, option, current_names, mapping): + """ + Removes current names from igroup unless they are still desired + :return: None + """ + self.check_option_is_valid(option) + for name in current_names: + if name not in self.parameters.get(option, list()): + if self.use_rest: + self.delete_initiator_or_igroup_rest(uuid, option, mapping[name]) + else: + self.modify_initiator(name, 'igroup-remove') + + def modify_initiator(self, initiator, zapi): + """ + Add or remove an initiator to/from an igroup + """ + options = {'initiator-group-name': self.parameters['name'], + 'initiator': initiator} + if zapi == 'igroup-remove' and self.parameters.get('force_remove_initiator'): + options['force'] = 'true' + + igroup_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options) + + try: + self.server.invoke_successfully(igroup_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as 
error: + self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (self.parameters['name'], + to_native(error)), + exception=traceback.format_exc()) + + def create_igroup_rest(self): + api = "protocols/san/igroups" + body = dict( + name=self.parameters['name'], + os_type=self.parameters['os_type']) + body['svm'] = dict(name=self.parameters['vserver']) + mapping = dict( + initiator_group_type='protocol', + bind_portset='portset', + igroups='igroups', + initiator_objects='initiators' + ) + for option in mapping: + value = self.parameters.get(option) + if value is not None: + if option in ('igroups', 'initiator_objects'): + # we may have an empty list, ignore it + if option == 'initiator_objects': + value = [self.na_helper.filter_out_none_entries(item) for item in value] if value else None + else: + value = [dict(name=name) for name in value] if value else None + if value is not None: + body[mapping[option]] = value + dummy, error = rest_generic.post_async(self.rest_api, api, body) + self.fail_on_error(error) + + def create_igroup(self): + """ + Create the igroup. 
+ """ + if self.use_rest: + return self.create_igroup_rest() + + options = {'initiator-group-name': self.parameters['name']} + if self.parameters.get('os_type') is not None: + options['os-type'] = self.parameters['os_type'] + if self.parameters.get('initiator_group_type') is not None: + options['initiator-group-type'] = self.parameters['initiator_group_type'] + if self.parameters.get('bind_portset') is not None: + options['bind-portset'] = self.parameters['bind_portset'] + + igroup_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'igroup-create', **options) + + try: + self.server.invoke_successfully(igroup_create, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error provisioning igroup %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + self.add_initiators_or_igroups(None, 'initiator_names', []) + + @staticmethod + def change_in_initiator_comments(modify, current): + + if 'initiator_objects' not in current: + return list() + + # for python 2.6 + comments = dict((item['name'], item['comment']) for item in current['initiator_objects']) + + def has_changed_comment(item): + return item['name'] in comments and item['comment'] is not None and item['comment'] != comments[item['name']] + + return [item for item in modify['initiator_objects'] if has_changed_comment(item)] + + def modify_igroup_rest(self, uuid, modify): + api = "protocols/san/igroups" + body = dict() + for option in modify: + if option not in self.rest_modify_zapi_to_rest: + self.module.fail_json(msg='Error: modifying %s is not supported in REST' % option) + body[self.rest_modify_zapi_to_rest[option]] = modify[option] + if body: + dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body) + self.fail_on_error(error) + + def delete_igroup_rest(self, uuid): + api = "protocols/san/igroups" + query = {'allow_delete_while_mapped': True} if self.parameters['force_remove_initiator'] else 
None + dummy, error = rest_generic.delete_async(self.rest_api, api, uuid, query=query) + self.fail_on_error(error) + + def delete_igroup(self, uuid): + """ + Delete the igroup. + """ + if self.use_rest: + return self.delete_igroup_rest(uuid) + + igroup_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'igroup-destroy', **{'initiator-group-name': self.parameters['name'], + 'force': 'true' if self.parameters['force_remove_initiator'] else 'false'}) + + try: + self.server.invoke_successfully(igroup_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting igroup %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def rename_igroup(self): + """ + Rename the igroup. + """ + if self.use_rest: + self.module.fail_json(msg='Internal error, should not call rename, but use modify') + + igroup_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'igroup-rename', **{'initiator-group-name': self.parameters['from_name'], + 'initiator-group-new-name': str(self.parameters['name'])}) + try: + self.server.invoke_successfully(igroup_rename, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error renaming igroup %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def report_error_in_modify(self, modify, context): + if modify: + if len(modify) > 1: + tag = 'any of ' + else: + tag = '' + self.module.fail_json(msg='Error: modifying %s %s is not supported in %s' % (tag, str(modify), context)) + + def validate_modify(self, modify): + """Identify options that cannot be modified for REST or ZAPI + """ + if not modify: + return + modify_local = dict(modify) + modify_local.pop('igroups', None) + modify_local.pop('initiator_names', None) + modify_local.pop('initiator_objects', None) + if not self.use_rest: + self.report_error_in_modify(modify_local, 'ZAPI') + return 
+ for option in modify: + if option in self.rest_modify_zapi_to_rest: + modify_local.pop(option) + self.report_error_in_modify(modify_local, 'REST') + + def is_rename_action(self, cd_action, current): + old = self.get_igroup(self.parameters['from_name']) + rename = self.na_helper.is_rename_action(old, current) + if rename is None: + self.module.fail_json(msg='Error: igroup with from_name=%s not found' % self.parameters.get('from_name')) + if rename: + current = old + cd_action = None + return cd_action, rename, current + + def modify_igroup(self, uuid, current, modify): + for attr in ('igroups', 'initiator_names'): + if attr in current: + # we need to remove everything first + self.remove_initiators_or_igroups(uuid, attr, current[attr], current['name_to_uuid'][attr]) + for attr in ('igroups', 'initiator_names'): + if attr in current: + self.add_initiators_or_igroups(uuid, attr, current[attr]) + modify.pop(attr, None) + if 'initiator_objects' in modify: + if self.use_rest: + # comments are not supported in ZAPI, we already checked for that in validate changes + changed_initiator_objects = self.change_in_initiator_comments(modify, current) + self.modify_initiators_rest(uuid, changed_initiator_objects) + modify.pop('initiator_objects',) + if modify: + # validate_modify ensured modify is empty with ZAPI + self.modify_igroup_rest(uuid, modify) + + def apply(self): + uuid = None + rename, modify = None, None + current = self.get_igroup(self.parameters['name']) + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + cd_action, rename, current = self.is_rename_action(cd_action, current) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + # a change in name is handled in rename for ZAPI, but REST can use modify + if self.use_rest: + rename = False + else: + modify.pop('name', None) + if current and 
self.use_rest: + uuid = current['uuid'] + if cd_action == 'create' and self.use_rest and 'os_type' not in self.parameters: + self.module.fail_json(msg='Error: os_type is a required parameter when creating an igroup with REST') + saved_modify = str(modify) + self.validate_modify(modify) + + if self.na_helper.changed and not self.module.check_mode: + if rename: + self.rename_igroup() + elif cd_action == 'create': + self.create_igroup() + elif cd_action == 'delete': + self.delete_igroup(uuid) + if modify: + self.modify_igroup(uuid, current, modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify=saved_modify) + self.module.exit_json(**result) + + +def main(): + obj = NetAppOntapIgroup() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py new file mode 100644 index 000000000..7280eb181 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_igroup_initiator.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +''' This is an Ansible module for ONTAP, to manage initiators in an Igroup + + (c) 2019-2022, NetApp, Inc + # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: na_ontap_igroup_initiator +short_description: NetApp ONTAP igroup initiator configuration +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Add/Remove initiators from an igroup + +options: + state: + description: + - Whether the specified initiator should exist or not in an igroup. + choices: ['present', 'absent'] + type: str + default: present + + names: + description: + - List of initiators to manage. 
+ required: true + aliases: + - name + type: list + elements: str + + initiator_group: + description: + - Name of the initiator group to which the initiator belongs. + required: true + type: str + + force_remove: + description: + - Forcibly remove the initiators even if there are existing LUNs mapped to the initiator group. + type: bool + default: false + version_added: '20.1.0' + + vserver: + description: + - The name of the vserver to use. + required: true + type: str + +''' + +EXAMPLES = ''' + - name: Add initiators to an igroup + netapp.ontap.na_ontap_igroup_initiator: + names: abc.test:def.com,def.test:efg.com + initiator_group: test_group + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Remove an initiator from an igroup + netapp.ontap.na_ontap_igroup_initiator: + state: absent + names: abc.test:def.com + initiator_group: test_group + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +''' + +RETURN = ''' +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapIgroupInitiator(object): + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + names=dict(required=True, type='list', elements='str', aliases=['name']), + initiator_group=dict(required=True, type='str'), + 
force_remove=dict(required=False, type='bool', default=False), + vserver=dict(required=True, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.uuid = None + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_initiators(self): + """ + Get the existing list of initiators from an igroup + :rtype: list() or None + """ + if self.use_rest: + return self.get_initiators_rest() + igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter') + attributes = dict(query={'initiator-group-info': {'initiator-group-name': self.parameters['initiator_group'], + 'vserver': self.parameters['vserver']}}) + igroup_info.translate_struct(attributes) + result, current = None, [] + + try: + result = self.server.invoke_successfully(igroup_info, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['initiator_group'], + to_native(error)), + exception=traceback.format_exc()) + + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + igroup_info = result.get_child_by_name('attributes-list').get_child_by_name('initiator-group-info') + if igroup_info.get_child_by_name('initiators') is not None: + current = [initiator['initiator-name'] for initiator in igroup_info['initiators'].get_children()] + return current + + def modify_initiator(self, initiator_name, zapi): + """ + Add or remove an initiator to/from an igroup + """ + if self.use_rest: + return self.modify_initiator_rest(initiator_name, zapi) + options = 
{'initiator-group-name': self.parameters['initiator_group'], + 'initiator': initiator_name, + 'force': 'true' if zapi == 'igroup-remove' and self.parameters['force_remove'] else 'false'} + initiator_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options) + + try: + self.server.invoke_successfully(initiator_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (initiator_name, + to_native(error)), + exception=traceback.format_exc()) + + def get_initiators_rest(self): + api = 'protocols/san/igroups' + query = {'name': self.parameters['initiator_group'], 'svm.name': self.parameters['vserver']} + fields = 'initiators,uuid' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg="Error fetching igroup info %s: %s" % (self.parameters['initiator_group'], error)) + current = [] + if record: + self.uuid = record['uuid'] + # igroup may have 0 initiators. 
+ if 'initiators' in record: + current = [initiator['name'] for initiator in record['initiators']] + return current + + def modify_initiator_rest(self, initiator_name, modify_action): + if self.uuid is None: + self.module.fail_json(msg="Error modifying igroup initiator %s: igroup not found" % initiator_name) + api = 'protocols/san/igroups/%s/initiators' % self.uuid + if modify_action == 'igroup-add': + body = {"name": initiator_name} + dummy, error = rest_generic.post_async(self.rest_api, api, body) + else: + query = {'allow_delete_while_mapped': self.parameters['force_remove']} + dummy, error = rest_generic.delete_async(self.rest_api, api, initiator_name, query) + if error: + self.module.fail_json(msg="Error modifying igroup initiator %s: %s" % (initiator_name, error)) + + def apply(self): + initiators = self.get_initiators() + for initiator in self.parameters['names']: + present = None + initiator = self.na_helper.sanitize_wwn(initiator) + if initiator in initiators: + present = True + cd_action = self.na_helper.get_cd_action(present, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.modify_initiator(initiator, 'igroup-add') + elif cd_action == 'delete': + self.modify_initiator(initiator, 'igroup-remove') + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + obj = NetAppOntapIgroupInitiator() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py new file mode 100644 index 000000000..6591cc9cd --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_info.py @@ -0,0 +1,1825 @@ +#!/usr/bin/python + +# (c) 2018 Piotr Olczak +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_info +''' + +from __future__ import absolute_import, 
division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_info +author: Piotr Olczak (@dprts) +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +short_description: NetApp information gatherer +description: + - This module allows you to gather various information about ONTAP configuration +version_added: 2.9.0 +requirements: + - netapp_lib +options: + state: + type: str + description: + - deprecated as of 21.1.0. + - this option was ignored and continues to be ignored. + vserver: + type: str + description: + - If present, 'vserver tunneling' will limit the output to the vserver scope. + - Note that not all subsets are supported on a vserver, and 'all' will trigger an error. + version_added: '19.11.0' + gather_subset: + type: list + elements: str + description: + - When supplied, this argument will restrict the information collected to a given subset. Possible values for this argument include + - "active_directory_account_info" + - "aggregate_info" + - "aggr_efficiency_info" + - "autosupport_check_info" + - "cifs_options_info" + - "cifs_server_info" + - "cifs_share_info" + - "cifs_vserver_security_info" + - "cluster_identity_info" + - "cluster_image_info" + - "cluster_log_forwarding_info" + - "cluster_node_info" + - "cluster_peer_info" + - "cluster_switch_info" + - "clock_info" + - "disk_info" + - "env_sensors_info" + - "event_notification_destination_info" + - "event_notification_info" + - "export_policy_info" + - "export_rule_info" + - "fcp_adapter_info" + - "fcp_alias_info" + - "fcp_service_info" + - "igroup_info" + - "iscsi_service_info" + - "job_schedule_cron_info" + - "kerberos_realm_info" + - "ldap_client" + - "ldap_config" + - "license_info" + - "lun_info" + - "lun_map_info" + - "metrocluster_check_info" + - "metrocluster_info" + - "metrocluster_node_info" + - "net_dev_discovery_info" + - "net_dns_info" + - "net_failover_group_info" + - "net_firewall_info" + - "net_ifgrp_info" + - "net_interface_info" + - 
"net_interface_service_policy_info" + - "net_ipspaces_info" + - "net_port_info" + - "net_port_broadcast_domain_info" + - "net_routes_info" + - "net_vlan_info" + - "nfs_info" + - "ntfs_dacl_info" + - "ntfs_sd_info" + - "ntp_server_info" + - "nvme_info" + - "nvme_interface_info" + - "nvme_namespace_info" + - "nvme_subsystem_info" + - "ontap_system_version" + - "ontap_version" + - "ontapi_version" + - "qos_adaptive_policy_info" + - "qos_policy_info" + - "qtree_info" + - "quota_policy_info" + - "quota_report_info" + - "role_info" + - "security_key_manager_key_info" + - "security_login_account_info" + - "security_login_role_config_info" + - "security_login_role_info" + - "service_processor_info" + - "service_processor_network_info" + - "shelf_info" + - "sis_info" + - "sis_policy_info" + - "snapmirror_info" + - "snapmirror_destination_info" + - "snapmirror_policy_info" + - "snapshot_info" + - "snapshot_policy_info" + - "storage_failover_info" + - "storage_bridge_info" + - "subsys_health_info" + - "sysconfig_info" + - "sys_cluster_alerts" + - "volume_info" + - "volume_space_info" + - "vscan_info" + - "vscan_status_info" + - "vscan_scanner_pool_info" + - "vscan_connection_status_all_info" + - "vscan_connection_extended_stats_info" + - "vserver_info" + - "vserver_login_banner_info" + - "vserver_motd_info" + - "vserver_nfs_info" + - "vserver_peer_info" + - Can specify a list of values to include a larger subset. + - Values can also be used with an initial C(!) to specify that a specific subset should not be collected. + - nvme is supported with ONTAP 9.4 onwards. + - use "help" to get a list of supported information for your system. + - with lun_info, serial_hex and naa_id are computed when serial_number is present. + default: "all" + max_records: + type: int + description: + - Maximum number of records returned in a single ZAPI call. Valid range is [1..2^32-1]. + This parameter controls internal behavior of this module. 
+ default: 1024 + version_added: '20.2.0' + summary: + description: + - Boolean flag to control return all attributes of the module info or only the names. + - If true, only names are returned. + default: false + type: bool + version_added: '20.4.0' + volume_move_target_aggr_info: + description: + - Required options for volume_move_target_aggr_info + type: dict + version_added: '20.5.0' + suboptions: + volume_name: + description: + - Volume name to get target aggr info for + required: true + type: str + version_added: '20.5.0' + vserver: + description: + - vserver the Volume lives on + required: true + type: str + version_added: '20.5.0' + desired_attributes: + description: + - Advanced feature requiring to understand ZAPI internals. + - Allows to request a specific attribute that is not returned by default, or to limit the returned attributes. + - A dictionary for the zapi desired-attributes element. + - An XML tag I(value) is a dictionary with tag as the key. + - Value can be another dictionary, a list of dictionaries, a string, or nothing. + - eg I() is represented as I(tag:) + - Only a single subset can be called at a time if this option is set. + - It is the caller responsibity to make sure key attributes are present in the right position. + - The module will error out if any key attribute is missing. + type: dict + version_added: '20.6.0' + query: + description: + - Advanced feature requiring to understand ZAPI internals. + - Allows to specify which objects to return. + - A dictionary for the zapi query element. + - An XML tag I(value) is a dictionary with tag as the key. + - Value can be another dictionary, a list of dictionaries, a string, or nothing. + - eg I() is represented as I(tag:) + - Only a single subset can be called at a time if this option is set. + type: dict + version_added: '20.7.0' + use_native_zapi_tags: + description: + - By default, I(-) in the returned dictionary keys are translated to I(_). 
+ - If set to true, the translation is disabled. + type: bool + default: false + version_added: '20.6.0' + continue_on_error: + description: + - By default, this module fails on the first error. + - This option allows to provide a list of errors that are not failing the module. + - Errors in the list are reported in the output, under the related info element, as an "error" entry. + - Possible values are always, never, missing_vserver_api_error, rpc_error, other_error. + - missing_vserver_api_error - most likely the API is available at cluster level but not vserver level. + - rpc_error - some queries are failing because the node cannot reach another node in the cluster. + - key_error - a query is failing because the returned data does not contain an expected key. + - for key errors, make sure to report this in Slack. It may be a change in a new ONTAP version. + - other_error - anything not in the above list. + - always will continue on any error, never will fail on any error, they cannot be used with any other keyword. 
+ type: list + elements: str + default: never +''' + +EXAMPLES = ''' +- name: Get NetApp info as Cluster Admin (Password Authentication) + netapp.ontap.na_ontap_info: + hostname: "na-vsim" + username: "admin" + password: "admins_password" + register: ontap_info +- debug: + msg: "{{ ontap_info.ontap_info }}" + +- name: Get NetApp version as Vserver admin + netapp.ontap.na_ontap_info: + hostname: "na-vsim" + username: "vsadmin" + vserver: trident_svm + password: "vsadmins_password" + +- name: run ontap info module using vserver tunneling and ignoring errors + netapp.ontap.na_ontap_info: + hostname: "na-vsim" + username: "admin" + password: "admins_password" + vserver: trident_svm + summary: true + continue_on_error: + - missing_vserver_api_error + - rpc_error + +- name: Limit Info Gathering to Aggregate Information as Cluster Admin + netapp.ontap.na_ontap_info: + hostname: "na-vsim" + username: "admin" + password: "admins_password" + gather_subset: "aggregate_info" + register: ontap_info + +- name: Limit Info Gathering to Volume and Lun Information as Cluster Admin + netapp.ontap.na_ontap_info: + hostname: "na-vsim" + username: "admin" + password: "admins_password" + gather_subset: + - volume_info + - lun_info + register: ontap_info + +- name: Gather all info except for volume and lun information as Cluster Admin + netapp.ontap.na_ontap_info: + hostname: "na-vsim" + username: "admin" + password: "admins_password" + gather_subset: + - "!volume_info" + - "!lun_info" + register: ontap_info + +- name: Gather Volume move information for a specific volume + netapp.ontap.na_ontap_info: + hostname: "na-vsim" + username: "admin" + password: "admins_password" + gather_subset: volume_move_target_aggr_info + volume_move_target_aggr_info: + volume_name: carchitest + vserver: ansible + +- name: run ontap info module for aggregate module, requesting specific fields + netapp.ontap.na_ontap_info: + # <<: *login + gather_subset: aggregate_info + desired_attributes: + aggr-attributes: 
+ aggr-inode-attributes: + files-private-used: + aggr-raid-attributes: + aggregate-type: + use_native_zapi_tags: true + register: ontap +- debug: var=ontap + +- name: run ontap info to get offline volumes with dp in the name + netapp.ontap.na_ontap_info: + # <<: *cert_login + gather_subset: volume_info + query: + volume-attributes: + volume-id-attributes: + name: '*dp*' + volume-state-attributes: + state: offline + desired_attributes: + volume-attributes: + volume-id-attributes: + name: + volume-state-attributes: + state: + register: ontap +- debug: var=ontap +''' + +RETURN = ''' +ontap_info: + description: Returns various information about NetApp cluster configuration + returned: always + type: dict + sample: '{ + "ontap_info": { + "active_directory_account_info": {...}, + "aggregate_info": {...}, + "autosupport_check_info": {...}, + "cluster_identity_info": {...}, + "cluster_image_info": {...}, + "cluster_node_info": {...}, + "igroup_info": {...}, + "iscsi_service_info": {...}, + "license_info": {...}, + "lun_info": {...}, + "metrocluster_check_info": {...}, + "metrocluster_info": {...}, + "metrocluster_node_info": {...}, + "net_dns_info": {...}, + "net_ifgrp_info": {...}, + "net_interface_info": {...}, + "net_interface_service_policy_info": {...}, + "net_port_info": {...}, + "ontap_system_version": {...}, + "ontap_version": {...}, + "ontapi_version": {...}, + "qos_policy_info": {...}, + "qos_adaptive_policy_info": {...}, + "qtree_info": {...}, + "quota_policy_info": {..}, + "quota_report_info": {...}, + "security_key_manager_key_info": {...}, + "security_login_account_info": {...}, + "snapmirror_info": {...} + "snapmirror_destination_info": {...} + "storage_bridge_info": {...} + "storage_failover_info": {...}, + "volume_info": {...}, + "vserver_login_banner_info": {...}, + "vserver_motd_info": {...}, + "vserver_info": {...}, + "vserver_nfs_info": {...}, + "vscan_status_info": {...}, + "vscan_scanner_pool_info": {...}, + "vscan_connection_status_all_info": {...}, 
+ "vscan_connection_extended_stats_info": {...} + }' +''' + +import codecs +import copy +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_native, to_text +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +IMPORT_ERRORS = [] +try: + import xmltodict + HAS_XMLTODICT = True +except ImportError as exc: + HAS_XMLTODICT = False + IMPORT_ERRORS.append(str(exc)) + +try: + import json + HAS_JSON = True +except ImportError as exc: + HAS_JSON = False + IMPORT_ERRORS.append(str(exc)) + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppONTAPGatherInfo: + '''Class with gather info methods''' + + def __init__(self): + ''' create module, set up context''' + argument_spec = netapp_utils.na_ontap_zapi_only_spec() + argument_spec.update(dict( + state=dict(type='str'), + gather_subset=dict(default=['all'], type='list', elements='str'), + vserver=dict(type='str', required=False), + max_records=dict(type='int', default=1024, required=False), + summary=dict(type='bool', default=False, required=False), + volume_move_target_aggr_info=dict( + type="dict", + required=False, + options=dict( + volume_name=dict(type='str', required=True), + vserver=dict(type='str', required=True) + ) + ), + desired_attributes=dict(type='dict', required=False), + use_native_zapi_tags=dict(type='bool', required=False, default=False), + continue_on_error=dict(type='list', required=False, elements='str', default=['never']), + query=dict(type='dict', required=False), + )) + + self.module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if not HAS_NETAPP_LIB: + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + if not HAS_XMLTODICT: + self.module.fail_json(msg="the python xmltodict module is required. Import error: %s" % str(IMPORT_ERRORS)) + if not HAS_JSON: + self.module.fail_json(msg="the python json module is required. 
Import error: %s" % str(IMPORT_ERRORS)) + + self.max_records = str(self.module.params['max_records']) + volume_move_target_aggr_info = self.module.params.get('volume_move_target_aggr_info', dict()) + if volume_move_target_aggr_info is None: + volume_move_target_aggr_info = {} + self.netapp_info = {} + self.desired_attributes = self.module.params['desired_attributes'] + self.query = self.module.params['query'] + self.translate_keys = not self.module.params['use_native_zapi_tags'] + self.warnings = [] # warnings will be added to the info results, if any + self.set_error_flags() + self.module.warn('The module only supports ZAPI and is deprecated, and will no longer work with newer versions ' + 'of ONTAP when ONTAPI is deprecated in CY22-Q4') + self.module.warn('netapp.ontap.na_ontap_rest_info should be used instead.') + + # thanks to coreywan (https://github.com/ansible/ansible/pull/47016) + # for starting this + # min_version identifies the ontapi version which supports this ZAPI + # use 0 if it is supported since 9.1 + self.info_subsets = { + 'cluster_identity_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cluster-identity-get', + 'attributes_list_tag': 'attributes', + 'attribute': 'cluster-identity-info', + 'key_fields': 'cluster-name', + }, + 'min_version': '0', + }, + 'cluster_image_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cluster-image-get-iter', + 'attribute': 'cluster-image-info', + 'key_fields': 'node-id', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'cluster_log_forwarding_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cluster-log-forward-get-iter', + 'attribute': 'cluster-log-forward-info', + 'key_fields': ('destination', 'port'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'cluster_node_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cluster-node-get-iter', + 'attribute': 
'cluster-node-info', + 'key_fields': 'node-name', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'security_login_account_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'security-login-get-iter', + 'attribute': 'security-login-account-info', + 'key_fields': ('vserver', 'user-name', 'application', 'authentication-method'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'security_login_role_config_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'security-login-role-config-get-iter', + 'attribute': 'security-login-role-config-info', + 'key_fields': ('vserver', 'role-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'security_login_role_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'security-login-role-get-iter', + 'attribute': 'security-login-role-info', + 'key_fields': ('vserver', 'role-name', 'command-directory-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'active_directory_account_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'active-directory-account-get-iter', + 'attribute': 'active-directory-account-config', + 'key_fields': ('vserver', 'account-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'aggregate_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'aggr-get-iter', + 'attribute': 'aggr-attributes', + 'key_fields': 'aggregate-name', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'volume_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'volume-get-iter', + 'attribute': 'volume-attributes', + 'key_fields': ('name', 'owning-vserver-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'license_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 
'license-v2-list-info', + 'attributes_list_tag': None, + 'attribute': 'licenses', + }, + 'min_version': '0', + }, + 'lun_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'lun-get-iter', + 'attribute': 'lun-info', + 'key_fields': ('vserver', 'path'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'metrocluster_check_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'metrocluster-check-get-iter', + 'attribute': 'metrocluster-check-info', + 'fail_on_error': False, + }, + 'min_version': '0', + }, + 'metrocluster_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'metrocluster-get', + 'attribute': 'metrocluster-info', + 'attributes_list_tag': 'attributes', + }, + 'min_version': '0', + }, + 'metrocluster_node_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'metrocluster-node-get-iter', + 'attribute': 'metrocluster-node-info', + 'key_fields': ('cluster-name', 'node-name'), + }, + 'min_version': '0', + }, + 'net_dns_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-dns-get-iter', + 'attribute': 'net-dns-info', + 'key_fields': 'vserver-name', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_interface_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-interface-get-iter', + 'attribute': 'net-interface-info', + 'key_fields': ('interface-name', 'vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_interface_service_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-interface-service-policy-get-iter', + 'attribute': 'net-interface-service-policy-info', + 'key_fields': ('vserver', 'policy'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '150', + }, + 'net_port_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-port-get-iter', + 'attribute': 
'net-port-info', + 'key_fields': ('node', 'port'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'security_key_manager_key_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'security-key-manager-key-get-iter', + 'attribute': 'security-key-manager-key-info', + 'key_fields': ('node', 'key-id'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'storage_failover_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cf-get-iter', + 'attribute': 'storage-failover-info', + 'key_fields': 'node', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vserver_motd_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vserver-motd-get-iter', + 'attribute': 'vserver-motd-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vserver_login_banner_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vserver-login-banner-get-iter', + 'attribute': 'vserver-login-banner-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vserver_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vserver-get-iter', + 'attribute': 'vserver-info', + 'key_fields': 'vserver-name', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vserver_nfs_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nfs-service-get-iter', + 'attribute': 'nfs-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_ifgrp_info': { + 'method': self.get_ifgrp_info, + 'kwargs': {}, + 'min_version': '0', + }, + 'ontap_system_version': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'system-get-version', + 'attributes_list_tag': None, + }, + 'min_version': '0', + }, + 'ontap_version': { + 
'method': self.ontapi, + 'kwargs': {}, + 'min_version': '0', + }, + 'ontapi_version': { + 'method': self.ontapi, + 'kwargs': {}, + 'min_version': '0', + }, + 'clock_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'clock-get-clock', + 'attributes_list_tag': None, + }, + 'min_version': '0' + }, + 'system_node_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'system-node-get-iter', + 'attribute': 'node-details-info', + 'key_fields': 'node', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'igroup_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'igroup-get-iter', + 'attribute': 'initiator-group-info', + 'key_fields': ('vserver', 'initiator-group-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'iscsi_service_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'iscsi-service-get-iter', + 'attribute': 'iscsi-service-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'qos_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'qos-policy-group-get-iter', + 'attribute': 'qos-policy-group-info', + 'key_fields': 'policy-group', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'qtree_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'qtree-list-iter', + 'attribute': 'qtree-info', + 'key_fields': ('vserver', 'volume', 'id'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'quota_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'quota-policy-get-iter', + 'attribute': 'quota-policy-info', + 'key_fields': ('vserver', 'policy-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'quota_report_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'quota-report-iter', + 
'attribute': 'quota', + 'key_fields': ('vserver', 'volume', 'tree', 'quota-type', 'quota-target'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vscan_status_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vscan-status-get-iter', + 'attribute': 'vscan-status-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vscan_scanner_pool_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vscan-scanner-pool-get-iter', + 'attribute': 'vscan-scanner-pool-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vscan_connection_status_all_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vscan-connection-status-all-get-iter', + 'attribute': 'vscan-connection-status-all-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vscan_connection_extended_stats_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vscan-connection-extended-stats-get-iter', + 'attribute': 'vscan-connection-extended-stats-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'snapshot_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'snapshot-get-iter', + 'attribute': 'snapshot-info', + 'key_fields': ('vserver', 'volume', 'name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'storage_bridge_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'storage-bridge-get-iter', + 'attribute': 'storage-bridge-info', + 'key_fields': 'name', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + # supported in ONTAP 9.3 and onwards + 'qos_adaptive_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 
'qos-adaptive-policy-group-get-iter', + 'attribute': 'qos-adaptive-policy-group-info', + 'key_fields': 'policy-group', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '130', + }, + # supported in ONTAP 9.4 and onwards + 'nvme_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nvme-get-iter', + 'attribute': 'nvme-target-service-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '140', + }, + 'nvme_interface_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nvme-interface-get-iter', + 'attribute': 'nvme-interface-info', + 'key_fields': 'vserver', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '140', + }, + 'nvme_subsystem_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nvme-subsystem-get-iter', + 'attribute': 'nvme-subsystem-info', + 'key_fields': 'subsystem', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '140', + }, + 'nvme_namespace_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nvme-namespace-get-iter', + 'attribute': 'nvme-namespace-info', + 'key_fields': 'path', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '140', + }, + + # Alpha Order + + 'aggr_efficiency_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'aggr-efficiency-get-iter', + 'attribute': 'aggr-efficiency-info', + # the preferred key is node_name:aggregate_name + # but node is not present with MCC + 'key_fields': (('node', None), 'aggregate'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '140', + }, + 'autosupport_check_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'autosupport-check-iter', + 'attribute': 'autosupport-check-info', + 'key_fields': ('node-name', 'check-type', 'error-detail'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'cifs_options_info': { + 'method': 
self.get_generic_get_iter, + 'kwargs': { + 'call': 'cifs-options-get-iter', + 'attribute': 'cifs-options', + 'key_fields': ('vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'cifs_server_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cifs-server-get-iter', + 'attribute': 'cifs-server-config', + # preferred key is :: + # alternate key is :: + 'key_fields': ('vserver', ('domain', 'domain-workgroup'), 'cifs-server'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'cifs_share_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cifs-share-get-iter', + 'attribute': 'cifs-share', + 'key_fields': ('share-name', 'path', 'cifs-server'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'cifs_vserver_security_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cifs-security-get-iter', + 'attribute': 'cifs-security', + 'key_fields': ('vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'cluster_peer_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cluster-peer-get-iter', + 'attribute': 'cluster-peer-info', + 'key_fields': ('cluster-name', 'remote-cluster-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'cluster_switch_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'cluster-switch-get-iter', + 'attribute': 'cluster-switch-info', + 'key_fields': ('device', 'model', 'serial-number'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '160', + }, + 'disk_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'storage-disk-get-iter', + 'attribute': 'storage-disk-info', + 'key_fields': ('disk-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'env_sensors_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 
'call': 'environment-sensors-get-iter', + 'attribute': 'environment-sensors-info', + 'key_fields': ('node-name', 'sensor-name'), + 'query': {'max-records': self.max_records}, + 'fail_on_error': False, + }, + 'min_version': '0', + }, + 'event_notification_destination_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'ems-event-notification-destination-get-iter', + 'attribute': 'event-notification-destination-info', + 'key_fields': ('name', 'type'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'event_notification_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'ems-event-notification-get-iter', + 'attribute': 'event-notification', + 'key_fields': ('id'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'export_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'export-policy-get-iter', + 'attribute': 'export-policy-info', + 'key_fields': ('vserver', 'policy-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'export_rule_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'export-rule-get-iter', + 'attribute': 'export-rule-info', + 'key_fields': ('vserver-name', 'policy-name', 'rule-index'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'fcp_adapter_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'ucm-adapter-get-iter', + 'attribute': 'uc-adapter-info', + 'key_fields': ('adapter-name', 'node-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'fcp_alias_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'fcp-wwpnalias-get-iter', + 'attribute': 'aliases-info', + 'key_fields': ('aliases-alias', 'vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'fcp_service_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 
'call': 'fcp-service-get-iter', + 'attribute': 'fcp-service-info', + 'key_fields': ('vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'job_schedule_cron_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'job-schedule-cron-get-iter', + 'attribute': 'job-schedule-cron-info', + 'key_fields': ('job-schedule-name', ('job-schedule-cluster', None)), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'kerberos_realm_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'kerberos-realm-get-iter', + 'attribute': 'kerberos-realm', + 'key_fields': ('vserver-name', 'realm'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'ldap_client': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'ldap-client-get-iter', + 'attribute': 'ldap-client', + 'key_fields': ('vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'ldap_config': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'ldap-config-get-iter', + 'attribute': 'ldap-config', + 'key_fields': ('vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'lun_map_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'lun-map-get-iter', + 'attribute': 'lun-map-info', + 'key_fields': ('initiator-group', 'lun-id', 'node', 'path', 'vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_dev_discovery_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-device-discovery-get-iter', + 'attribute': 'net-device-discovery-info', + 'key_fields': ('port'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_failover_group_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-failover-group-get-iter', + 'attribute': 'net-failover-group-info', + 'key_fields': ('vserver', 
'failover-group'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_firewall_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-firewall-policy-get-iter', + 'attribute': 'net-firewall-policy-info', + 'key_fields': ('policy', 'vserver', 'service'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_ipspaces_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-ipspaces-get-iter', + 'attribute': 'net-ipspaces-info', + 'key_fields': ('ipspace'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_port_broadcast_domain_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-port-broadcast-domain-get-iter', + 'attribute': 'net-port-broadcast-domain-info', + 'key_fields': ('broadcast-domain', 'ipspace'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_routes_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-routes-get-iter', + 'attribute': 'net-vs-routes-info', + 'key_fields': ('vserver', 'destination', 'gateway'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'net_vlan_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'net-vlan-get-iter', + 'attribute': 'vlan-info', + 'key_fields': ('interface-name', 'node'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'nfs_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'nfs-service-get-iter', + 'attribute': 'nfs-info', + 'key_fields': ('vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'ntfs_dacl_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'file-directory-security-ntfs-dacl-get-iter', + 'attribute': 'file-directory-security-ntfs-dacl', + 'key_fields': ('vserver', 'ntfs-sd', 'account', 'access-type'), + 'query': 
{'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'ntfs_sd_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'file-directory-security-ntfs-get-iter', + 'attribute': 'file-directory-security-ntfs', + 'key_fields': ('vserver', 'ntfs-sd'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'ntp_server_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'ntp-server-get-iter', + 'attribute': 'ntp-server-info', + 'key_fields': ('server-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'role_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'security-login-role-get-iter', + 'attribute': 'security-login-role-info', + 'key_fields': ('vserver', 'role-name', 'access-level', 'command-directory-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'service_processor_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'service-processor-get-iter', + 'attribute': 'service-processor-info', + 'key_fields': ('node'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'service_processor_network_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'service-processor-network-get-iter', + 'attribute': 'service-processor-network-info', + # don't use key_fieldss, as we cannot build a key with optional key_fieldss + # without a key, we'll get a list of dictionaries + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'shelf_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'storage-shelf-info-get-iter', + 'attribute': 'storage-shelf-info', + 'key_fields': ('shelf-id', 'serial-number'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'sis_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'sis-get-iter', + 'attribute': 'sis-status-info', + 
'key_fields': 'path', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'sis_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'sis-policy-get-iter', + 'attribute': 'sis-policy-info', + 'key_fields': ('vserver', 'policy-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'snapmirror_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'snapmirror-get-iter', + 'attribute': 'snapmirror-info', + 'key_fields': 'destination-location', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '140', + }, + 'snapmirror_destination_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'snapmirror-get-destination-iter', + 'attribute': 'snapmirror-destination-info', + 'key_fields': 'destination-location', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '140', + }, + 'snapmirror_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'snapmirror-policy-get-iter', + 'attribute': 'snapmirror-policy-info', + 'key_fields': ('vserver-name', 'policy-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'snapshot_policy_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'snapshot-policy-get-iter', + 'attribute': 'snapshot-policy-info', + 'key_fields': ('vserver-name', 'policy'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'subsys_health_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'diagnosis-subsystem-config-get-iter', + 'attribute': 'diagnosis-subsystem-config-info', + 'key_fields': 'subsystem', + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'sys_cluster_alerts': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'diagnosis-alert-get-iter', + 'attribute': 'diagnosis-alert-info', + 'key_fields': ('node', 'alerting-resource'), + 'query': 
{'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'sysconfig_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'system-get-node-info-iter', + 'attribute': 'system-info', + 'key_fields': ('system-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'volume_move_target_aggr_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'volume-move-target-aggr-get-iter', + 'attribute': 'volume-move-target-aggr-info', + 'query': {'max-records': self.max_records, + 'volume-name': volume_move_target_aggr_info.get('volume_name', None), + 'vserver': volume_move_target_aggr_info.get('vserver', None)}, + 'fail_on_error': False, + }, + 'min_version': '0', + }, + 'volume_space_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'volume-space-get-iter', + 'attribute': 'space-info', + 'key_fields': ('vserver', 'volume'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vscan_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vscan-status-get-iter', + 'attribute': 'vscan-status-info', + 'key_fields': ('vserver'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + 'vserver_peer_info': { + 'method': self.get_generic_get_iter, + 'kwargs': { + 'call': 'vserver-peer-get-iter', + 'attribute': 'vserver-peer-info', + 'key_fields': ('vserver', 'remote-vserver-name'), + 'query': {'max-records': self.max_records}, + }, + 'min_version': '0', + }, + } + + # use vserver tunneling if vserver is present (not None) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.module.params['vserver']) + + def ontapi(self): + '''Method to get ontapi version''' + + api = 'system-get-ontapi-version' + api_call = netapp_utils.zapi.NaElement(api) + try: + results = self.server.invoke_successfully(api_call, enable_tunneling=True) + ontapi_version = results.get_child_content('minor-version') + 
return ontapi_version if ontapi_version is not None else '0' + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error calling API %s: %s" % + (api, to_native(error)), exception=traceback.format_exc()) + + def call_api(self, call, attributes_list_tag='attributes-list', query=None, fail_on_error=True): + '''Main method to run an API call''' + + api_call = netapp_utils.zapi.NaElement(call) + initial_result = None + result = None + + if query: + for key, val in query.items(): + # Can val be nested? + api_call.add_new_child(key, val) + + if self.desired_attributes is not None: + api_call.translate_struct(self.desired_attributes) + if self.query is not None: + api_call.translate_struct(self.query) + try: + initial_result = self.server.invoke_successfully(api_call, enable_tunneling=True) + next_tag = initial_result.get_child_by_name('next-tag') + result = copy.copy(initial_result) + + while next_tag: + next_tag_call = netapp_utils.zapi.NaElement(call) + if query: + for key, val in query.items(): + next_tag_call.add_new_child(key, val) + + next_tag_call.add_new_child("tag", next_tag.get_content(), True) + next_result = self.server.invoke_successfully(next_tag_call, enable_tunneling=True) + + next_tag = next_result.get_child_by_name('next-tag') + if attributes_list_tag is None: + self.module.fail_json(msg="Error calling API %s: %s" % + (api_call.to_string(), "'next-tag' is not expected for this API")) + + result_attr = result.get_child_by_name(attributes_list_tag) + new_records = next_result.get_child_by_name(attributes_list_tag) + if new_records: + for record in new_records.get_children(): + result_attr.add_child_elem(record) + + return result, None + + except netapp_utils.zapi.NaApiError as error: + if call in ['security-key-manager-key-get-iter']: + return result, None + kind, error_message = netapp_utils.classify_zapi_exception(error) + if kind == 'missing_vserver_api_error': + # for missing_vserver_api_error, the API is already in 
error_message + error_message = "Error invalid API. %s" % error_message + else: + error_message = "Error calling API %s: %s" % (call, error_message) + if self.error_flags[kind] and fail_on_error: + self.module.fail_json(msg=error_message, exception=traceback.format_exc()) + return None, error_message + + def get_ifgrp_info(self): + '''Method to get network port ifgroups info''' + + try: + net_port_info = self.netapp_info['net_port_info'] + except KeyError: + net_port_info_calls = self.info_subsets['net_port_info'] + net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs']) + interfaces = net_port_info.keys() + + ifgrps = [] + for ifn in interfaces: + if net_port_info[ifn]['port_type'] == 'if_group': + ifgrps.append(ifn) + + net_ifgrp_info = dict() + for ifgrp in ifgrps: + query = dict() + query['node'], query['ifgrp-name'] = ifgrp.split(':') + + tmp = self.get_generic_get_iter('net-port-ifgrp-get', key_fields=('node', 'ifgrp-name'), + attribute='net-ifgrp-info', query=query, + attributes_list_tag='attributes') + net_ifgrp_info = net_ifgrp_info.copy() + net_ifgrp_info.update(tmp) + return net_ifgrp_info + + def get_generic_get_iter(self, call, attribute=None, key_fields=None, query=None, attributes_list_tag='attributes-list', fail_on_error=True): + '''Method to run a generic get-iter call''' + + generic_call, error = self.call_api(call, attributes_list_tag, query, fail_on_error=fail_on_error) + + if error is not None: + return {'error': error} + + if generic_call is None: + return None + + if attributes_list_tag is None: + attributes_list = generic_call + else: + attributes_list = generic_call.get_child_by_name(attributes_list_tag) + + if attributes_list is None: + return None + + if key_fields is None: + out = [] + else: + out = {} + + iteration = 0 + for child in attributes_list.get_children(): + iteration += 1 + dic = xmltodict.parse(child.to_string(), xml_attribs=False) + + if attribute is not None: + try: + dic = dic[attribute] + except 
KeyError as exc: + error_message = 'Error: attribute %s not found for %s, got: %s' % (str(exc), call, dic) + self.module.fail_json(msg=error_message, exception=traceback.format_exc()) + + info = json.loads(json.dumps(dic)) + if self.translate_keys: + info = convert_keys(info) + if isinstance(key_fields, str): + try: + unique_key = _finditem(dic, key_fields) + except KeyError as exc: + error_message = 'Error: key %s not found for %s, got: %s' % (str(exc), call, repr(info)) + if self.error_flags['key_error']: + self.module.fail_json(msg=error_message, exception=traceback.format_exc()) + unique_key = 'Error_%d_key_not_found_%s' % (iteration, exc.args[0]) + elif isinstance(key_fields, tuple): + try: + unique_key = ':'.join([_finditem(dic, el) for el in key_fields]) + except KeyError as exc: + error_message = 'Error: key %s not found for %s, got: %s' % (str(exc), call, repr(info)) + if self.error_flags['key_error']: + self.module.fail_json(msg=error_message, exception=traceback.format_exc()) + unique_key = 'Error_%d_key_not_found_%s' % (iteration, exc.args[0]) + else: + unique_key = None + if unique_key is not None: + out = out.copy() + out.update({unique_key: info}) + else: + out.append(info) + + if attributes_list_tag is None and key_fields is None: + if len(out) == 1: + # flatten the list as only 1 element is expected + out = out[0] + elif len(out) > 1: + # aggregate a list of dictionaries into a single dict + # make sure we only have dicts and no key duplication + dic = dict() + key_count = 0 + for item in out: + if not isinstance(item, dict): + # abort if we don't see a dict - not sure this can happen with ZAPI + key_count = -1 + break + dic.update(item) + key_count += len(item) + if key_count == len(dic): + # no duplicates! 
+ out = dic + + return out + + def augment_subset(self, subset, info): + if subset == 'lun_info' and info: + for lun_info in info.values(): + # the keys may have been converted, or not + serial = lun_info.get('serial_number') or lun_info.get('serial-number') + if serial: + hexlify = codecs.getencoder('hex') + # dictionaries are mutable + lun_info['serial_hex'] = to_text(hexlify(to_bytes(lun_info['serial_number']))[0]) + lun_info['naa_id'] = 'naa.600a0980' + lun_info['serial_hex'] + return info + + def get_all(self, gather_subset): + '''Method to get all subsets''' + + self.netapp_info['ontapi_version'] = self.ontapi() + self.netapp_info['ontap_version'] = self.netapp_info['ontapi_version'] + + run_subset = self.get_subset(gather_subset, self.netapp_info['ontapi_version']) + if 'ontap_version' in gather_subset: + if netapp_utils.has_feature(self.module, 'deprecation_warning'): + self.netapp_info['deprecation_warning'] = 'ontap_version is deprecated, please use ontapi_version' + if 'help' in gather_subset: + self.netapp_info['help'] = sorted(run_subset) + else: + if self.desired_attributes is not None: + if len(run_subset) > 1: + self.module.fail_json(msg="desired_attributes option is only supported with a single subset") + self.sanitize_desired_attributes() + if self.query is not None: + if len(run_subset) > 1: + self.module.fail_json(msg="query option is only supported with a single subset") + self.sanitize_query() + for subset in run_subset: + call = self.info_subsets[subset] + self.netapp_info[subset] = call['method'](**call['kwargs']) + self.augment_subset(subset, self.netapp_info[subset]) + + if self.warnings: + self.netapp_info['module_warnings'] = self.warnings + + return self.netapp_info + + def get_subset(self, gather_subset, version): + '''Method to get a single subset''' + + runable_subsets = set() + exclude_subsets = set() + usable_subsets = [key for key in self.info_subsets if version >= self.info_subsets[key]['min_version']] + if 'help' in 
gather_subset: + return usable_subsets + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(usable_subsets) + return runable_subsets + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + return set() + exclude = True + else: + exclude = False + + if subset not in usable_subsets: + if subset not in self.info_subsets.keys(): + self.module.fail_json(msg='Bad subset: %s' % subset) + self.module.fail_json(msg='Remote system at version %s does not support %s' % + (version, subset)) + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(usable_subsets) + + runable_subsets.difference_update(exclude_subsets) + + return runable_subsets + + def get_summary(self, ontap_info): + for info in ontap_info: + if '_info' in info and ontap_info[info] is not None and isinstance(ontap_info[info], dict): + # don't summarize errors + if 'error' not in ontap_info[info]: + ontap_info[info] = ontap_info[info].keys() + return ontap_info + + def sanitize_desired_attributes(self): + ''' add top 'desired-attributes' if absent + check for _ as more likely ZAPI does not take them + ''' + da_key = 'desired-attributes' + if da_key not in self.desired_attributes: + desired_attributes = dict() + desired_attributes[da_key] = self.desired_attributes + self.desired_attributes = desired_attributes + self.check_for___in_keys(self.desired_attributes) + + def sanitize_query(self): + ''' add top 'query' if absent + check for _ as more likely ZAPI does not take them + ''' + key = 'query' + if key not in self.query: + query = dict() + query[key] = self.query + self.query = query + self.check_for___in_keys(self.query) + + def check_for___in_keys(self, d_param): + '''Method to warn on underscore in a ZAPI tag''' + if isinstance(d_param, dict): + for key, val in d_param.items(): + self.check_for___in_keys(val) + if '_' in key: + self.warnings.append("Underscore in ZAPI tag: %s, do you 
mean '-'?" % key) + elif isinstance(d_param, list): + for val in d_param: + self.check_for___in_keys(val) + + def set_error_flags(self): + error_flags = self.module.params['continue_on_error'] + generic_flags = ('always', 'never') + if len(error_flags) > 1: + for key in generic_flags: + if key in error_flags: + self.module.fail_json(msg="%s needs to be the only keyword in 'continue_on_error' option." % key) + specific_flags = ('rpc_error', 'missing_vserver_api_error', 'key_error', 'other_error') + for key in error_flags: + if key not in generic_flags and key not in specific_flags: + self.module.fail_json(msg="%s is not a valid keyword in 'continue_on_error' option." % key) + self.error_flags = dict() + for flag in specific_flags: + self.error_flags[flag] = True + for key in error_flags: + if key == 'always' or key == flag: + self.error_flags[flag] = False + + def apply(self): + gather_subset = self.module.params['gather_subset'] + if gather_subset is None: + gather_subset = ['all'] + gf_all = self.get_all(gather_subset) + if self.module.params['summary']: + gf_all = self.get_summary(gf_all) + results = {'changed': False, 'ontap_info': gf_all} + if self.module.params['state'] is not None: + results['state'] = self.module.params['state'] + results['warnings'] = "option 'state' is deprecated." 
+ self.module.warn("option 'state' is deprecated.") + self.module.exit_json(**results) + + +# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary +def __finditem(obj, key): + + if key is None: + # allows for a key not to be present + return "key_not_present" + if key in obj: + if obj[key] is None: + return "None" + return obj[key] + for dummy, val in obj.items(): + if isinstance(val, dict): + item = __finditem(val, key) + if item is not None: + return item + return None + + +def _finditem(obj, keys): + ''' if keys is a string, use it as a key + if keys is a tuple, stop on the first valid key + if no valid key is found, raise a KeyError ''' + + value = None + if isinstance(keys, str): + value = __finditem(obj, keys) + elif isinstance(keys, tuple): + for key in keys: + value = __finditem(obj, key) + if value is not None: + break + if value is not None: + return value + raise KeyError(str(keys)) + + +def convert_keys(d_param): + '''Method to convert hyphen to underscore''' + + if isinstance(d_param, dict): + out = {} + for key, val in d_param.items(): + val = convert_keys(val) + out[key.replace('-', '_')] = val + return out + elif isinstance(d_param, list): + return [convert_keys(val) for val in d_param] + return d_param + + +def main(): + '''Execute action''' + gf_obj = NetAppONTAPGatherInfo() + gf_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py new file mode 100644 index 000000000..4f859adfd --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_interface.py @@ -0,0 +1,1457 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_interface +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + 
+DOCUMENTATION = ''' +module: na_ontap_interface +short_description: NetApp ONTAP LIF configuration +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Creating / deleting and modifying the LIF. + +options: + state: + description: + - Whether the specified interface should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + interface_name: + description: + - Specifies the logical interface (LIF) name. + required: true + type: str + + home_node: + description: + - Specifies the LIF's home node. + - By default, the first node from the cluster is considered as home node. + type: str + + current_node: + description: + - Specifies the LIF's current node. + - By default, this is home_node + type: str + + home_port: + description: + - Specifies the LIF's home port. + - Requires ONTAP 9.8 or later with FC interfaces when using REST. + - With REST, at least one of home_port, home_node, or broadcast_domain is required to create IP interfaces. + - With REST, either home_port or current_port is required to create FC interfaces. + - With ZAPI, home_port is required to create IP and FC interfaces. + - home_port and broadcast_domain are mutually exclusive (REST and IP interfaces). + type: str + + current_port: + description: + - Specifies the LIF's current port. + type: str + + role: + description: + - Specifies the role of the LIF. + - When setting role as "intercluster" or "cluster", setting protocol is not supported. + - When creating a "cluster" role, the node name will appear as the prefix in the name of LIF. + - For example, if the specified name is clif and node name is node1, the LIF name appears in the ONTAP as node1_clif. + - Possible values are 'undef', 'cluster', 'data', 'node-mgmt', 'intercluster', 'cluster-mgmt'. + - Required when C(state=present) unless service_policy is present and ONTAP version is 9.8 or better. 
+ - This option is deprecated in REST. + - With REST, the module tries to derive a service_policy and may error out. + type: str + + address: + description: + - Specifies the LIF's IP address. + - ZAPI - Required when C(state=present) and is_ipv4_link_local if false and subnet_name is not set. + - REST - Required when C(state=present) and C(interface_type) is IP. + type: str + + netmask: + description: + - Specifies the LIF's netmask. + - ZAPI - Required when C(state=present) and is_ipv4_link_local if false and subnet_name is not set. + - REST - Required when C(state=present) and C(interface_type) is IP. + type: str + + is_ipv4_link_local: + description: + - Specifies the LIF's are to acquire a ipv4 link local address. + - Use case for this is when creating Cluster LIFs to allow for auto assignment of ipv4 link local address. + - Not supported in REST + version_added: '20.1.0' + type: bool + + vserver: + description: + - The name of the vserver to use. + - Required with ZAPI. + - Required with REST for FC interfaces (data vservers). + - Required with REST for SVM-scoped IP interfaces (data vservers). + - Invalid with REST for cluster-scoped IP interfaces. + - To help with transition from ZAPI to REST, vserver is ignored when the role is set to 'cluster', 'node-mgmt', 'intercluster', 'cluster-mgmt'. + - Remove this option to suppress the warning. + required: false + type: str + + firewall_policy: + description: + - Specifies the firewall policy for the LIF. + - This option is deprecated in REST. + - With REST, the module tries to derive a service_policy and may error out. + type: str + + failover_policy: + description: + - Specifies the failover policy for the LIF. + - When using REST, this values are mapped to 'home_port_only', 'default', 'home_node_only', 'sfo_partners_only', 'broadcast_domain_only'. 
+ choices: ['disabled', 'system-defined', 'local-only', 'sfo-partner-only', 'broadcast-domain-wide'] + type: str + + failover_scope: + description: + - Specifies the failover scope for the LIF. + - REST only, and only for IP interfaces. Not supported for FC interfaces. + choices: ['home_port_only', 'default', 'home_node_only', 'sfo_partners_only', 'broadcast_domain_only'] + type: str + version_added: '21.13.0' + + failover_group: + description: + - Specifies the failover group for the LIF. + - Not supported with REST. + version_added: '20.1.0' + type: str + + subnet_name: + description: + - Subnet where the IP interface address is allocated from. + - If the option is not used, the IP address and netmask need to be provided. + - With REST, ONTAP 9.11.1 or later is required. + - With REST, ipspace must be set. + version_added: 2.8.0 + type: str + + fail_if_subnet_conflicts: + description: + - Creating or updating an IP Interface fails if the specified IP address falls within the address range of a named subnet. + - Set this value to false to use the specified IP address and to assign the subnet owning that address to the interface. + - This option is only supported with REST and requires ONTAP 9.11.1 or later. + version_added: 22.2.0 + type: bool + + admin_status: + choices: ['up', 'down'] + description: + - Specifies the administrative status of the LIF. + type: str + + is_auto_revert: + description: + - If true, data LIF will revert to its home node under certain circumstances such as startup, + - and load balancing migration capability is disabled automatically + type: bool + + force_subnet_association: + description: + - Set this to true to acquire the address from the named subnet and assign the subnet to the LIF. + - not supported with REST. + version_added: 2.9.0 + type: bool + + protocols: + description: + - Specifies the list of data protocols configured on the LIF. By default, the values in this element are nfs, cifs and fcache. 
+ - Other supported protocols are iscsi and fcp. A LIF can be configured to not support any data protocols by specifying 'none'. + - Protocol values of none, iscsi, fc-nvme or fcp can't be combined with any other data protocol(s). + - address, netmask and firewall_policy parameters are not supported for 'fc-nvme' option. + - This option is ignored with REST, though it can be used to derive C(interface_type) or C(data_protocol). + type: list + elements: str + + data_protocol: + description: + - The data protocol for which the FC interface is configured. + - Ignored with ZAPI or for IP interfaces. + - Required to create a FC type interface. + type: str + choices: ['fcp', 'fc_nvme'] + + dns_domain_name: + description: + - Specifies the unique, fully qualified domain name of the DNS zone of this LIF. + - Supported from ONTAP 9.9.0 or later in REST. + - Not supported for FC interfaces. + version_added: 2.9.0 + type: str + + listen_for_dns_query: + description: + - If True, this IP address will listen for DNS queries for the dnszone specified. + - Not supported with REST. + version_added: 2.9.0 + type: bool + + is_dns_update_enabled: + description: + - Specifies if DNS update is enabled for this LIF. Dynamic updates will be sent for this LIF if updates are enabled at Vserver level. + - Supported from ONTAP 9.9.1 or later in REST. + - Not supported for FC interfaces. + version_added: 2.9.0 + type: bool + + service_policy: + description: + - Starting with ONTAP 9.5, you can configure LIF service policies to identify a single service or a list of services that will use a LIF. + - In ONTAP 9.5, you can assign service policies only for LIFs in the admin SVM. + - In ONTAP 9.6, you can additionally assign service policies for LIFs in the data SVMs. + - When you specify a service policy for a LIF, you need not specify the data protocol and role for the LIF. + - NOTE that role is still required because of a ZAPI issue. This limitation is removed in ONTAP 9.8. 
+ - Creating LIFs by specifying the role and data protocols is also supported. + version_added: '20.4.0' + type: str + + from_name: + description: name of the interface to be renamed + type: str + version_added: 21.11.0 + + interface_type: + description: + - type of the interface. + - IP is assumed if address or netmask are present. + - IP interfaces includes cluster, intercluster, management, and NFS, CIFS, iSCSI interfaces. + - FC interfaces includes FCP and NVME-FC interfaces. + - ignored with ZAPI. + - required with REST, but maybe derived from deprecated options like C(role), C(protocols), and C(firewall_policy). + type: str + choices: ['fc', 'ip'] + version_added: 21.13.0 + + ipspace: + description: + - IPspace name is required with REST for cluster-scoped interfaces. It is optional with SVM scope. + - ignored with ZAPI. + - ignored for FC interface. + type: str + version_added: 21.13.0 + + broadcast_domain: + description: + - broadcast_domain name can be used to specify the location on an IP interface with REST, as an alternative to node or port. + - only used when creating an IP interface to select a node, ignored if the interface already exists. + - if the broadcast domain is not found, make sure to check the ipspace value. + - home_port and broadcast_domain are mutually exclusive. home_node may or may not be present. + - not supported for FC interface. + - ignored with ZAPI. + type: str + version_added: 21.21.0 + + ignore_zapi_options: + description: + - ignore unsupported options that should not be relevant. + - ignored with ZAPI. + choices: ['failover_group', 'force_subnet_association', 'listen_for_dns_query'] + type: list + elements: str + default: ['force_subnet_association'] + version_added: 21.13.0 + + probe_port: + description: + - Probe port for Cloud load balancer - only valid in the Azure environment. + - Not supported with ZAPI or with FC interfaces. + - Requires ONTAP 9.10.1 or later. 
+ type: int + version_added: 22.1.0 +notes: + - REST support requires ONTAP 9.7 or later. + - Support check_mode. +''' + +EXAMPLES = ''' + - name: Create interface - ZAPI + netapp.ontap.na_ontap_interface: + state: present + interface_name: data2 + home_port: e0d + home_node: laurentn-vsim1 + role: data + protocols: + - nfs + - cifs + admin_status: up + failover_policy: local-only + firewall_policy: mgmt + is_auto_revert: true + address: 10.10.10.10 + netmask: 255.255.255.0 + force_subnet_association: false + dns_domain_name: test.com + listen_for_dns_query: true + is_dns_update_enabled: true + vserver: svm1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create data interface - REST - NAS + netapp.ontap.na_ontap_interface: + state: present + interface_name: data2 + home_port: e0d + home_node: laurentn-vsim1 + admin_status: up + failover_scope: home_node_only + service_policy: default-data-files + is_auto_revert: true + interface_type: ip + address: 10.10.10.10 + netmask: 255.255.255.0 + vserver: svm1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create cluster interface - ZAPI + netapp.ontap.na_ontap_interface: + state: present + interface_name: cluster_lif + home_port: e0a + home_node: cluster1-01 + role: cluster + admin_status: up + is_auto_revert: true + is_ipv4_link_local: true + vserver: Cluster + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create cluster interface - REST + netapp.ontap.na_ontap_interface: + state: present + interface_name: cluster_lif + home_port: e0a + home_node: cluster1-01 + service_policy: default-cluster + admin_status: up + is_auto_revert: true + vserver: Cluster + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Rename interface + 
netapp.ontap.na_ontap_interface: + state: present + from_name: ansibleSVM_lif + interface_name: ansibleSVM_lif01 + vserver: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Migrate an interface + netapp.ontap.na_ontap_interface: + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + vserver: ansible + https: true + validate_certs: false + state: present + interface_name: carchi_interface3 + home_port: e0d + home_node: ansdev-stor-1 + current_node: ansdev-stor-2 + role: data + failover_policy: local-only + firewall_policy: mgmt + is_auto_revert: true + address: 10.10.10.12 + netmask: 255.255.255.0 + force_subnet_association: false + admin_status: up + + - name: Delete interface + netapp.ontap.na_ontap_interface: + state: absent + interface_name: data2 + vserver: svm1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +''' + +RETURN = """ + +""" + +import time +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, netapp_ipaddress + +FAILOVER_POLICIES = ['disabled', 'system-defined', 'local-only', 'sfo-partner-only', 'broadcast-domain-wide'] +FAILOVER_SCOPES = ['home_port_only', 'default', 'home_node_only', 'sfo_partners_only', 'broadcast_domain_only'] +REST_UNSUPPORTED_OPTIONS = ['is_ipv4_link_local'] +REST_IGNORABLE_OPTIONS = ['failover_group', 'force_subnet_association', 'listen_for_dns_query'] + + +class NetAppOntapInterface: + ''' object to describe interface info 
''' + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=[ + 'present', 'absent'], default='present'), + interface_name=dict(required=True, type='str'), + interface_type=dict(type='str', choices=['fc', 'ip']), + ipspace=dict(type='str'), + broadcast_domain=dict(type='str'), + home_node=dict(required=False, type='str', default=None), + current_node=dict(required=False, type='str'), + home_port=dict(required=False, type='str'), + current_port=dict(required=False, type='str'), + role=dict(required=False, type='str'), + is_ipv4_link_local=dict(required=False, type='bool', default=None), + address=dict(required=False, type='str'), + netmask=dict(required=False, type='str'), + vserver=dict(required=False, type='str'), + firewall_policy=dict(required=False, type='str', default=None), + failover_policy=dict(required=False, type='str', default=None, + choices=['disabled', 'system-defined', + 'local-only', 'sfo-partner-only', 'broadcast-domain-wide']), + failover_scope=dict(required=False, type='str', default=None, + choices=['home_port_only', 'default', + 'home_node_only', 'sfo_partners_only', 'broadcast_domain_only']), + failover_group=dict(required=False, type='str'), + admin_status=dict(required=False, choices=['up', 'down']), + subnet_name=dict(required=False, type='str'), + is_auto_revert=dict(required=False, type='bool', default=None), + protocols=dict(required=False, type='list', elements='str'), + data_protocol=dict(required=False, type='str', choices=['fc_nvme', 'fcp']), + force_subnet_association=dict(required=False, type='bool', default=None), + dns_domain_name=dict(required=False, type='str'), + listen_for_dns_query=dict(required=False, type='bool'), + is_dns_update_enabled=dict(required=False, type='bool'), + service_policy=dict(required=False, type='str', default=None), + from_name=dict(required=False, type='str'), + 
ignore_zapi_options=dict(required=False, type='list', elements='str', default=['force_subnet_association'], choices=REST_IGNORABLE_OPTIONS), + probe_port=dict(required=False, type='int'), + fail_if_subnet_conflicts=dict(required=False, type='bool'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + mutually_exclusive=[ + ['subnet_name', 'address'], + ['subnet_name', 'netmask'], + ['is_ipv4_link_local', 'address'], + ['is_ipv4_link_local', 'netmask'], + ['is_ipv4_link_local', 'subnet_name'], + ['failover_policy', 'failover_scope'], + ], + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = [key for key in REST_IGNORABLE_OPTIONS if key not in self.parameters['ignore_zapi_options']] + unsupported_rest_properties.extend(REST_UNSUPPORTED_OPTIONS) + if self.na_helper.safe_get(self.parameters, ['address']): + self.parameters['address'] = netapp_ipaddress.validate_and_compress_ip_address(self.parameters['address'], self.module) + partially_supported_rest_properties = [['dns_domain_name', (9, 9, 0)], ['is_dns_update_enabled', (9, 9, 1)], ['probe_port', (9, 10, 1)], + ['subnet_name', (9, 11, 1)], ['fail_if_subnet_conflicts', (9, 11, 1)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties) + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 7, 0): + msg = 'REST requires ONTAP 9.7 or later for interface APIs.' + self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + + if self.use_rest: + self.cluster_nodes = None # cached value to limit number of API calls. + self.home_node = None # cached value to limit number of API calls. 
+ self.map_failover_policy() + self.validate_rest_input_parameters() + # REST supports both netmask and cidr for ipv4 but cidr only for ipv6. + if self.parameters.get('netmask'): + self.parameters['netmask'] = str(netapp_ipaddress.netmask_to_netmask_length(self.parameters.get('address'), + self.parameters['netmask'], self.module)) + elif netapp_utils.has_netapp_lib() is False: + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + for option in ('probe_port', 'fail_if_subnet_conflicts'): + if self.parameters.get(option) is not None: + self.module.fail_json(msg='Error option %s requires REST.' % option) + if 'vserver' not in self.parameters: + self.module.fail_json(msg='missing required argument with ZAPI: vserver') + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + # ZAPI supports only netmask. + if self.parameters.get('netmask'): + self.parameters['netmask'] = netapp_ipaddress.netmask_length_to_netmask(self.parameters.get('address'), + self.parameters['netmask'], self.module) + + def map_failover_policy(self): + if self.use_rest and 'failover_policy' in self.parameters: + mapping = dict(zip(FAILOVER_POLICIES, FAILOVER_SCOPES)) + self.parameters['failover_scope'] = mapping[self.parameters['failover_policy']] + + def set_interface_type(self, interface_type): + if 'interface_type' in self.parameters: + if self.parameters['interface_type'] != interface_type: + self.module.fail_json(msg="Error: mismatch between configured interface_type: %s and derived interface_type: %s." + % (self.parameters['interface_type'], interface_type)) + else: + self.parameters['interface_type'] = interface_type + + def derive_fc_data_protocol(self): + protocols = self.parameters.get('protocols') + if not protocols: + return + if len(protocols) > 1: + self.module.fail_json(msg="A single protocol entry is expected for FC interface, got %s." 
% protocols) + mapping = {'fc-nvme': 'fc_nvme', 'fc_nvme': 'fc_nvme', 'fcp': 'fcp'} + if protocols[0] not in mapping: + self.module.fail_json(msg="Unexpected protocol value %s." % protocols[0]) + data_protocol = mapping[protocols[0]] + if 'data_protocol' in self.parameters and self.parameters['data_protocol'] != data_protocol: + self.module.fail_json(msg="Error: mismatch between configured data_protocol: %s and data_protocols: %s" + % (self.parameters['data_protocol'], protocols)) + self.parameters['data_protocol'] = data_protocol + + def derive_interface_type(self): + protocols = self.parameters.get('protocols') + if protocols in (None, ["none"]): + if self.parameters.get('role') in ('cluster', 'intercluster') or any(x in self.parameters for x in ('address', 'netmask', 'subnet_name')): + self.set_interface_type('ip') + return + protocol_types = set() + unknown_protocols = [] + for protocol in protocols: + if protocol.lower() in ['fc-nvme', 'fcp']: + protocol_types.add('fc') + elif protocol.lower() in ['nfs', 'cifs', 'iscsi']: + protocol_types.add('ip') + elif protocol.lower() != 'none': + # none is an allowed value with ZAPI + unknown_protocols.append(protocol) + errors = [] + if unknown_protocols: + errors.append('unexpected value(s) for protocols: %s' % unknown_protocols) + if len(protocol_types) > 1: + errors.append('incompatible value(s) for protocols: %s' % protocols) + if errors: + self.module.fail_json(msg='Error: unable to determine interface type, please set interface_type: %s' % (' - '.join(errors))) + if protocol_types: + self.set_interface_type(protocol_types.pop()) + return + + def derive_block_file_type(self, protocols): + block_p, file_p, fcp = False, False, False + if protocols is None: + fcp = self.parameters.get('interface_type') == 'fc' + return fcp, file_p, fcp + block_values, file_values = [], [] + for protocol in protocols: + if protocol.lower() in ['fc-nvme', 'fcp', 'iscsi']: + block_p = True + block_values.append(protocol) + if 
protocol.lower() in ['fc-nvme', 'fcp']: + fcp = True + elif protocol.lower() in ['nfs', 'cifs']: + file_p = True + file_values.append(protocol) + if block_p and file_p: + self.module.fail_json(msg="Cannot use any of %s with %s" % (block_values, file_values)) + return block_p, file_p, fcp + + def get_interface_record_rest(self, if_type, query, fields): + if 'ipspace' in self.parameters and if_type == 'ip': + query['ipspace.name'] = self.parameters['ipspace'] + return rest_generic.get_one_record(self.rest_api, self.get_net_int_api(if_type), query, fields) + + def get_interface_records_rest(self, if_type, query, fields): + if 'ipspace' in self.parameters: + if if_type == 'ip': + query['ipspace.name'] = self.parameters['ipspace'] + else: + self.module.warn("ipspace is ignored for FC interfaces.") + records, error = rest_generic.get_0_or_more_records(self.rest_api, self.get_net_int_api(if_type), query, fields) + if error and 'are available in precluster.' in error: + # in precluster mode, network APIs are not available! + self.module.fail_json(msg="This module cannot use REST in precluster mode, ZAPI can be forced with use_rest: never. 
Error: %s" + % error) + return records, error + + def get_net_int_api(self, if_type=None): + if if_type is None: + if_type = self.parameters.get('interface_type') + if if_type is None: + self.module.fail_json(msg='Error: missing option "interface_type (or could not be derived)') + return 'network/%s/interfaces' % if_type + + def find_interface_record(self, records, home_node, name): + full_name = "%s_%s" % (home_node, name) if home_node is not None else name + full_name_records = [record for record in records if record['name'] == full_name] + if len(full_name_records) > 1: + self.module.fail_json(msg='Error: multiple records for: %s - %s' % (full_name, full_name_records)) + return full_name_records[0] if full_name_records else None + + def find_exact_match(self, records, name): + """ with vserver, we expect an exact match + but ONTAP transforms cluster interface names by prepending the home_port + """ + if 'vserver' in self.parameters: + if len(records) > 1: + self.module.fail_json(msg='Error: unexpected records for name: %s, vserver: %s - %s' + % (name, self.parameters['vserver'], records)) + return records[0] if records else None + # since our queries included a '*', we expect multiple records + # an exact match is _ or . + # is there an exact match on name only?
+ record = self.find_interface_record(records, None, name) + # now matching with home_port as a prefix + if 'home_node' in self.parameters and self.parameters['home_node'] != 'localhost': + home_record = self.find_interface_record(records, self.parameters['home_node'], name) + if record and home_record: + self.module.warn('Found both %s, selecting %s' % ([record['name'] for record in (record, home_record)], home_record['name'])) + else: + # look for all known nodes + home_node_records = [] + for home_node in self.get_cluster_node_names_rest(): + home_record = self.find_interface_record(records, home_node, name) + if home_record: + home_node_records.append(home_record) + if len(home_node_records) > 1: + self.module.fail_json(msg='Error: multiple matches for name: %s: %s. Set home_node parameter.' + % (name, [record['name'] for record in home_node_records])) + home_record = home_node_records[0] if home_node_records else None + if record and home_node_records: + self.module.fail_json(msg='Error: multiple matches for name: %s: %s. Set home_node parameter.' + % (name, [record['name'] for record in (record, home_record)])) + if home_record: + record = home_record + if record and name == self.parameters['interface_name'] and name != record['name']: + # fix name, otherwise we'll attempt a rename :( + self.parameters['interface_name'] = record['name'] + self.module.warn('adjusting name from %s to %s' % (name, record['name'])) + return record + + def get_interface_rest(self, name): + """ + Return details about the interface + :param: + name : Name of the interface + + :return: Details about the interface. None if not found. 
+ :rtype: dict + """ + self.derive_interface_type() + if_type = self.parameters.get('interface_type') + if 'vserver' in self.parameters: + query_ip = { + 'name': name, + 'svm.name': self.parameters['vserver'] + } + query_fc = query_ip + else: + query_ip = { + # ONTAP renames cluster interfaces, use a * to find them + 'name': '*%s' % name, + 'scope': 'cluster' + } + query_fc = None + fields = 'name,location,uuid,enabled,svm.name' + fields_fc = fields + ',data_protocol' + fields_ip = fields + ',ip,service_policy' + if self.parameters.get('dns_domain_name'): + fields_ip += ',dns_zone' + if self.parameters.get('probe_port') is not None: + fields_ip += ',probe_port' + if self.parameters.get('is_dns_update_enabled') is not None: + fields_ip += ',ddns_enabled' + if self.parameters.get('subnet_name') is not None: + fields_ip += ',subnet' + records, error, records2, error2 = None, None, None, None + if if_type in [None, 'ip']: + records, error = self.get_interface_records_rest('ip', query_ip, fields_ip) + if if_type in [None, 'fc'] and query_fc: + records2, error2 = self.get_interface_records_rest('fc', query_fc, fields_fc) + if records and records2: + msg = 'Error fetching interface %s - found duplicate entries, please indicate interface_type.' 
% name + msg += ' - ip interfaces: %s' % records + msg += ' - fc interfaces: %s' % records2 + self.module.fail_json(msg=msg) + if error is None and error2 is not None and records: + # ignore error on fc if ip interface is found + error2 = None + if error2 is None and error is not None and records2: + # ignore error on ip if fc interface is found + error = None + if error or error2: + errors = [to_native(err) for err in (error, error2) if err] + self.module.fail_json(msg='Error fetching interface details for %s: %s' % (name, ' - '.join(errors)), + exception=traceback.format_exc()) + if records: + self.set_interface_type('ip') + if records2: + self.set_interface_type('fc') + records = records2 + + record = self.find_exact_match(records, name) if records else None + return self.dict_from_record(record) + + def dict_from_record(self, record): + if not record: + return None + # Note: broadcast_domain is CreateOnly + return_value = { + 'interface_name': record['name'], + 'interface_type': self.parameters['interface_type'], + 'uuid': record['uuid'], + 'admin_status': 'up' if record['enabled'] else 'down', + } + # home_node/home_port not present for FC on ONTAP 9.7. 
+ if self.na_helper.safe_get(record, ['location', 'home_node', 'name']): + return_value['home_node'] = record['location']['home_node']['name'] + if self.na_helper.safe_get(record, ['location', 'home_port', 'name']): + return_value['home_port'] = record['location']['home_port']['name'] + if self.na_helper.safe_get(record, ['svm', 'name']): + return_value['vserver'] = record['svm']['name'] + if 'data_protocol' in record: + return_value['data_protocol'] = record['data_protocol'] + if 'auto_revert' in record['location']: + return_value['is_auto_revert'] = record['location']['auto_revert'] + if 'failover' in record['location']: + return_value['failover_scope'] = record['location']['failover'] + # if interface_attributes.get_child_by_name('failover-group'): + # return_value['failover_group'] = interface_attributes['failover-group'] + if self.na_helper.safe_get(record, ['ip', 'address']): + return_value['address'] = netapp_ipaddress.validate_and_compress_ip_address(record['ip']['address'], self.module) + if self.na_helper.safe_get(record, ['ip', 'netmask']) is not None: + return_value['netmask'] = record['ip']['netmask'] + if self.na_helper.safe_get(record, ['service_policy', 'name']): + return_value['service_policy'] = record['service_policy']['name'] + if self.na_helper.safe_get(record, ['location', 'node', 'name']): + return_value['current_node'] = record['location']['node']['name'] + if self.na_helper.safe_get(record, ['location', 'port', 'name']): + return_value['current_port'] = record['location']['port']['name'] + if self.na_helper.safe_get(record, ['dns_zone']): + return_value['dns_domain_name'] = record['dns_zone'] + if self.na_helper.safe_get(record, ['probe_port']) is not None: + return_value['probe_port'] = record['probe_port'] + if 'ddns_enabled' in record: + return_value['is_dns_update_enabled'] = record['ddns_enabled'] + if self.na_helper.safe_get(record, ['subnet', 'name']): + return_value['subnet_name'] = record['subnet']['name'] + return return_value + + 
def get_node_port(self, uuid): + record, error = self.get_interface_record_rest(self.parameters['interface_type'], {'uuid': uuid}, 'location') + if error or not record: + return None, None, error + node = self.na_helper.safe_get(record, ['location', 'node', 'name']) + port = self.na_helper.safe_get(record, ['location', 'port', 'name']) + return node, port, None + + def get_interface(self, name=None): + """ + Return details about the interface + :param: + name : Name of the interface + + :return: Details about the interface. None if not found. + :rtype: dict + """ + if name is None: + name = self.parameters['interface_name'] + if self.use_rest: + return self.get_interface_rest(name) + + interface_info = netapp_utils.zapi.NaElement('net-interface-get-iter') + interface_attributes = netapp_utils.zapi.NaElement('net-interface-info') + interface_attributes.add_new_child('interface-name', name) + interface_attributes.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(interface_attributes) + interface_info.add_child_elem(query) + try: + result = self.server.invoke_successfully(interface_info, True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error fetching interface details for %s: %s' % + (name, to_native(exc)), + exception=traceback.format_exc()) + return_value = None + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) >= 1: + + interface_attributes = result.get_child_by_name('attributes-list'). 
\ + get_child_by_name('net-interface-info') + return_value = { + 'interface_name': name, + 'admin_status': interface_attributes['administrative-status'], + 'home_port': interface_attributes['home-port'], + 'home_node': interface_attributes['home-node'], + 'failover_policy': interface_attributes['failover-policy'].replace('_', '-'), + } + if interface_attributes.get_child_by_name('is-auto-revert'): + return_value['is_auto_revert'] = (interface_attributes['is-auto-revert'] == 'true') + if interface_attributes.get_child_by_name('failover-group'): + return_value['failover_group'] = interface_attributes['failover-group'] + if interface_attributes.get_child_by_name('address'): + return_value['address'] = netapp_ipaddress.validate_and_compress_ip_address(interface_attributes['address'], self.module) + if interface_attributes.get_child_by_name('netmask'): + return_value['netmask'] = interface_attributes['netmask'] + if interface_attributes.get_child_by_name('firewall-policy'): + return_value['firewall_policy'] = interface_attributes['firewall-policy'] + if interface_attributes.get_child_by_name('dns-domain-name') not in ('none', None): + return_value['dns_domain_name'] = interface_attributes['dns-domain-name'] + else: + return_value['dns_domain_name'] = None + if interface_attributes.get_child_by_name('listen-for-dns-query'): + return_value['listen_for_dns_query'] = self.na_helper.get_value_for_bool(True, interface_attributes[ + 'listen-for-dns-query']) + if interface_attributes.get_child_by_name('is-dns-update-enabled'): + return_value['is_dns_update_enabled'] = self.na_helper.get_value_for_bool(True, interface_attributes[ + 'is-dns-update-enabled']) + if interface_attributes.get_child_by_name('is-ipv4-link-local'): + return_value['is_ipv4_link_local'] = self.na_helper.get_value_for_bool(True, interface_attributes[ + 'is-ipv4-link-local']) + if interface_attributes.get_child_by_name('service-policy'): + return_value['service_policy'] = 
interface_attributes['service-policy'] + if interface_attributes.get_child_by_name('current-node'): + return_value['current_node'] = interface_attributes['current-node'] + if interface_attributes.get_child_by_name('current-port'): + return_value['current_port'] = interface_attributes['current-port'] + return return_value + + @staticmethod + def set_options(options, parameters): + """ set attributes for create or modify """ + if parameters.get('role') is not None: + options['role'] = parameters['role'] + if parameters.get('home_node') is not None: + options['home-node'] = parameters['home_node'] + if parameters.get('home_port') is not None: + options['home-port'] = parameters['home_port'] + if parameters.get('subnet_name') is not None: + options['subnet-name'] = parameters['subnet_name'] + if parameters.get('address') is not None: + options['address'] = parameters['address'] + if parameters.get('netmask') is not None: + options['netmask'] = parameters['netmask'] + if parameters.get('failover_policy') is not None: + options['failover-policy'] = parameters['failover_policy'] + if parameters.get('failover_group') is not None: + options['failover-group'] = parameters['failover_group'] + if parameters.get('firewall_policy') is not None: + options['firewall-policy'] = parameters['firewall_policy'] + if parameters.get('is_auto_revert') is not None: + options['is-auto-revert'] = 'true' if parameters['is_auto_revert'] else 'false' + if parameters.get('admin_status') is not None: + options['administrative-status'] = parameters['admin_status'] + if parameters.get('force_subnet_association') is not None: + options['force-subnet-association'] = 'true' if parameters['force_subnet_association'] else 'false' + if parameters.get('dns_domain_name') is not None: + options['dns-domain-name'] = parameters['dns_domain_name'] + if parameters.get('listen_for_dns_query') is not None: + options['listen-for-dns-query'] = 'true' if parameters['listen_for_dns_query'] else 'false' + if 
parameters.get('is_dns_update_enabled') is not None: + options['is-dns-update-enabled'] = 'true' if parameters['is_dns_update_enabled'] else 'false' + if parameters.get('is_ipv4_link_local') is not None: + options['is-ipv4-link-local'] = 'true' if parameters['is_ipv4_link_local'] else 'false' + if parameters.get('service_policy') is not None: + options['service-policy'] = parameters['service_policy'] + + def fix_errors(self, options, errors): + '''ignore role and firewall_policy if a service_policy can be safely derived''' + block_p, file_p, fcp = self.derive_block_file_type(self.parameters.get('protocols')) + if 'role' in errors: + fixed = False + if errors['role'] == 'data' and errors.get('firewall_policy', 'data') == 'data': + if fcp: + # service_policy is not supported for FC interfaces + fixed = True + elif file_p and self.parameters.get('service_policy', 'default-data-files') == 'default-data-files': + options['service_policy'] = 'default-data-files' + fixed = True + elif block_p and self.parameters.get('service_policy', 'default-data-blocks') == 'default-data-blocks': + options['service_policy'] = 'default-data-blocks' + fixed = True + if errors['role'] == 'data' and errors.get('firewall_policy') == 'mgmt': + options['service_policy'] = 'default-management' + fixed = True + if errors['role'] == 'intercluster' and errors.get('firewall_policy') in [None, 'intercluster']: + options['service_policy'] = 'default-intercluster' + fixed = True + if errors['role'] == 'cluster' and errors.get('firewall_policy') in [None, 'mgmt']: + options['service_policy'] = 'default-cluster' + fixed = True + if errors['role'] == 'data' and fcp and errors.get('firewall_policy') is None: + # ignore role for FC interface + fixed = True + if fixed: + errors.pop('role') + errors.pop('firewall_policy', None) + + def set_options_rest(self, parameters): + """ set attributes for create or modify """ + def add_ip(options, key, value): + if 'ip' not in options: + options['ip'] = {} + 
options['ip'][key] = value + + def add_location(options, key, value, node=None): + if 'location' not in options: + options['location'] = {} + # Note: broadcast_domain is CreateOnly + if key in ['home_node', 'home_port', 'node', 'port', 'broadcast_domain']: + options['location'][key] = {'name': value} + else: + options['location'][key] = value + if key in ['home_port', 'port']: + options['location'][key]['node'] = {'name': node} + + def get_node_for_port(parameters, pkey): + if pkey == 'current_port': + return parameters.get('current_node') or self.parameters.get('home_node') or self.get_home_node_for_cluster() + elif pkey == 'home_port': + return self.parameters.get('home_node') or self.get_home_node_for_cluster() + else: + return None + + options, migrate_options, errors = {}, {}, {} + + # We normally create using home_port, and migrate to current. + # But for FC, home_port is not supported on 9.7 or earlier! + create_with_current = False + if parameters is None: + parameters = self.parameters + if self.parameters['interface_type'] == 'fc' and 'home_port' not in self.parameters: + create_with_current = True + + mapping_params_to_rest = { + 'admin_status': 'enabled', + 'interface_name': 'name', + 'vserver': 'svm.name', + # LOCATION + 'current_port': 'port', + 'home_port': 'home_port' + } + if self.parameters['interface_type'] == 'ip': + mapping_params_to_rest.update({ + 'ipspace': 'ipspace.name', + 'service_policy': 'service_policy', + 'dns_domain_name': 'dns_zone', + 'is_dns_update_enabled': 'ddns_enabled', + 'probe_port': 'probe_port', + 'subnet_name': 'subnet.name', + 'fail_if_subnet_conflicts': 'fail_if_subnet_conflicts', + # IP + 'address': 'address', + 'netmask': 'netmask', + # LOCATION + 'broadcast_domain': 'broadcast_domain', + 'failover_scope': 'failover', + 'is_auto_revert': 'auto_revert', + # home_node/current_node supported only in ip interfaces. 
+ 'home_node': 'home_node', + 'current_node': 'node' + }) + if self.parameters['interface_type'] == 'fc': + mapping_params_to_rest['data_protocol'] = 'data_protocol' + ip_keys = ('address', 'netmask') + location_keys = ('home_port', 'home_node', 'current_port', 'current_node', 'failover_scope', 'is_auto_revert', 'broadcast_domain') + + # don't add node location when port structure is already present + has_home_port, has_current_port = False, False + if 'home_port' in parameters: + has_home_port = True + if 'current_port' in parameters: + has_current_port = True + + for pkey, rkey in mapping_params_to_rest.items(): + if pkey in parameters: + if pkey == 'admin_status': + options[rkey] = parameters[pkey] == 'up' + elif pkey in ip_keys: + add_ip(options, rkey, parameters[pkey]) + elif pkey in location_keys: + if has_home_port and pkey == 'home_node': + continue + if has_current_port and pkey == 'current_node': + continue + dest = migrate_options if rkey in ('node', 'port') and not create_with_current else options + add_location(dest, rkey, parameters[pkey], get_node_for_port(parameters, pkey)) + else: + options[rkey] = parameters[pkey] + + keys_in_error = ('role', 'failover_group', 'firewall_policy', 'force_subnet_association', + 'listen_for_dns_query', 'is_ipv4_link_local') + for pkey in keys_in_error: + if pkey in parameters: + errors[pkey] = parameters[pkey] + + return options, migrate_options, errors + + def set_protocol_option(self, required_keys): + """ set protocols for create """ + if self.parameters.get('protocols') is None: + return None + data_protocols_obj = netapp_utils.zapi.NaElement('data-protocols') + for protocol in self.parameters.get('protocols'): + if protocol.lower() in ['fc-nvme', 'fcp']: + if 'address' in required_keys: + required_keys.remove('address') + if 'home_port' in required_keys: + required_keys.remove('home_port') + if 'netmask' in required_keys: + required_keys.remove('netmask') + not_required_params = set(['address', 'netmask', 
'firewall_policy']) + if not not_required_params.isdisjoint(set(self.parameters.keys())): + self.module.fail_json(msg='Error: Following parameters for creating interface are not supported' + ' for data-protocol fc-nvme: %s' % ', '.join(not_required_params)) + data_protocols_obj.add_new_child('data-protocol', protocol) + return data_protocols_obj + + def get_cluster_node_names_rest(self): + ''' get cluster node names, but the cluster may not exist yet + return: + empty list if the cluster cannot be reached + a list of nodes + ''' + if self.cluster_nodes is None: + records, error = rest_generic.get_0_or_more_records(self.rest_api, 'cluster/nodes', fields='name,uuid,cluster_interfaces') + if error: + self.module.fail_json(msg='Error fetching cluster node info: %s' % to_native(error), + exception=traceback.format_exc()) + self.cluster_nodes = records or [] + return [record['name'] for record in self.cluster_nodes] + + def get_home_node_for_cluster(self): + ''' get the first node name from this cluster ''' + if self.use_rest: + if not self.home_node: + nodes = self.get_cluster_node_names_rest() + if nodes: + self.home_node = nodes[0] + return self.home_node + + get_node = netapp_utils.zapi.NaElement('cluster-node-get-iter') + attributes = { + 'query': { + 'cluster-node-info': {} + } + } + get_node.translate_struct(attributes) + try: + result = self.server.invoke_successfully(get_node, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + if str(exc.code) == '13003' or exc.message == 'ZAPI is not enabled in pre-cluster mode.': + return None + self.module.fail_json(msg='Error fetching node for interface %s: %s' % + (self.parameters['interface_name'], to_native(exc)), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + attributes = result.get_child_by_name('attributes-list') + return attributes.get_child_by_name('cluster-node-info').get_child_content('node-name') + return 
None + + def validate_rest_input_parameters(self, action=None): + if 'vserver' in self.parameters and self.parameters.get('role') in ['cluster', 'intercluster', 'node-mgmt', 'cluster-mgmt']: + # REST only supports DATA SVMs + del self.parameters['vserver'] + self.module.warn('Ignoring vserver with REST for non data SVM.') + errors = [] + if action == 'create': + if 'vserver' not in self.parameters and 'ipspace' not in self.parameters: + errors.append('ipspace name must be provided if scope is cluster, or vserver for svm scope.') + if self.parameters['interface_type'] == 'fc': + unsupported_fc_options = ['broadcast_domain', 'dns_domain_name', 'is_dns_update_enabled', 'probe_port', 'subnet_name', + 'fail_if_subnet_conflicts'] + used_unsupported_fc_options = [option for option in unsupported_fc_options if option in self.parameters] + if used_unsupported_fc_options: + plural = 's' if len(used_unsupported_fc_options) > 1 else '' + errors.append('%s option%s only supported for IP interfaces: %s, interface_type: %s' + % (', '.join(used_unsupported_fc_options), plural, self.parameters.get('interface_name'), self.parameters['interface_type'])) + if self.parameters.get('home_port') and self.parameters.get('broadcast_domain'): + errors.append('home_port and broadcast_domain are mutually exclusive for creating: %s' + % self.parameters.get('interface_name')) + if self.parameters.get('role') == "intercluster" and self.parameters.get('protocols') is not None: + errors.append('Protocol cannot be specified for intercluster role, failed to create interface.') + if errors: + self.module.fail_json(msg='Error: %s' % ' '.join(errors)) + + ignored_keys = [] + for key in self.parameters.get('ignore_zapi_options', []): + if key in self.parameters: + del self.parameters[key] + ignored_keys.append(key) + if ignored_keys: + self.module.warn("Ignoring %s" % ', '.join(ignored_keys)) + # if role is intercluster, protocol cannot be specified + + def validate_required_parameters(self, keys): + ''' 
+ Validate if required parameters for create or modify are present. + Parameter requirement might vary based on given data-protocol. + :return: None + ''' + home_node = self.parameters.get('home_node') or self.get_home_node_for_cluster() + # validate if mandatory parameters are present for create or modify + errors = [] + if self.use_rest and home_node is None and self.parameters.get('home_port') is not None: + errors.append('Cannot guess home_node, home_node is required when home_port is present with REST.') + if 'broadcast_domain_home_port_or_home_node' in keys: + if all(x not in self.parameters for x in ['broadcast_domain', 'home_port', 'home_node']): + errors.append("At least one of 'broadcast_domain', 'home_port', 'home_node' is required to create an IP interface.") + keys.remove('broadcast_domain_home_port_or_home_node') + if not keys.issubset(set(self.parameters.keys())): + errors.append('Missing one or more required parameters for creating interface: %s.' % ', '.join(keys)) + if 'interface_type' in keys and 'interface_type' in self.parameters: + if self.parameters['interface_type'] not in ['fc', 'ip']: + errors.append('unexpected value for interface_type: %s.' 
% self.parameters['interface_type']) + elif self.parameters['interface_type'] == 'fc': + if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0): + if 'home_port' in self.parameters: + errors.append("'home_port' is not supported for FC interfaces with 9.7, use 'current_port', avoid home_node.") + if 'home_node' in self.parameters: + self.module.warn("Avoid 'home_node' with FC interfaces with 9.7, use 'current_node'.") + if 'vserver' not in self.parameters: + errors.append("A data 'vserver' is required for FC interfaces.") + if 'service_policy' in self.parameters: + errors.append("'service_policy' is not supported for FC interfaces.") + if 'role' in self.parameters and self.parameters.get('role') != 'data': + errors.append("'role' is deprecated, and 'data' is the only value supported for FC interfaces: found %s." % self.parameters.get('role')) + if 'probe_port' in self.parameters: + errors.append("'probe_port' is not supported for FC interfaces.") + if errors: + self.module.fail_json(msg='Error: %s' % ' '.join(errors)) + + def validate_modify_parameters(self, body): + """ Only the following keys can be modified: + enabled, ip, location, name, service_policy + """ + bad_keys = [key for key in body if key not in ['enabled', 'ip', 'location', 'name', 'service_policy', 'dns_zone', 'ddns_enabled', 'subnet.name', + 'fail_if_subnet_conflicts']] + if bad_keys: + plural = 's' if len(bad_keys) > 1 else '' + self.module.fail_json(msg='The following option%s cannot be modified: %s' % (plural, ', '.join(bad_keys))) + + def build_rest_body(self, modify=None): + required_keys = set(['interface_type']) # python 2.6 syntax + # running validation twice, as interface_type dictates the second set of requirements + self.validate_required_parameters(required_keys) + self.validate_rest_input_parameters(action='modify' if modify else 'create') + if modify: + # force the value of fail_if_subnet_conflicts as it is writeOnly + if self.parameters.get('fail_if_subnet_conflicts') 
is not None: + modify['fail_if_subnet_conflicts'] = self.parameters['fail_if_subnet_conflicts'] + else: + required_keys = set() + required_keys.add('interface_name') + if self.parameters['interface_type'] == 'fc': + self.derive_fc_data_protocol() + required_keys.add('data_protocol') + if 'home_port' not in self.parameters: + # home_port is not supported with 9.7 + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0): + required_keys.add('home_port') + else: + required_keys.add('current_port') + if self.parameters['interface_type'] == 'ip': + if 'subnet_name' not in self.parameters: + required_keys.add('address') + required_keys.add('netmask') + required_keys.add('broadcast_domain_home_port_or_home_node') + self.validate_required_parameters(required_keys) + body, migrate_body, errors = self.set_options_rest(modify) + self.fix_errors(body, errors) + if errors: + self.module.fail_json(msg='Error %s interface, unsupported options: %s' + % ('modifying' if modify else 'creating', str(errors))) + if modify: + self.validate_modify_parameters(body) + return body, migrate_body + + def create_interface_rest(self, body): + ''' calling REST to create interface ''' + query = {'return_records': 'true'} + records, error = rest_generic.post_async(self.rest_api, self.get_net_int_api(), body, query) + if error: + self.module.fail_json(msg='Error creating interface %s: %s' % (self.parameters['interface_name'], to_native(error)), + exception=traceback.format_exc()) + return records + + def create_interface(self, body): + ''' calling zapi to create interface ''' + if self.use_rest: + return self.create_interface_rest(body) + + required_keys = set(['role', 'home_port']) + if self.parameters.get('subnet_name') is None and self.parameters.get('is_ipv4_link_local') is None: + required_keys.add('address') + required_keys.add('netmask') + if self.parameters.get('service_policy') is not None: + required_keys.remove('role') + data_protocols_obj = 
self.set_protocol_option(required_keys) + self.validate_required_parameters(required_keys) + + options = {'interface-name': self.parameters['interface_name'], + 'vserver': self.parameters['vserver']} + NetAppOntapInterface.set_options(options, self.parameters) + interface_create = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-create', **options) + if data_protocols_obj is not None: + interface_create.add_child_elem(data_protocols_obj) + try: + self.server.invoke_successfully(interface_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + # msg: "Error Creating interface ansible_interface: NetApp API failed. Reason - 17:A LIF with the same name already exists" + if to_native(exc.code) == "17": + self.na_helper.changed = False + else: + self.module.fail_json(msg='Error Creating interface %s: %s' % + (self.parameters['interface_name'], to_native(exc)), + exception=traceback.format_exc()) + + def delete_interface_rest(self, uuid): + ''' calling zapi to delete interface ''' + + dummy, error = rest_generic.delete_async(self.rest_api, self.get_net_int_api(), uuid) + if error: + self.module.fail_json(msg='Error deleting interface %s: %s' % (self.parameters['interface_name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_interface(self, current_status, current_interface, uuid): + ''' calling zapi to delete interface ''' + if current_status == 'up': + self.parameters['admin_status'] = 'down' + if self.use_rest: + # only for fc interfaces disable is required before delete. 
+ if current_interface == 'fc': + self.modify_interface_rest(uuid, {'enabled': False}) + else: + self.modify_interface({'admin_status': 'down'}) + + if self.use_rest: + return self.delete_interface_rest(uuid) + + interface_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'net-interface-delete', **{'interface-name': self.parameters['interface_name'], + 'vserver': self.parameters['vserver']}) + try: + self.server.invoke_successfully(interface_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error deleting interface %s: %s' % + (self.parameters['interface_name'], to_native(exc)), + exception=traceback.format_exc()) + + def modify_interface_rest(self, uuid, body): + ''' calling REST to modify interface ''' + if not body: + return + dummy, error = rest_generic.patch_async(self.rest_api, self.get_net_int_api(), uuid, body) + if error: + self.module.fail_json(msg='Error modifying interface %s: %s' % (self.parameters['interface_name'], to_native(error)), + exception=traceback.format_exc()) + + def migrate_interface_rest(self, uuid, body): + # curiously, we sometimes need to send the request twice (well, always in my experience) + errors = [] + desired_node = self.na_helper.safe_get(body, ['location', 'node', 'name']) + desired_port = self.na_helper.safe_get(body, ['location', 'port', 'name']) + for __ in range(12): + self.modify_interface_rest(uuid, body) + time.sleep(10) + node, port, error = self.get_node_port(uuid) + if error is None and desired_node in [None, node] and desired_port in [None, port]: + return + if errors or error is not None: + errors.append(str(error)) + if errors: + self.module.fail_json(msg='Errors waiting for migration to complete: %s' % ' - '.join(errors)) + else: + self.module.warn('Failed to confirm interface is migrated after 120 seconds') + + def modify_interface(self, modify, uuid=None, body=None): + """ + Modify the interface. 
+ """ + if self.use_rest: + return self.modify_interface_rest(uuid, body) + + # Current_node and current_port don't exist in modify only migrate, so we need to remove them from the list + migrate = {} + modify_options = dict(modify) + if modify_options.get('current_node') is not None: + migrate['current_node'] = modify_options.pop('current_node') + if modify_options.get('current_port') is not None: + migrate['current_port'] = modify_options.pop('current_port') + if modify_options: + options = {'interface-name': self.parameters['interface_name'], + 'vserver': self.parameters['vserver'] + } + NetAppOntapInterface.set_options(options, modify_options) + interface_modify = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-modify', **options) + try: + self.server.invoke_successfully(interface_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as err: + self.module.fail_json(msg='Error modifying interface %s: %s' % + (self.parameters['interface_name'], to_native(err)), + exception=traceback.format_exc()) + # if home node has been changed we need to migrate the interface + if migrate: + self.migrate_interface() + + def migrate_interface(self): + # ZAPI + interface_migrate = netapp_utils.zapi.NaElement('net-interface-migrate') + if self.parameters.get('current_node') is None: + self.module.fail_json(msg='current_node must be set to migrate') + interface_migrate.add_new_child('destination-node', self.parameters['current_node']) + if self.parameters.get('current_port') is not None: + interface_migrate.add_new_child('destination-port', self.parameters['current_port']) + interface_migrate.add_new_child('lif', self.parameters['interface_name']) + interface_migrate.add_new_child('vserver', self.parameters['vserver']) + try: + self.server.invoke_successfully(interface_migrate, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error migrating %s: %s' + % (self.parameters['current_node'], 
to_native(error)), + exception=traceback.format_exc()) + # like with REST, the migration may not be completed on the first try! + # just blindly do it twice. + try: + self.server.invoke_successfully(interface_migrate, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error migrating %s: %s' + % (self.parameters['current_node'], to_native(error)), + exception=traceback.format_exc()) + + def rename_interface(self): + options = { + 'interface-name': self.parameters['from_name'], + 'new-name': self.parameters['interface_name'], + 'vserver': self.parameters['vserver'] + } + interface_rename = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-rename', **options) + try: + self.server.invoke_successfully(interface_rename, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error renaming %s to %s: %s' + % (self.parameters['from_name'], self.parameters['interface_name'], to_native(error)), + exception=traceback.format_exc()) + + def get_action(self): + modify, rename, new_name = None, None, None + current = self.get_interface() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + # create by renaming existing interface + # self.parameters['interface_name'] may be overriden in self.get_interface so save a copy + new_name = self.parameters['interface_name'] + old_interface = self.get_interface(self.parameters['from_name']) + rename = self.na_helper.is_rename_action(old_interface, current) + if rename is None: + self.module.fail_json(msg='Error renaming interface %s: no interface with from_name %s.' 
+ % (self.parameters['interface_name'], self.parameters['from_name'])) + if rename: + current = old_interface + cd_action = None + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if rename and self.use_rest: + rename = False + if 'interface_name' not in modify: + modify['interface_name'] = new_name + if modify and modify.get('home_node') == 'localhost': + modify.pop('home_node') + if not modify: + self.na_helper.changed = False + + return cd_action, modify, rename, current + + def build_rest_payloads(self, cd_action, modify, current): + body, migrate_body = None, None + uuid = current.get('uuid') if current else None + if self.use_rest: + if cd_action == 'create': + body, migrate_body = self.build_rest_body() + elif modify: + # fc interface supports only home_port and port in POST/PATCH. + # add home_port and current_port in modify for home_node and current_node respectively to form home_port/port. + if modify.get('home_node') and not modify.get('home_port') and self.parameters['interface_type'] == 'fc': + modify['home_port'] = current['home_port'] + # above will modify home_node of fc interface, after modify if requires to update current_node, it will error out for fc interface. + # migrate not supported for fc interface. 
+ if modify.get('current_node') and not modify.get('current_port') and self.parameters['interface_type'] == 'fc': + modify['current_port'] = current['current_port'] + body, migrate_body = self.build_rest_body(modify) + if (modify or cd_action == 'delete') and uuid is None: + self.module.fail_json(msg='Error, expecting uuid in existing record') + desired_home_port = self.na_helper.safe_get(body, ['location', 'home_port']) + desired_current_port = self.na_helper.safe_get(migrate_body, ['location', 'port']) + # if try to modify both home_port and current_port in FC interface and if its equal, make migrate_body None + if self.parameters.get('interface_type') == 'fc' and desired_home_port and desired_current_port and desired_home_port == desired_current_port: + migrate_body = None + return uuid, body, migrate_body + + def apply(self): + ''' calling all interface features ''' + cd_action, modify, rename, current = self.get_action() + # build the payloads even in check_mode, to perform validations + uuid, body, migrate_body = self.build_rest_payloads(cd_action, modify, current) + if self.na_helper.changed and not self.module.check_mode: + if rename and not self.use_rest: + self.rename_interface() + modify.pop('interface_name') + if cd_action == 'create': + records = self.create_interface(body) + if records: + # needed for migrate after creation + uuid = records['records'][0]['uuid'] + elif cd_action == 'delete': + # interface type returned in REST but not in ZAPI. + interface_type = current['interface_type'] if self.use_rest else None + self.delete_interface(current['admin_status'], interface_type, uuid) + elif modify: + self.modify_interface(modify, uuid, body) + if migrate_body: + # for 9.7 or earlier, allow modify current node/port for fc interface. 
+ if self.parameters.get('interface_type') == 'fc' and self.use_rest and self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0): + self.module.fail_json(msg="Error: cannot migrate FC interface") + self.migrate_interface_rest(uuid, migrate_body) + + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + interface = NetAppOntapInterface() + interface.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py new file mode 100644 index 000000000..d8cf8144e --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ipspace.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +""" +this is ipspace module + +# (c) 2018, NTT Europe Ltd. +# (c) 2020-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: na_ontap_ipspace + +short_description: NetApp ONTAP Manage an ipspace + +version_added: 2.9.0 + +author: + - NTTE Storage Engineering (@vicmunoz) + +description: + - Manage an ipspace for an Ontap Cluster + +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap + +options: + state: + description: + - Whether the specified ipspace should exist or not + choices: ['present', 'absent'] + type: str + default: present + name: + description: + - The name of the ipspace to manage + required: true + type: str + from_name: + description: + - Name of the existing ipspace to be renamed to name + type: str +''' + +EXAMPLES = """ + - name: Create ipspace + netapp.ontap.na_ontap_ipspace: + state: present + name: ansibleIpspace + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete 
ipspace + netapp.ontap.na_ontap_ipspace: + state: absent + name: ansibleIpspace + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Rename ipspace + netapp.ontap.na_ontap_ipspace: + state: present + name: ansibleIpspace_newname + from_name: ansibleIpspace + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapIpspace(object): + '''Class with ipspace operations''' + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.uuid = None + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def ipspace_get_iter(self, name): + """ + Return net-ipspaces-get-iter query results + :param name: Name of the ipspace + :return: NaElement if ipspace found, None otherwise + """ + ipspace_get_iter = 
netapp_utils.zapi.NaElement('net-ipspaces-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'net-ipspaces-info', **{'ipspace': name}) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + ipspace_get_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully( + ipspace_get_iter, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + # Error 14636 denotes an ipspace does not exist + # Error 13073 denotes an ipspace not found + if to_native(error.code) == "14636" or to_native(error.code) == "13073": + return None + self.module.fail_json( + msg="Error getting ipspace %s: %s" % (name, to_native(error)), + exception=traceback.format_exc()) + return result + + def get_ipspace(self, name=None): + """ + Fetch details if ipspace exists + :param name: Name of the ipspace to be fetched + :return: + Dictionary of current details if ipspace found + None if ipspace is not found + """ + if name is None: + name = self.parameters['name'] + if self.use_rest: + api = 'network/ipspaces' + query = {'name': name, 'fields': 'uuid'} + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error getting ipspace %s: %s" % (name, error)) + if record: + self.uuid = record['uuid'] + return record + return None + else: + ipspace_get = self.ipspace_get_iter(name) + if (ipspace_get and ipspace_get.get_child_by_name('num-records') and + int(ipspace_get.get_child_content('num-records')) >= 1): + current_ipspace = dict() + attr_list = ipspace_get.get_child_by_name('attributes-list') + attr = attr_list.get_child_by_name('net-ipspaces-info') + current_ipspace['name'] = attr.get_child_content('ipspace') + return current_ipspace + return None + + def create_ipspace(self): + """ + Create ipspace + :return: None + """ + if self.use_rest: + api = 'network/ipspaces' + body = {'name': self.parameters['name']} + dummy, error = 
rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error provisioning ipspace %s: %s" % (self.parameters['name'], error)) + else: + ipspace_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'net-ipspaces-create', **{'ipspace': self.parameters['name']}) + try: + self.server.invoke_successfully(ipspace_create, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg="Error provisioning ipspace %s: %s" % ( + self.parameters['name'], + to_native(error)), + exception=traceback.format_exc()) + + def delete_ipspace(self): + """ + Destroy ipspace + :return: None + """ + if self.use_rest: + api = 'network/ipspaces' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid) + if error: + self.module.fail_json(msg="Error removing ipspace %s: %s" % (self.parameters['name'], error)) + else: + ipspace_destroy = netapp_utils.zapi.NaElement.create_node_with_children( + 'net-ipspaces-destroy', + **{'ipspace': self.parameters['name']}) + try: + self.server.invoke_successfully( + ipspace_destroy, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg="Error removing ipspace %s: %s" % ( + self.parameters['name'], + to_native(error)), + exception=traceback.format_exc()) + + def rename_ipspace(self): + """ + Rename an ipspace + :return: Nothing + """ + if self.use_rest: + api = 'network/ipspaces' + body = {'name': self.parameters['name']} + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body) + if error: + self.module.fail_json(msg="Error renaming ipspace %s: %s" % (self.parameters['from_name'], error)) + else: + ipspace_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'net-ipspaces-rename', + **{'ipspace': self.parameters['from_name'], + 'new-name': self.parameters['name']}) + try: + self.server.invoke_successfully(ipspace_rename, + enable_tunneling=False) + except 
netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg="Error renaming ipspace %s: %s" % ( + self.parameters['from_name'], + to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + """ + Apply action to the ipspace + :return: Nothing + """ + current = self.get_ipspace() + # rename and create are mutually exclusive + rename, modify = None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + rename = self.na_helper.is_rename_action( + self.get_ipspace(self.parameters['from_name']), + current) + if rename is None: + self.module.fail_json( + msg="Error renaming: ipspace %s does not exist" % + self.parameters['from_name']) + # reset cd_action to None and add name to modify to indicate rename. + cd_action = None + modify = {'name': self.parameters['name']} + + if self.na_helper.changed and not self.module.check_mode: + if rename: + self.rename_ipspace() + elif cd_action == 'create': + self.create_ipspace() + elif cd_action == 'delete': + self.delete_ipspace() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action + :return: nothing + """ + obj = NetAppOntapIpspace() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py new file mode 100644 index 000000000..e5a30f970 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi.py @@ -0,0 +1,329 @@ +#!/usr/bin/python + +# (c) 2017-2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_iscsi +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: na_ontap_iscsi +short_description: 
NetApp ONTAP manage iSCSI service +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - create, delete, start, stop iSCSI service on SVM. + +options: + + state: + description: + - Whether the service should be present or deleted. + choices: ['present', 'absent'] + type: str + default: present + + service_state: + description: + - Whether the specified service should running. + choices: ['started', 'stopped'] + type: str + + vserver: + required: true + type: str + description: + - The name of the vserver to use. + + target_alias: + type: str + description: + - The iSCSI target alias of the iSCSI service. + - The target alias can contain one (1) to 128 characters and feature any printable character except space (" "). + - A PATCH request with an empty alias ("") clears the alias. + - This option is REST only. + version_added: 22.2.0 + +''' + +EXAMPLES = """ +- name: Create iscsi service + netapp.ontap.na_ontap_iscsi: + state: present + service_state: started + vserver: ansibleVServer + target_alias: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Stop Iscsi service + netapp.ontap.na_ontap_iscsi: + state: present + service_state: stopped + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Delete Iscsi service + netapp.ontap.na_ontap_iscsi: + state: absent + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ + +""" + +import traceback +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as 
netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapISCSI: + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + service_state=dict(required=False, type='str', choices=['started', 'stopped'], default=None), + vserver=dict(required=True, type='str'), + target_alias=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.uuid = None + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.unsupported_zapi_properties = ['target_alias'] + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + for unsupported_zapi_property in self.unsupported_zapi_properties: + if self.parameters.get(unsupported_zapi_property) is not None: + msg = "Error: %s option is not supported with ZAPI. It can only be used with REST." 
% unsupported_zapi_property + self.module.fail_json(msg=msg) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + self.safe_strip() + + def safe_strip(self): + """ strip the left and right spaces of string """ + if 'target_alias' in self.parameters: + self.parameters['target_alias'] = self.parameters['target_alias'].strip() + return + + def get_iscsi(self): + """ + Return details about the iscsi service + + :return: Details about the iscsi service + :rtype: dict + """ + if self.use_rest: + return self.get_iscsi_rest() + iscsi_info = netapp_utils.zapi.NaElement('iscsi-service-get-iter') + iscsi_attributes = netapp_utils.zapi.NaElement('iscsi-service-info') + + iscsi_attributes.add_new_child('vserver', self.parameters['vserver']) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(iscsi_attributes) + + iscsi_info.add_child_elem(query) + try: + result = self.server.invoke_successfully(iscsi_info, True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error finding iscsi service in %s: %s" % (self.parameters['vserver'], to_native(e)), + exception=traceback.format_exc()) + return_value = None + + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) >= 1: + + iscsi = result.get_child_by_name( + 'attributes-list').get_child_by_name('iscsi-service-info') + if iscsi: + is_started = 'started' if iscsi.get_child_content('is-available') == 'true' else 'stopped' + return_value = { + 'service_state': is_started + } + return return_value + + def create_iscsi_service(self): + """ + Create iscsi service and start if requested + """ + if self.use_rest: + return self.create_iscsi_service_rest() + + iscsi_service = netapp_utils.zapi.NaElement.create_node_with_children( + 'iscsi-service-create', + **{'start': 'true' if self.parameters.get('service_state', 'started') == 'started' else 'false' + }) + + try: + 
self.server.invoke_successfully(iscsi_service, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error creating iscsi service: % s" % (to_native(e)), + exception=traceback.format_exc()) + + def delete_iscsi_service(self, current): + """ + Delete the iscsi service + """ + if self.use_rest: + return self.delete_iscsi_service_rest(current) + + if current['service_state'] == 'started': + self.stop_iscsi_service() + + iscsi_delete = netapp_utils.zapi.NaElement.create_node_with_children('iscsi-service-destroy') + + try: + self.server.invoke_successfully(iscsi_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error deleting iscsi service on vserver %s: %s" % (self.parameters['vserver'], to_native(e)), + exception=traceback.format_exc()) + + def stop_iscsi_service(self): + """ + Stop iscsi service + """ + + iscsi_stop = netapp_utils.zapi.NaElement.create_node_with_children('iscsi-service-stop') + + try: + self.server.invoke_successfully(iscsi_stop, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error Stopping iscsi service on vserver %s: %s" % (self.parameters['vserver'], to_native(e)), + exception=traceback.format_exc()) + + def start_iscsi_service(self): + """ + Start iscsi service + """ + iscsi_start = netapp_utils.zapi.NaElement.create_node_with_children( + 'iscsi-service-start') + + try: + self.server.invoke_successfully(iscsi_start, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error starting iscsi service on vserver %s: %s" % (self.parameters['vserver'], to_native(e)), + exception=traceback.format_exc()) + + def get_iscsi_rest(self): + api = 'protocols/san/iscsi/services' + query = {'svm.name': self.parameters['vserver']} + fields = 'svm,enabled,target.alias' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + 
self.module.fail_json(msg="Error finding iscsi service in %s: %s" % (self.parameters['vserver'], error)) + if record: + self.uuid = record['svm']['uuid'] + is_started = 'started' if record['enabled'] else 'stopped' + return { + 'service_state': is_started, + 'target_alias': "" if self.na_helper.safe_get(record, ['target', 'alias']) is None else record['target']['alias'], + } + return None + + def create_iscsi_service_rest(self): + api = 'protocols/san/iscsi/services' + body = { + 'svm.name': self.parameters['vserver'], + 'enabled': True if self.parameters.get('service_state', 'started') == 'started' else False + } + if 'target_alias' in self.parameters: + body['target.alias'] = self.parameters['target_alias'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error creating iscsi service: % s" % error) + + def delete_iscsi_service_rest(self, current): + # stop iscsi service before delete. + if current['service_state'] == 'started': + self.start_or_stop_iscsi_service_rest('stopped') + api = 'protocols/san/iscsi/services' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid) + if error: + self.module.fail_json(msg="Error deleting iscsi service on vserver %s: %s" % (self.parameters["vserver"], error)) + + def start_or_stop_iscsi_service_rest(self, service_state): + api = 'protocols/san/iscsi/services' + enabled = True if service_state == 'started' else False + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, {'enabled': enabled}) + if error: + self.module.fail_json(msg="Error %s iscsi service on vserver %s: %s" % (service_state[0:5] + 'ing', self.parameters["vserver"], error)) + + def modify_iscsi_service_state_and_target(self, modify): + body = {} + api = 'protocols/san/iscsi/services' + if 'target_alias' in modify: + body['target.alias'] = self.parameters['target_alias'] + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body) + if error: + 
self.module.fail_json(msg="Error modifying iscsi service target alias on vserver %s: %s" % (self.parameters["vserver"], error)) + + def modify_iscsi_service_rest(self, modify, current): + if self.use_rest: + if 'service_state' in modify: + self.start_or_stop_iscsi_service_rest(modify['service_state']) + if 'target_alias' in modify: + self.modify_iscsi_service_state_and_target(modify) + else: + if 'service_state' in modify: + if modify['service_state'] == 'started': + self.start_iscsi_service() + else: + self.stop_iscsi_service() + + def apply(self): + current = self.get_iscsi() + modify = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_iscsi_service() + elif cd_action == 'delete': + self.delete_iscsi_service(current) + elif modify: + self.modify_iscsi_service_rest(modify, current) + # TODO: include other details about the lun (size, etc.) 
+ result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + v = NetAppOntapISCSI() + v.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py new file mode 100644 index 000000000..1b0cda134 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_iscsi_security.py @@ -0,0 +1,350 @@ +#!/usr/bin/python + +# (c) 2019-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_iscsi_security +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete/Modify iscsi security. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_iscsi_security +options: + state: + choices: ['present', 'absent'] + description: + - Whether the specified initiator should exist or not. + default: present + type: str + vserver: + description: + - Name of the vserver to use. + required: true + type: str + auth_type: + description: + - Specifies the authentication type. + choices: ['chap', 'none', 'deny'] + type: str + initiator: + description: + - Specifies the name of the initiator. + required: true + type: str + address_ranges: + description: + - May be a single IPv4 or IPv6 address or a range containing a startaddress and an end address. + - The start and end addresses themselves are included in the range. + - If not present, the initiator is allowed to log in from any IP address. + type: list + elements: str + inbound_username: + description: + - Inbound CHAP username. + - Required for CHAP. A null username is not allowed. + type: str + inbound_password: + description: + - Inbound CHAP user password. 
# NOTE(review): the module DOCUMENTATION block opens before this excerpt and is
# not reproduced here.

EXAMPLES = """
    - name: create
      netapp.ontap.na_ontap_iscsi_security:
        hostname: 0.0.0.0
        username: user
        password: pass
        vserver: test_svm
        state: present
        initiator: eui.9999956789abcdef
        inbound_username: user_1
        inbound_password: password_1
        outbound_username: user_2
        outbound_password: password_2
        auth_type: chap
        address_ranges: 10.125.10.0-10.125.10.10,10.125.193.78

    - name: modify outbound username
      netapp.ontap.na_ontap_iscsi_security:
        hostname: 0.0.0.0
        username: user
        password: pass
        vserver: test_svm
        state: present
        initiator: eui.9999956789abcdef
        inbound_username: user_1
        inbound_password: password_1
        outbound_username: user_out_3
        outbound_password: password_3
        auth_type: chap
        address_ranges: 10.125.10.0-10.125.10.10,10.125.193.78

    - name: modify address
      netapp.ontap.na_ontap_iscsi_security:
        hostname: 0.0.0.0
        username: user
        password: pass
        vserver: test_svm
        state: present
        initiator: eui.9999956789abcdef
        address_ranges: 10.125.193.90,10.125.10.20-10.125.10.30
"""

RETURN = """
"""

from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh


class NetAppONTAPIscsiSecurity:
    """
    Class with iscsi security methods.

    Manages ONTAP iSCSI initiator security (authentication type, CHAP
    credentials and allowed initiator address ranges) through the REST API
    only (ONTAP 9.6+).
    """
    def __init__(self):
        # Shared ONTAP connection options plus this module's own parameters.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            vserver=dict(required=True, type='str'),
            auth_type=dict(required=False, type='str', choices=['chap', 'none', 'deny']),
            inbound_password=dict(required=False, type='str', no_log=True),
            inbound_username=dict(required=False, type='str'),
            initiator=dict(required=True, type='str'),
            address_ranges=dict(required=False, type='list', elements='str'),
            outbound_password=dict(required=False, type='str', no_log=True),
            outbound_username=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            # CHAP requires inbound credentials; usernames and passwords
            # always travel in pairs.
            required_if=[
                ['auth_type', 'chap', ['inbound_username', 'inbound_password']]
            ],
            required_together=[
                ['inbound_username', 'inbound_password'],
                ['outbound_username', 'outbound_password'],
            ],
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # REST-only module: refuse to run below ONTAP 9.6.
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_iscsi_security:', 9, 6)
        # SVM UUID is needed to address credential records in the REST paths.
        self.uuid = self.get_svm_uuid()

    def get_initiator(self):
        """
        Get current initiator.
        :return: dict of current initiator details, or None (implicitly) when
                 the initiator does not exist on the vserver.
        """
        params = {'fields': '*', 'initiator': self.parameters['initiator']}
        api = 'protocols/san/iscsi/credentials'
        message, error = self.rest_api.get(api, params)
        if error is not None:
            self.module.fail_json(msg="Error on fetching initiator: %s" % error)
        if message['num_records'] > 0:
            record = message['records'][0]
            initiator_details = {'auth_type': record['authentication_type']}
            if initiator_details['auth_type'] == 'chap':
                # The API returns only CHAP user names, never passwords.
                if record['chap'].get('inbound'):
                    initiator_details['inbound_username'] = record['chap']['inbound']['user']
                else:
                    initiator_details['inbound_username'] = None
                if record['chap'].get('outbound'):
                    initiator_details['outbound_username'] = record['chap']['outbound']['user']
                else:
                    initiator_details['outbound_username'] = None
            if record.get('initiator_address'):
                if record['initiator_address'].get('ranges'):
                    ranges = []
                    for address_range in record['initiator_address']['ranges']:
                        # Collapse single-address ranges back to the bare
                        # "a.b.c.d" form used by the address_ranges parameter.
                        if address_range['start'] == address_range['end']:
                            ranges.append(address_range['start'])
                        else:
                            ranges.append(address_range['start'] + '-' + address_range['end'])
                    initiator_details['address_ranges'] = ranges
                else:
                    initiator_details['address_ranges'] = []
            else:
                initiator_details['address_ranges'] = []
            return initiator_details

    def create_initiator(self):
        """
        Create initiator.
        :return: None.
        """
        body = {
            'authentication_type': self.parameters['auth_type'],
            'initiator': self.parameters['initiator']
        }

        if self.parameters['auth_type'] == 'chap':
            # inbound credentials are mandatory for chap (enforced by required_if);
            # outbound credentials are optional.
            chap_info = {'inbound': {'user': self.parameters['inbound_username'], 'password': self.parameters['inbound_password']}}

            if self.parameters.get('outbound_username'):
                chap_info['outbound'] = {'user': self.parameters['outbound_username'], 'password': self.parameters['outbound_password']}
            body['chap'] = chap_info
        address_info = self.get_address_info(self.parameters.get('address_ranges'))
        if address_info is not None:
            body['initiator_address'] = {'ranges': address_info}
        body['svm'] = {'uuid': self.uuid, 'name': self.parameters['vserver']}
        api = 'protocols/san/iscsi/credentials'
        dummy, error = self.rest_api.post(api, body)
        if error is not None:
            self.module.fail_json(msg="Error on creating initiator: %s" % error)

    def delete_initiator(self):
        """
        Delete initiator.
        :return: None.
        """
        api = 'protocols/san/iscsi/credentials/{0}/{1}'.format(self.uuid, self.parameters['initiator'])
        dummy, error = self.rest_api.delete(api)
        if error is not None:
            self.module.fail_json(msg="Error on deleting initiator: %s" % error)

    def modify_initiator(self, modify, current):
        """
        Modify initiator.
        :param modify: dict of attributes that need to change.
        :param current: dict of current attributes, used to preserve CHAP
                        inbound credentials that are not being modified.
        :return: None.
        """
        body = {}
        use_chap = False
        chap_update = False
        chap_update_inbound = False
        chap_update_outbound = False

        if modify.get('auth_type'):
            body['authentication_type'] = modify.get('auth_type')
            if modify['auth_type'] == 'chap':
                # change in auth_type
                chap_update = True
                use_chap = True
        elif current.get('auth_type') == 'chap':
            # we're already using chap
            use_chap = True

        if use_chap and (modify.get('inbound_username') or modify.get('inbound_password')):
            # change in chap inbound credentials
            chap_update = True
            chap_update_inbound = True

        if use_chap and (modify.get('outbound_username') or modify.get('outbound_password')):
            # change in chap outbound credentials
            chap_update = True
            chap_update_outbound = True

        if chap_update and not chap_update_inbound and 'inbound_username' in self.parameters:
            # use credentials from input
            chap_update_inbound = True

        if chap_update and not chap_update_outbound and 'outbound_username' in self.parameters:
            # use credentials from input
            chap_update_outbound = True

        if chap_update:
            chap_info = dict()
            # set values from self.parameters as they may not show as modified
            if chap_update_inbound:
                chap_info['inbound'] = {'user': self.parameters['inbound_username'], 'password': self.parameters['inbound_password']}
            else:
                # use current values as inbound username/password are required
                chap_info['inbound'] = {'user': current.get('inbound_username'), 'password': current.get('inbound_password')}
            if chap_update_outbound:
                chap_info['outbound'] = {'user': self.parameters['outbound_username'], 'password': self.parameters['outbound_password']}
            # NOTE(review): when outbound is not updated, no 'outbound' key is
            # sent - presumably the API preserves existing outbound credentials
            # in that case; confirm against the REST API behavior.
            body['chap'] = chap_info
            # PATCH fails if this is not present, even though there is no change
            body['authentication_type'] = 'chap'

        address_info = self.get_address_info(modify.get('address_ranges'))
        if address_info is not None:
            body['initiator_address'] = {'ranges': address_info}
        api = 'protocols/san/iscsi/credentials/{0}/{1}'.format(self.uuid, self.parameters['initiator'])
        dummy, error = self.rest_api.patch(api, body)
        if error is not None:
            self.module.fail_json(msg="Error on modifying initiator: %s - params: %s" % (error, body))

    def get_address_info(self, address_ranges):
        """
        Convert user supplied "start-end" / "address" strings into the REST
        {'start': ..., 'end': ...} range representation.
        :param address_ranges: list of str, or None.
        :return: list of range dicts, or None when no ranges were supplied.
        """
        if address_ranges is None:
            return None
        address_info = []
        for address in address_ranges:
            address_range = {}
            if '-' in address:
                address_range['end'] = address.split('-')[1]
                address_range['start'] = address.split('-')[0]
            else:
                # a single address is expressed as a degenerate range
                address_range['end'] = address
                address_range['start'] = address
            address_info.append(address_range)
        return address_info

    def apply(self):
        """
        check create/delete/modify operations if needed.
        :return: None.
        """
        current = self.get_initiator()
        action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if action == 'create':
                self.create_initiator()
            elif action == 'delete':
                self.delete_initiator()
            elif modify:
                self.modify_initiator(modify, current)
        result = netapp_utils.generate_result(self.na_helper.changed, action, modify)
        self.module.exit_json(**result)

    def get_svm_uuid(self):
        """
        Get a svm's UUID.
        :return: uuid of the svm; fails the module when the SVM is missing.
        """
        params = {'fields': 'uuid', 'name': self.parameters['vserver']}
        api = "svm/svms"
        message, error = self.rest_api.get(api, params)
        record, error = rrh.check_for_0_or_1_records(api, message, error)
        if error is not None:
            self.module.fail_json(msg="Error on fetching svm uuid: %s" % error)
        if record is None:
            self.module.fail_json(msg="Error on fetching svm uuid, SVM not found: %s" % self.parameters['vserver'])
        return message['records'][0]['uuid']


def main():
    """Execute action"""
    iscsi_obj = NetAppONTAPIscsiSecurity()
    iscsi_obj.apply()


if __name__ == '__main__':
    main()
# NOTE(review): the DOCUMENTATION block for na_ontap_job_schedule opens before
# this excerpt and is not reproduced here.

EXAMPLES = """
    - name: Create Job for 11.30PM at 10th of every month
      netapp.ontap.na_ontap_job_schedule:
        state: present
        name: jobName
        job_minutes: 30
        job_hours: 23
        job_days_of_month: 10
        job_months: -1
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"
    - name: Create Job for 11.30PM at 10th of January, April, July, October for ZAPI and REST
      netapp.ontap.na_ontap_job_schedule:
        state: present
        name: jobName
        job_minutes: 30
        job_hours: 23
        job_days_of_month: 10
        job_months: 1,4,7,10
        month_offset: 1
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"
    - name: Create Job for 11.30PM at 10th of January, April, July, October for ZAPI and REST
      netapp.ontap.na_ontap_job_schedule:
        state: present
        name: jobName
        job_minutes: 30
        job_hours: 23
        job_days_of_month: 10
        job_months: 0,3,6,9
        month_offset: 0
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"
    - name: Create Job for 11.30PM at 10th of January when using REST and February when using ZAPI !!!
      netapp.ontap.na_ontap_job_schedule:
        state: present
        name: jobName
        job_minutes: 30
        job_hours: 23
        job_days_of_month: 10
        job_months: 1
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"
    - name: Delete Job
      netapp.ontap.na_ontap_job_schedule:
        state: absent
        name: jobName
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"
"""

RETURN = """

"""


import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic


class NetAppONTAPJob:
    '''Class with job schedule cron methods.

    Creates/deletes/modifies ONTAP cron job schedules over either ZAPI or
    REST.  The two transports disagree on month numbering (ZAPI 0..11,
    REST 1..12) - the month_offset machinery below normalizes between the
    user-facing convention and whichever backend is in use.
    '''

    def __init__(self):

        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            job_minutes=dict(required=False, type='list', elements='int'),
            job_months=dict(required=False, type='list', elements='int'),
            job_hours=dict(required=False, type='list', elements='int'),
            job_days_of_month=dict(required=False, type='list', elements='int'),
            job_days_of_week=dict(required=False, type='list', elements='int'),
            month_offset=dict(required=False, type='int', choices=[0, 1]),
            cluster=dict(required=False, type='str')
        ))

        # uuid of the REST schedule record, set by get_job_schedule_rest.
        self.uuid = None
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        self.month_offset = self.parameters.get('month_offset')
        if self.month_offset is None:
            # maintain backward compatibility: default to the transport's
            # native convention (REST: 1..12, ZAPI: 0..11).
            self.month_offset = 1 if self.use_rest else 0
        if self.month_offset == 1 and self.parameters.get('job_months') and 0 in self.parameters['job_months']:
            # we explicitly test for 0 as it would be converted to -1, which has a special meaning (all).
            # other value errors will be reported by the API.
            self.module.fail_json(msg='Error: 0 is not a valid value in months if month_offset is set to 1: %s' % self.parameters['job_months'])

        if self.use_rest:
            self.set_playbook_api_key_map()
        elif not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
            self.set_playbook_zapi_key_map()

    def set_playbook_zapi_key_map(self):
        """Map module parameter names onto ZAPI element names."""
        self.na_helper.zapi_string_keys = {
            'name': 'job-schedule-name',
            'cluster': 'job-schedule-cluster'
        }
        # value is (parent element, child element) for each list parameter.
        self.na_helper.zapi_list_keys = {
            'job_minutes': ('job-schedule-cron-minute', 'cron-minute'),
            'job_months': ('job-schedule-cron-month', 'cron-month'),
            'job_hours': ('job-schedule-cron-hour', 'cron-hour'),
            'job_days_of_month': ('job-schedule-cron-day', 'cron-day-of-month'),
            'job_days_of_week': ('job-schedule-cron-day-of-week', 'cron-day-of-week')
        }

    def set_playbook_api_key_map(self):
        """Map module parameter names onto REST 'cron' field names."""
        self.na_helper.params_to_rest_api_keys = {
            'job_minutes': 'minutes',
            'job_months': 'months',
            'job_hours': 'hours',
            'job_days_of_month': 'days',
            'job_days_of_week': 'weekdays'
        }

    def get_job_schedule_rest(self):
        """
        Return details about the job (REST path).
        :param:
            name : Job name
        :return: Details about the Job. None if not found.
        :rtype: dict
        """
        query = {'name': self.parameters['name']}
        if self.parameters.get('cluster'):
            query['cluster'] = self.parameters['cluster']
        record, error = rest_generic.get_one_record(self.rest_api, 'cluster/schedules', query, 'uuid,cron')
        if error is not None:
            self.module.fail_json(msg="Error fetching job schedule: %s" % error)
        if record:
            self.uuid = record['uuid']
            job_details = {'name': record['name']}
            for param_key, rest_key in self.na_helper.params_to_rest_api_keys.items():
                if rest_key in record['cron']:
                    job_details[param_key] = record['cron'][rest_key]
                else:
                    # if any of the job_hours, job_minutes, job_months, job_days are empty:
                    # it means the value is -1 using ZAPI convention
                    job_details[param_key] = [-1]
            # adjust offsets if necessary
            if 'job_months' in job_details and self.month_offset == 0:
                job_details['job_months'] = [x - 1 if x > 0 else x for x in job_details['job_months']]
            # adjust minutes if necessary, -1 means all in ZAPI and for our user facing parameters
            # while REST returns all values
            if 'job_minutes' in job_details and len(job_details['job_minutes']) == 60:
                job_details['job_minutes'] = [-1]
            return job_details
        return None

    def get_job_schedule(self):
        """
        Return details about the job (dispatches to REST or ZAPI).
        :param:
            name : Job name
        :return: Details about the Job. None if not found.
        :rtype: dict
        """
        if self.use_rest:
            return self.get_job_schedule_rest()

        job_get_iter = netapp_utils.zapi.NaElement('job-schedule-cron-get-iter')
        query = {'job-schedule-cron-info': {'job-schedule-name': self.parameters['name']}}
        if self.parameters.get('cluster'):
            query['job-schedule-cron-info']['job-schedule-cluster'] = self.parameters['cluster']
        job_get_iter.translate_struct({'query': query})
        try:
            result = self.server.invoke_successfully(job_get_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error fetching job schedule %s: %s'
                                      % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        job_details = None
        # check if job exists
        if result.get_child_by_name('num-records') and int(result['num-records']) >= 1:
            job_info = result['attributes-list']['job-schedule-cron-info']
            job_details = {}
            for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
                job_details[item_key] = job_info[zapi_key]
            for item_key, zapi_key in self.na_helper.zapi_list_keys.items():
                parent, dummy = zapi_key
                job_details[item_key] = self.na_helper.get_value_for_list(from_zapi=True,
                                                                          zapi_parent=job_info.get_child_by_name(parent)
                                                                          )
                if item_key == 'job_months' and self.month_offset == 1:
                    # convert ZAPI's 0-based months to the 1-based convention
                    job_details[item_key] = [int(x) + 1 if int(x) >= 0 else int(x) for x in job_details[item_key]]
                elif item_key == 'job_minutes' and len(job_details[item_key]) == 60:
                    # 60 individual minutes means "all", report as -1
                    job_details[item_key] = [-1]
                else:
                    job_details[item_key] = [int(x) for x in job_details[item_key]]
                # if any of the job_hours, job_minutes, job_months, job_days are empty:
                # it means the value is -1 for ZAPI
                if not job_details[item_key]:
                    job_details[item_key] = [-1]
        return job_details

    def add_job_details(self, na_element_object, values):
        """
        Add children node for create or modify NaElement object.
        :param na_element_object: modify or create NaElement object
        :param values: dictionary of cron values to be added
        :return: None
        """
        for item_key, item_value in values.items():
            if item_key in self.na_helper.zapi_string_keys:
                zapi_key = self.na_helper.zapi_string_keys.get(item_key)
                na_element_object[zapi_key] = item_value
            elif item_key in self.na_helper.zapi_list_keys:
                parent_key, child_key = self.na_helper.zapi_list_keys.get(item_key)
                data = item_value
                if data:
                    if item_key == 'job_months' and self.month_offset == 1:
                        # -1 is a special value; shift 1-based user months back
                        # to ZAPI's 0-based months.
                        data = [str(x - 1) if x > 0 else str(x) for x in data]
                    else:
                        data = [str(x) for x in data]
                na_element_object.add_child_elem(self.na_helper.get_value_for_list(from_zapi=False,
                                                                                  zapi_parent=parent_key,
                                                                                  zapi_child=child_key,
                                                                                  data=data))

    def create_job_schedule(self):
        """
        Creates a job schedule (REST or ZAPI).
        """
        if self.use_rest:
            cron = {}
            for param_key, rest_key in self.na_helper.params_to_rest_api_keys.items():
                # -1 means all in zapi, while empty means all in api.
                if self.parameters.get(param_key):
                    if len(self.parameters[param_key]) == 1 and self.parameters[param_key][0] == -1:
                        # need to set empty value for minutes as this is a required parameter
                        if rest_key == 'minutes':
                            cron[rest_key] = []
                    elif param_key == 'job_months' and self.month_offset == 0:
                        # shift 0-based user months to REST's 1-based months
                        cron[rest_key] = [x + 1 if x >= 0 else x for x in self.parameters[param_key]]
                    else:
                        cron[rest_key] = self.parameters[param_key]

            params = {
                'name': self.parameters['name'],
                'cron': cron
            }
            if self.parameters.get('cluster'):
                params['cluster'] = self.parameters['cluster']
            api = 'cluster/schedules'
            dummy, error = self.rest_api.post(api, params)
            if error is not None:
                self.module.fail_json(msg="Error creating job schedule: %s" % error)

        else:
            job_schedule_create = netapp_utils.zapi.NaElement('job-schedule-cron-create')
            self.add_job_details(job_schedule_create, self.parameters)
            try:
                self.server.invoke_successfully(job_schedule_create,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error creating job schedule %s: %s'
                                          % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def delete_job_schedule(self):
        """
        Delete a job schedule (REST or ZAPI).
        """
        if self.use_rest:
            # self.uuid was recorded by get_job_schedule_rest
            api = 'cluster/schedules/' + self.uuid
            dummy, error = self.rest_api.delete(api)
            if error is not None:
                self.module.fail_json(msg="Error deleting job schedule: %s" % error)
        else:
            job_schedule_delete = netapp_utils.zapi.NaElement('job-schedule-cron-destroy')
            self.add_job_details(job_schedule_delete, self.parameters)
            try:
                self.server.invoke_successfully(job_schedule_delete,
                                                enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error deleting job schedule %s: %s'
                                          % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def modify_job_schedule(self, modify, current):
        """
        modify a job schedule (REST or ZAPI).
        :param modify: dict of attributes to change.
        :param current: dict of current attributes; needed on the REST path
                        because omitting a cron field means "all".
        """

        def set_cron(param_key, rest_key, params, cron):
            # -1 means all in zapi, while empty means all in api.
            if params[param_key] == [-1]:
                cron[rest_key] = []
            elif param_key == 'job_months' and self.month_offset == 0:
                # NOTE(review): unlike create_job_schedule, this does not guard
                # with `x >= 0`, so a months list mixing -1 with other values
                # would map -1 to 0 - presumably such mixed lists never reach
                # here; confirm against get_modified_attributes behavior.
                cron[rest_key] = [x + 1 for x in params[param_key]]
            else:
                cron[rest_key] = params[param_key]

        if self.use_rest:
            cron = {}
            for param_key, rest_key in self.na_helper.params_to_rest_api_keys.items():
                if modify.get(param_key):
                    set_cron(param_key, rest_key, modify, cron)
                elif current.get(param_key):
                    # Usually only include modify attributes, but omitting an attribute means all in api.
                    # Need to add the current attributes in params.
                    set_cron(param_key, rest_key, current, cron)
            params = {
                'cron': cron
            }
            api = 'cluster/schedules/' + self.uuid
            dummy, error = self.rest_api.patch(api, params)
            if error is not None:
                self.module.fail_json(msg="Error modifying job schedule: %s" % error)
        else:
            job_schedule_modify = netapp_utils.zapi.NaElement.create_node_with_children(
                'job-schedule-cron-modify', **{'job-schedule-name': self.parameters['name']})
            self.add_job_details(job_schedule_modify, modify)
            try:
                self.server.invoke_successfully(job_schedule_modify, enable_tunneling=True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying job schedule %s: %s'
                                          % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def apply(self):
        """
        Apply action to job-schedule: decide between create/delete/modify and
        execute it unless running in check mode.
        """
        modify = None
        current = self.get_job_schedule()
        action = self.na_helper.get_cd_action(current, self.parameters)
        if action is None and self.parameters['state'] == 'present':
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if action == 'create' and self.parameters.get('job_minutes') is None:
            # job_minutes is mandatory for create
            self.module.fail_json(msg='Error: missing required parameter job_minutes for create')

        if self.na_helper.changed and not self.module.check_mode:
            if action == 'create':
                self.create_job_schedule()
            elif action == 'delete':
                self.delete_job_schedule()
            elif modify:
                self.modify_job_schedule(modify, current)
        result = netapp_utils.generate_result(self.na_helper.changed, action, modify)
        self.module.exit_json(**result)


def main():
    '''Execute action'''
    job_obj = NetAppONTAPJob()
    job_obj.apply()


if __name__ == '__main__':
    main()
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_kerberos_interface.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_kerberos_interface +short_description: NetApp ONTAP module to modify kerberos interface. +description: + - Enable or disable kerberos interface. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '22.6.0' +author: NetApp Ansible Team (@carchi8py) +options: + state: + description: + - Modify kerberos interface, only present is supported. + choices: ['present'] + type: str + default: present + + interface_name: + description: + - Specifies the name of the logical interface associated with the NFS Kerberos configuration you want to modify. + type: str + required: true + + vserver: + description: + - Specifies the Vserver associated with the NFS Kerberos configuration you want to modify. + type: str + required: true + + enabled: + description: + - Specifies whether to enable or disable Kerberos for NFS on the specified Vserver and logical interface. + - C(service_principal_name) is required when try to enable kerberos. + type: bool + required: true + + keytab_uri: + description: + - Specifies loading a keytab file from the specified URI. + - This value must be in the form of "(ftp|http|https)://(hostname|IPv4 Address|'['IPv6 Address']')...". + type: str + + machine_account: + description: + - Specifies the machine account to create in Active Directory. + - Requires ONTAP 9.12.1 or later. 
+ type: str + + organizational_unit: + description: + - Specifies the organizational unit (OU) under which the Microsoft Active Directory server account will be created + when you enable Kerberos using a realm for Microsoft KDC + type: str + + admin_username: + description: + - Specifies the administrator username. + type: str + + admin_password: + description: + - Specifies the administrator password. + type: str + + service_principal_name: + description: + - Specifies the service principal name (SPN) of the Kerberos configuration you want to modify. + - This value must be in the form nfs/host_name@REALM. + - host_name is the fully qualified host name of the Kerberos server, nfs is the service, and REALM is the name of the Kerberos realm. + - Specify Kerberos realm names in uppercase. + aliases: ['spn'] + type: str + +notes: + - Supports check_mode. + - Module supports only REST and requires ONTAP 9.7 or later. +''' + +EXAMPLES = ''' + + - name: Enable kerberos interface. + netapp.ontap.na_ontap_kerberos_interface: + interface_name: lif_svm1_284 + vserver: ansibleSVM + enabled: true + service_principal_name: nfs/lif_svm1_284@RELAM2 + admin_username: "{{ admin_user }}" + admin_password: "{{ admin_pass }}" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: "{{ https }}" + validate_certs: "{{ certs }}" + + + - name: Disable kerberos interface. 
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic


class NetAppOntapKerberosInterface:
    """Enable or disable Kerberos for NFS on a vserver logical interface (REST only)."""

    def __init__(self):
        spec = netapp_utils.na_ontap_host_argument_spec()
        spec.update(dict(
            state=dict(required=False, type='str', choices=['present'], default='present'),
            interface_name=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            enabled=dict(required=True, type='bool'),
            keytab_uri=dict(required=False, type='str', no_log=True),
            machine_account=dict(required=False, type='str'),
            organizational_unit=dict(required=False, type='str'),
            admin_username=dict(required=False, type='str'),
            admin_password=dict(required=False, type='str', no_log=True),
            service_principal_name=dict(required=False, type='str', aliases=['spn'])
        ))
        self.argument_spec = spec

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            # enabling kerberos requires an SPN; admin credentials travel as a pair;
            # a keytab and a machine account are alternative provisioning paths.
            required_if=[('enabled', True, ['service_principal_name'])],
            required_together=[('admin_username', 'admin_password')],
            mutually_exclusive=[('keytab_uri', 'machine_account')]
        )

        self.na_helper = NetAppModule(self.module)
        self.parameters = self.na_helper.check_and_set_parameters(self.module)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        # REST-only module: ONTAP 9.7+, machine_account additionally needs 9.12.1+.
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_kerberos_interface', 9, 7)
        self.rest_api.is_rest_supported_properties(self.parameters, None, [['machine_account', (9, 12, 1)]])
        # LIF uuid, captured by get_kerberos_interface and consumed by the PATCH.
        self.uuid = None

    def get_kerberos_interface(self):
        """Fetch the current Kerberos config for the LIF and record its uuid.

        Fails the module when the interface has no Kerberos configuration.
        Only 'enabled' is returned, as it is the sole attribute compared for
        idempotency.
        """
        api = 'protocols/nfs/kerberos/interfaces'
        fields = 'interface.uuid,enabled,spn'
        if 'machine_account' in self.parameters:
            fields += ',machine_account'
        query = {
            'interface.name': self.parameters['interface_name'],
            'svm.name': self.parameters['vserver'],
            'fields': fields
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg='Error fetching kerberos interface info %s: %s' % (self.parameters['interface_name'], to_native(error)),
                                  exception=traceback.format_exc())
        if record is None:
            self.module.fail_json(msg='Error: Kerberos interface config does not exist for %s' % self.parameters['interface_name'])
        self.uuid = self.na_helper.safe_get(record, ['interface', 'uuid'])
        return {'enabled': record.get('enabled')}

    def modify_kerberos_interface(self):
        """PATCH the Kerberos config, forwarding any optional credentials supplied."""
        api = 'protocols/nfs/kerberos/interfaces'
        # module parameter name -> REST body key, in the order the body is built
        optional_fields = (
            ('keytab_uri', 'keytab_uri'),
            ('organizational_unit', 'organizational_unit'),
            ('service_principal_name', 'spn'),
            ('admin_username', 'user'),
            ('admin_password', 'password'),
            ('machine_account', 'machine_account'),
        )
        body = {'enabled': self.parameters['enabled']}
        for param_name, rest_key in optional_fields:
            if param_name in self.parameters:
                body[rest_key] = self.parameters[param_name]
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
        if error:
            self.module.fail_json(msg='Error modifying kerberos interface %s: %s.' % (self.parameters['interface_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Compare current vs desired state and PATCH when they differ."""
        current = self.get_kerberos_interface()
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            self.modify_kerberos_interface()
        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
        self.module.exit_json(**result)


def main():
    kerberos_obj = NetAppOntapKerberosInterface()
    kerberos_obj.apply()


if __name__ == '__main__':
    main()
+__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_kerberos_realm + +short_description: NetApp ONTAP vserver nfs kerberos realm +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.9.0 +author: Milan Zink (@zeten30) , + +description: + - Create, modify or delete vserver kerberos realm configuration + +options: + + state: + description: + - Whether the Kerberos realm is present or absent. + choices: ['present', 'absent'] + default: 'present' + type: str + + vserver: + description: + - vserver/svm with kerberos realm configured + required: true + type: str + + realm: + description: + - Kerberos realm name + required: true + type: str + + kdc_vendor: + description: + - The vendor of the Key Distribution Centre (KDC) server + - Required if I(state=present) + choices: ['other', 'microsoft'] + type: str + + kdc_ip: + description: + - IP address of the Key Distribution Centre (KDC) server + - Required if I(state=present) + type: str + + kdc_port: + description: + - TCP port on the KDC to be used for Kerberos communication. + - The default for this parameter is 88. + type: int + + clock_skew: + description: + - The clock skew in minutes is the tolerance for accepting tickets with time stamps that do not exactly match the host's system clock. + - The default for this parameter is '5' minutes. + - This option is not supported with REST. + type: str + + comment: + description: + - Optional comment + type: str + + admin_server_ip: + description: + - IP address of the host where the Kerberos administration daemon is running. This is usually the master KDC. + - If this parameter is omitted, the address specified in kdc_ip is used. + - This option is not supported with REST. + type: str + + admin_server_port: + description: + - The TCP port on the Kerberos administration server where the Kerberos administration service is running. + - The default for this parmater is '749'. + - This option is not supported with REST. 
+ type: str + + pw_server_ip: + description: + - IP address of the host where the Kerberos password-changing server is running. + - Typically, this is the same as the host indicated in the adminserver-ip. + - If this parameter is omitted, the IP address in kdc-ip is used. + - This option is not supported with REST. + type: str + + pw_server_port: + description: + - The TCP port on the Kerberos password-changing server where the Kerberos password-changing service is running. + - The default for this parameter is '464'. + - This option is not supported with REST. + type: str + + ad_server_ip: + description: + - IP Address of the Active Directory Domain Controller (DC). This is a mandatory parameter if the kdc-vendor is 'microsoft'. + type: str + version_added: '20.4.0' + + ad_server_name: + description: + - Host name of the Active Directory Domain Controller (DC). This is a mandatory parameter if the kdc-vendor is 'microsoft'. + type: str + version_added: '20.4.0' + +notes: + - supports ZAPI and REST. REST requires ONTAP 9.6 or later. + - supports check mode. 
+''' + +EXAMPLES = ''' + + - name: Create kerberos realm other kdc vendor + netapp.ontap.na_ontap_kerberos_realm: + state: present + realm: 'EXAMPLE.COM' + vserver: 'vserver1' + kdc_ip: '1.2.3.4' + kdc_vendor: 'other' + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create kerberos realm Microsoft kdc vendor + netapp.ontap.na_ontap_kerberos_realm: + state: present + realm: 'EXAMPLE.COM' + vserver: 'vserver1' + kdc_ip: '1.2.3.4' + kdc_vendor: 'microsoft' + ad_server_ip: '0.0.0.0' + ad_server_name: 'server' + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +''' + +RETURN = ''' +''' + +import traceback +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapKerberosRealm: + ''' + Kerberos Realm definition class + ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + admin_server_ip=dict(required=False, type='str'), + admin_server_port=dict(required=False, type='str'), + clock_skew=dict(required=False, type='str'), + comment=dict(required=False, type='str'), + kdc_ip=dict(required=False, type='str'), + kdc_port=dict(required=False, type='int'), + kdc_vendor=dict(required=False, type='str', + choices=['microsoft', 'other']), + pw_server_ip=dict(required=False, type='str'), + pw_server_port=dict(required=False, type='str'), + realm=dict(required=True, type='str'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + ad_server_ip=dict(required=False, type='str'), + 
ad_server_name=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + required_if=[ + ('state', 'present', ['kdc_vendor', 'kdc_ip']), + ('kdc_vendor', 'microsoft', ['ad_server_ip', 'ad_server_name']) + ] + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # Set up Rest API + self.rest_api = netapp_utils.OntapRestAPI(self.module) + unsupported_rest_properties = ['admin_server_ip', 'admin_server_port', 'clock_skew', 'pw_server_ip', 'pw_server_port'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties) + self.svm_uuid = None + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + self.simple_attributes = [ + 'admin_server_ip', + 'admin_server_port', + 'clock_skew', + 'kdc_ip', + 'kdc_vendor', + ] + + def get_krbrealm(self): + ''' + Checks if Kerberos Realm config exists. 
+ + :return: + kerberos realm object if found + None if not found + :rtype: object/None + ''' + if self.use_rest: + return self.get_krbrealm_rest() + + # Make query + krbrealm_info = netapp_utils.zapi.NaElement('kerberos-realm-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm', **{'realm': self.parameters['realm'], + 'vserver-name': self.parameters['vserver']}) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + krbrealm_info.add_child_elem(query) + + try: + result = self.server.invoke_successfully(krbrealm_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching kerberos realm %s: %s' % (self.parameters['realm'], to_native(error))) + + # Get Kerberos Realm details + krbrealm_details = None + if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1): + attributes_list = result.get_child_by_name('attributes-list') + config_info = attributes_list.get_child_by_name('kerberos-realm') + + krbrealm_details = { + 'admin_server_ip': config_info.get_child_content('admin-server-ip'), + 'admin_server_port': config_info.get_child_content('admin-server-port'), + 'clock_skew': config_info.get_child_content('clock-skew'), + 'kdc_ip': config_info.get_child_content('kdc-ip'), + 'kdc_port': int(config_info.get_child_content('kdc-port')), + 'kdc_vendor': config_info.get_child_content('kdc-vendor'), + 'pw_server_ip': config_info.get_child_content('password-server-ip'), + 'pw_server_port': config_info.get_child_content('password-server-port'), + 'realm': config_info.get_child_content('realm'), + 'vserver': config_info.get_child_content('vserver-name'), + 'ad_server_ip': config_info.get_child_content('ad-server-ip'), + 'ad_server_name': config_info.get_child_content('ad-server-name'), + 'comment': config_info.get_child_content('comment') + } + + return krbrealm_details + + def create_krbrealm(self): + 
'''supported + Create Kerberos Realm configuration + ''' + if self.use_rest: + return self.create_krbrealm_rest() + + options = { + 'realm': self.parameters['realm'] + } + + # Other options/attributes + for attribute in self.simple_attributes: + if self.parameters.get(attribute) is not None: + options[str(attribute).replace('_', '-')] = self.parameters[attribute] + + if self.parameters.get('kdc_port'): + options['kdc-port'] = str(self.parameters['kdc_port']) + if self.parameters.get('pw_server_ip') is not None: + options['password-server-ip'] = self.parameters['pw_server_ip'] + if self.parameters.get('pw_server_port') is not None: + options['password-server-port'] = self.parameters['pw_server_port'] + + if self.parameters.get('ad_server_ip') is not None: + options['ad-server-ip'] = self.parameters['ad_server_ip'] + if self.parameters.get('ad_server_name') is not None: + options['ad-server-name'] = self.parameters['ad_server_name'] + + # Initialize NaElement + krbrealm_create = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm-create', **options) + + # Try to create Kerberos Realm configuration + try: + self.server.invoke_successfully(krbrealm_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as errcatch: + self.module.fail_json(msg='Error creating Kerberos Realm configuration %s: %s' % (self.parameters['realm'], to_native(errcatch)), + exception=traceback.format_exc()) + + def delete_krbrealm(self): + ''' + Delete Kerberos Realm configuration + ''' + if self.use_rest: + return self.delete_krbrealm_rest() + krbrealm_delete = netapp_utils.zapi.NaElement.create_node_with_children('kerberos-realm-delete', **{'realm': self.parameters['realm']}) + try: + self.server.invoke_successfully(krbrealm_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as errcatch: + self.module.fail_json(msg='Error deleting Kerberos Realm configuration %s: %s' % ( + self.parameters['realm'], to_native(errcatch)), 
exception=traceback.format_exc()) + + def modify_krbrealm(self, modify): + ''' + Modify Kerberos Realm + :param modify: list of modify attributes + ''' + if self.use_rest: + return self.modify_krbrealm_rest(modify) + krbrealm_modify = netapp_utils.zapi.NaElement('kerberos-realm-modify') + krbrealm_modify.add_new_child('realm', self.parameters['realm']) + + for attribute in modify: + if attribute in self.simple_attributes: + krbrealm_modify.add_new_child(str(attribute).replace('_', '-'), self.parameters[attribute]) + if attribute == 'kdc_port': + krbrealm_modify.add_new_child('kdc-port', str(self.parameters['kdc_port'])) + if attribute == 'pw_server_ip': + krbrealm_modify.add_new_child('password-server-ip', self.parameters['pw_server_ip']) + if attribute == 'pw_server_port': + krbrealm_modify.add_new_child('password-server-port', self.parameters['pw_server_port']) + if attribute == 'ad_server_ip': + krbrealm_modify.add_new_child('ad-server-ip', self.parameters['ad_server_ip']) + if attribute == 'ad_server_name': + krbrealm_modify.add_new_child('ad-server-name', self.parameters['ad_server_name']) + if attribute == 'comment': + krbrealm_modify.add_new_child('comment', self.parameters['comment']) + + # Try to modify Kerberos Realm + try: + self.server.invoke_successfully(krbrealm_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as errcatch: + self.module.fail_json(msg='Error modifying Kerberos Realm %s: %s' % (self.parameters['realm'], to_native(errcatch)), + exception=traceback.format_exc()) + + def get_krbrealm_rest(self): + api = 'protocols/nfs/kerberos/realms' + params = { + 'name': self.parameters['realm'], + 'svm.name': self.parameters['vserver'], + 'fields': 'kdc,ad_server,svm,comment' + } + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching kerberos realm %s: %s' % (self.parameters['realm'], to_native(error))) + if record: + self.svm_uuid = record['svm']['uuid'] + 
return { + 'kdc_ip': self.na_helper.safe_get(record, ['kdc', 'ip']), + 'kdc_port': self.na_helper.safe_get(record, ['kdc', 'port']), + 'kdc_vendor': self.na_helper.safe_get(record, ['kdc', 'vendor']), + 'ad_server_ip': self.na_helper.safe_get(record, ['ad_server', 'address']), + 'ad_server_name': self.na_helper.safe_get(record, ['ad_server', 'name']), + 'comment': self.na_helper.safe_get(record, ['comment']) + } + return None + + def create_krbrealm_rest(self): + api = 'protocols/nfs/kerberos/realms' + body = { + 'name': self.parameters['realm'], + 'svm.name': self.parameters['vserver'], + 'kdc.ip': self.parameters['kdc_ip'], + 'kdc.vendor': self.parameters['kdc_vendor'] + } + if self.parameters.get('kdc_port'): + body['kdc.port'] = self.parameters['kdc_port'] + if self.parameters.get('comment'): + body['comment'] = self.parameters['comment'] + if self.parameters['kdc_vendor'] == 'microsoft': + body['ad_server.address'] = self.parameters['ad_server_ip'] + body['ad_server.name'] = self.parameters['ad_server_name'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating Kerberos Realm configuration %s: %s' % (self.parameters['realm'], to_native(error))) + + def modify_krbrealm_rest(self, modify): + api = 'protocols/nfs/kerberos/realms/%s' % self.svm_uuid + body = {} + if modify.get('kdc_ip'): + body['kdc.ip'] = modify['kdc_ip'] + if modify.get('kdc_vendor'): + body['kdc.vendor'] = modify['kdc_vendor'] + if modify.get('kdc_port'): + body['kdc.port'] = modify['kdc_port'] + if modify.get('comment'): + body['comment'] = modify['comment'] + if modify.get('ad_server_ip'): + body['ad_server.address'] = modify['ad_server_ip'] + if modify.get('ad_server_name'): + body['ad_server.name'] = modify['ad_server_name'] + dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['realm'], body) + if error: + self.module.fail_json(msg='Error modifying Kerberos Realm %s: %s' % 
(self.parameters['realm'], to_native(error))) + + def delete_krbrealm_rest(self): + api = 'protocols/nfs/kerberos/realms/%s' % self.svm_uuid + dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['realm']) + if error: + self.module.fail_json(msg='Error deleting Kerberos Realm configuration %s: %s' % (self.parameters['realm'], to_native(error))) + + def apply(self): + '''Call create/modify/delete operations.''' + current = self.get_krbrealm() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_krbrealm() + elif cd_action == 'delete': + self.delete_krbrealm() + elif modify: + self.modify_krbrealm(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + '''ONTAP Kerberos Realm''' + krbrealm = NetAppOntapKerberosRealm() + krbrealm.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py new file mode 100644 index 000000000..d75d17ee9 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +''' +(c) 2018-2022, NetApp, Inc +GNU General Public License v3.0+ +(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_ldap + +short_description: NetApp ONTAP LDAP +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +version_added: 2.9.0 +author: Milan Zink (@zeten30) / + +description: +- Create, modify or delete LDAP on NetApp ONTAP SVM/vserver + +options: + + state: + description: + - Whether the LDAP is present or 
not. + choices: ['present', 'absent'] + default: 'present' + type: str + + vserver: + description: + - vserver/svm configured to use LDAP + required: true + type: str + + name: + description: + - The name of LDAP client configuration + required: true + type: str + + skip_config_validation: + description: + - Skip LDAP validation + choices: ['true', 'false'] + type: str +''' + +EXAMPLES = ''' + + - name: Enable LDAP on SVM + netapp.ontap.na_ontap_ldap: + state: present + name: 'example_ldap' + vserver: 'vserver1' + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +''' + +RETURN = ''' +''' + +import traceback +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule + + +class NetAppOntapLDAP: + ''' + LDAP Client definition class + ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_zapi_only_spec() + self.argument_spec.update(dict( + name=dict(required=True, type='str'), + skip_config_validation=dict(required=False, default=None, choices=['true', 'false']), + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + msg = 'Error: na_ontap_ldap only supports ZAPI.netapp.ontap.na_ontap_ldap_client should be used instead.' 
+ self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg="the python NetApp-Lib module is required") + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_ldap(self, client_config_name=None): + ''' + Checks if LDAP config exists. + + :return: + ldap config object if found + None if not found + :rtype: object/None + ''' + # Make query + config_info = netapp_utils.zapi.NaElement('ldap-config-get-iter') + + if client_config_name is None: + client_config_name = self.parameters['name'] + + query_details = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config', **{'client-config': client_config_name}) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + config_info.add_child_elem(query) + + result = self.server.invoke_successfully(config_info, enable_tunneling=True) + + # Get LDAP configuration details + config_details = None + if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1): + attributes_list = result.get_child_by_name('attributes-list') + config_info = attributes_list.get_child_by_name('ldap-config') + + # Define config details structure + config_details = {'client_config': config_info.get_child_content('client-config'), + 'skip_config_validation': config_info.get_child_content('skip-config-validation'), + 'vserver': config_info.get_child_content('vserver')} + + return config_details + + def create_ldap(self): + ''' + Create LDAP configuration + ''' + options = { + 'client-config': self.parameters['name'], + 'client-enabled': 'true' + } + + if self.parameters.get('skip_config_validation') is not None: + options['skip-config-validation'] = self.parameters['skip_config_validation'] + + # Initialize NaElement + ldap_create = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config-create', **options) + + # Try to create 
LDAP configuration + try: + self.server.invoke_successfully(ldap_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as errcatch: + self.module.fail_json(msg='Error creating LDAP configuration %s: %s' % (self.parameters['name'], to_native(errcatch)), + exception=traceback.format_exc()) + + def delete_ldap(self): + ''' + Delete LDAP configuration + ''' + ldap_client_delete = netapp_utils.zapi.NaElement.create_node_with_children('ldap-config-delete', **{}) + + try: + self.server.invoke_successfully(ldap_client_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as errcatch: + self.module.fail_json(msg='Error deleting LDAP configuration %s: %s' % ( + self.parameters['name'], to_native(errcatch)), exception=traceback.format_exc()) + + def modify_ldap(self, modify): + ''' + Modify LDAP + :param modify: list of modify attributes + ''' + ldap_modify = netapp_utils.zapi.NaElement('ldap-config-modify') + ldap_modify.add_new_child('client-config', self.parameters['name']) + + for attribute in modify: + if attribute == 'skip_config_validation': + ldap_modify.add_new_child('skip-config-validation', self.parameters[attribute]) + + # Try to modify LDAP + try: + self.server.invoke_successfully(ldap_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as errcatch: + self.module.fail_json(msg='Error modifying LDAP %s: %s' % (self.parameters['name'], to_native(errcatch)), + exception=traceback.format_exc()) + + def apply(self): + '''Call create/modify/delete operations.''' + current = self.get_ldap() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'create': + self.create_ldap() + elif cd_action == 'delete': + self.delete_ldap() + elif modify: + self.modify_ldap(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, 
modify) + self.module.exit_json(**result) + + +# +# MAIN +# +def main(): + '''ONTAP LDAP client configuration''' + ldapclient = NetAppOntapLDAP() + ldapclient.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py new file mode 100644 index 000000000..8a3103b7d --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ldap_client.py @@ -0,0 +1,550 @@ +#!/usr/bin/python +''' +(c) 2018-2023, NetApp, Inc +GNU General Public License v3.0+ +(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_ldap_client + +short_description: NetApp ONTAP LDAP client +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.9.0 +author: Milan Zink (@zeten30) / + +description: + - Create, modify or delete LDAP client on NetApp ONTAP. + +options: + + state: + description: + - Whether the specified LDAP client configuration exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + vserver: + description: + - vserver/svm that holds LDAP client configuration. + required: true + type: str + + name: + description: + - The name of LDAP client configuration. + - Supported only in ZAPI. + - Required with ZAPI. + type: str + + servers: + description: + - Comma separated list of LDAP servers. FQDN's or IP addreses. + - servers or ad_domain is required if I(state=present). + - Mutually exclusive with preferred_ad_servers and ad_domain. + type: list + elements: str + aliases: ['ldap_servers'] + + schema: + description: + - LDAP schema. + - Required if I(state=present). + - default schemas - 'AD-IDMU', 'AD-SFU', 'MS-AD-BIS', 'RFC-2307'. + - custom schemas are allowed as well. 
+ type: str + + ad_domain: + description: + - Active Directory Domain Name. + - servers or ad_domain is required if I(state=present). + - Mutually exclusive with servers. + type: str + + base_dn: + description: + - LDAP base DN. + type: str + + base_scope: + description: + - LDAP search scope. + choices: ['subtree', 'onelevel', 'base'] + type: str + + bind_as_cifs_server: + description: + - The cluster uses the CIFS server's credentials to bind to the LDAP server. + type: bool + + preferred_ad_servers: + description: + - Preferred Active Directory (AD) Domain Controllers. + - Mutually exclusive with servers. + type: list + elements: str + + port: + description: + - LDAP server TCP port. + type: int + aliases: ['tcp_port'] + version_added: 21.3.0 + + query_timeout: + description: + - LDAP server query timeout. + type: int + + min_bind_level: + description: + - Minimal LDAP server bind level. + choices: ['anonymous', 'simple', 'sasl'] + type: str + + bind_dn: + description: + - LDAP bind user DN. + type: str + + bind_password: + description: + - LDAP bind user password. + type: str + + use_start_tls: + description: + - Start TLS on LDAP connection. + type: bool + + referral_enabled: + description: + - LDAP Referral Chasing. + type: bool + + session_security: + description: + - Client Session Security. + choices: ['none', 'sign', 'seal'] + type: str + + ldaps_enabled: + description: + - Specifies whether or not LDAPS is enabled. + type: bool + version_added: 21.22.0 + + skip_config_validation: + description: + - Indicates whether or not the validation for the specified LDAP configuration is disabled. + - By default, errors are reported with REST when server names cannot be resolved for instance. + - Requires ONTAP 9.9 or later. + - This is ignored with ZAPI. + type: bool + version_added: 22.0.0 + +notes: + - LDAP client created using ZAPI should be deleted using ZAPI. + - LDAP client created using REST should be deleted using REST. 
+ - REST only supports create, modify and delete data svm ldap client configuration. + +''' + +EXAMPLES = ''' + + - name: Create LDAP client + # assuming credentials are set using module_defaults + netapp.ontap.na_ontap_ldap_client: + state: present + vserver: 'vserver1' + servers: 'ldap1.example.company.com,ldap2.example.company.com' + base_dn: 'dc=example,dc=company,dc=com' + + - name: modify LDAP client + # assuming credentials are set using module_defaults + netapp.ontap.na_ontap_ldap_client: + state: present + vserver: 'vserver1' + servers: 'ldap1.example.company.com' + base_dn: 'dc=example,dc=company,dc=com' + skip_config_validation: true + + - name: Delete LDAP client + # assuming credentials are set using module_defaults + netapp.ontap.na_ontap_ldap_client: + state: absent + vserver: 'vserver1' +''' + +RETURN = ''' +''' +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver + + +class NetAppOntapLDAPClient: + ''' + LDAP Client definition class + ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + ad_domain=dict(required=False, default=None, type='str'), + base_dn=dict(required=False, type='str'), + base_scope=dict(required=False, default=None, choices=['subtree', 'onelevel', 'base']), + bind_as_cifs_server=dict(required=False, type='bool'), + bind_dn=dict(required=False, default=None, type='str'), + bind_password=dict(type='str', required=False, default=None, no_log=True), + name=dict(required=False, type='str'), + servers=dict(required=False, type='list', 
elements='str', aliases=['ldap_servers']), + min_bind_level=dict(required=False, default=None, choices=['anonymous', 'simple', 'sasl']), + preferred_ad_servers=dict(required=False, type='list', elements='str'), + port=dict(required=False, type='int', aliases=['tcp_port']), + query_timeout=dict(required=False, default=None, type='int'), + referral_enabled=dict(required=False, type='bool'), + schema=dict(required=False, type='str'), + session_security=dict(required=False, default=None, choices=['none', 'sign', 'seal']), + state=dict(required=False, choices=['present', 'absent'], default='present'), + use_start_tls=dict(required=False, type='bool'), + vserver=dict(required=True, type='str'), + ldaps_enabled=dict(required=False, type='bool'), + skip_config_validation=dict(required=False, type='bool'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + required_if=[ + ('state', 'present', ['schema']), + ], + mutually_exclusive=[ + ['servers', 'ad_domain'], + ['servers', 'preferred_ad_servers'], + ['use_start_tls', 'ldaps_enabled'] + ], + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = ['name'] + partially_supported_rest_properties = [['bind_as_cifs_server', (9, 9, 0)], ['query_timeout', (9, 9, 0)], ['referral_enabled', (9, 9, 0)], + ['ldaps_enabled', (9, 9, 0)], ['skip_config_validation', (9, 9, 0)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + if not self.parameters.get('name'): + self.module.fail_json(msg="Error: name is a required 
field with ZAPI.") + + self.simple_attributes = [ + 'ad_domain', + 'base_dn', + 'base_scope', + 'bind_as_cifs_server', + 'bind_dn', + 'bind_password', + 'min_bind_level', + 'tcp_port', + 'query_timeout', + 'referral_enabled', + 'session_security', + 'use_start_tls', + 'ldaps_enabled' + ] + + def get_ldap_client(self, client_config_name=None, vserver_name=None): + ''' + Checks if LDAP client config exists. + + :return: + ldap client config object if found + None if not found + :rtype: object/None + ''' + # Make query + client_config_info = netapp_utils.zapi.NaElement('ldap-client-get-iter') + + if client_config_name is None: + client_config_name = self.parameters['name'] + + if vserver_name is None: + vserver_name = '*' + + query_details = netapp_utils.zapi.NaElement.create_node_with_children('ldap-client', + **{ + 'ldap-client-config': client_config_name, + 'vserver': vserver_name}) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + client_config_info.add_child_elem(query) + + result = self.server.invoke_successfully(client_config_info, enable_tunneling=False) + + # Get LDAP client configuration details + client_config_details = None + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + attributes_list = result.get_child_by_name('attributes-list') + client_config_info = attributes_list.get_child_by_name('ldap-client') + ldap_server_list = self.get_list_from_children(client_config_info, 'ldap-servers') + preferred_ad_servers_list = self.get_list_from_children(client_config_info, 'preferred-ad-servers') + + # Define config details structure + client_config_details = { + 'name': client_config_info.get_child_content('ldap-client-config'), + 'servers': ldap_server_list, + 'ad_domain': client_config_info.get_child_content('ad-domain'), + 'base_dn': client_config_info.get_child_content('base-dn'), + 'base_scope': client_config_info.get_child_content('base-scope'), + 
'bind_as_cifs_server': self.na_helper.get_value_for_bool(from_zapi=True, + value=client_config_info.get_child_content('bind-as-cifs-server')), + 'bind_dn': client_config_info.get_child_content('bind-dn'), + 'bind_password': client_config_info.get_child_content('bind-password'), + 'min_bind_level': client_config_info.get_child_content('min-bind-level'), + 'tcp_port': self.na_helper.get_value_for_int(from_zapi=True, value=client_config_info.get_child_content('tcp-port')), + 'preferred_ad_servers': preferred_ad_servers_list, + 'query_timeout': self.na_helper.get_value_for_int(from_zapi=True, + value=client_config_info.get_child_content('query-timeout')), + 'referral_enabled': self.na_helper.get_value_for_bool(from_zapi=True, + value=client_config_info.get_child_content('referral-enabled')), + 'schema': client_config_info.get_child_content('schema'), + 'session_security': client_config_info.get_child_content('session-security'), + 'use_start_tls': self.na_helper.get_value_for_bool(from_zapi=True, + value=client_config_info.get_child_content('use-start-tls')), + 'ldaps_enabled': self.na_helper.get_value_for_bool(from_zapi=True, + value=client_config_info.get_child_content('ldaps-enabled')), + } + return client_config_details + + def get_list_from_children(self, client_config_info, element_name): + # Get list for element chidren + # returns empty list if element does not exist + get_list = client_config_info.get_child_by_name(element_name) + return [x.get_content() for x in get_list.get_children()] if get_list is not None else [] + + def create_ldap_client(self): + ''' + Create LDAP client configuration + ''' + options = { + 'ldap-client-config': self.parameters['name'], + 'schema': self.parameters['schema'], + } + + # Other options/attributes + for attribute in self.simple_attributes: + if self.parameters.get(attribute) is not None: + options[str(attribute).replace('_', '-')] = str(self.parameters[attribute]) + + # Initialize NaElement + ldap_client_create = 
netapp_utils.zapi.NaElement.create_node_with_children('ldap-client-create', **options) + + # LDAP servers NaElement + if self.parameters.get('servers') is not None: + self.add_element_with_children('ldap-servers', 'servers', 'string', ldap_client_create) + + # preferred_ad_servers + if self.parameters.get('preferred_ad_servers') is not None: + self.add_element_with_children('preferred-ad-servers', 'preferred_ad_servers', 'ip-address', ldap_client_create) + + # Try to create LDAP configuration + try: + self.server.invoke_successfully(ldap_client_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as errcatch: + self.module.fail_json( + msg='Error creating LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)), + exception=traceback.format_exc()) + + def add_element_with_children(self, element_name, param_name, child_name, ldap_client_create): + ldap_servers_element = netapp_utils.zapi.NaElement(element_name) + for ldap_server_name in self.parameters[param_name]: + ldap_servers_element.add_new_child(child_name, ldap_server_name) + ldap_client_create.add_child_elem(ldap_servers_element) + + def delete_ldap_client(self): + ''' + Delete LDAP client configuration + ''' + ldap_client_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'ldap-client-delete', **{'ldap-client-config': self.parameters['name']}) + + try: + self.server.invoke_successfully(ldap_client_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as errcatch: + self.module.fail_json(msg='Error deleting LDAP client configuration %s: %s' % ( + self.parameters['name'], to_native(errcatch)), exception=traceback.format_exc()) + + def modify_ldap_client(self, modify): + ''' + Modify LDAP client + :param modify: list of modify attributes + ''' + ldap_client_modify = netapp_utils.zapi.NaElement('ldap-client-modify') + ldap_client_modify.add_new_child('ldap-client-config', self.parameters['name']) + + for attribute in modify: + # LDAP_servers + if 
attribute == 'servers': + self.add_element_with_children('ldap-servers', attribute, 'string', ldap_client_modify) + # preferred_ad_servers + if attribute == 'preferred_ad_servers': + self.add_element_with_children('preferred-ad-servers', attribute, 'ip-address', ldap_client_modify) + # Simple attributes + if attribute in self.simple_attributes: + ldap_client_modify.add_new_child(str(attribute).replace('_', '-'), str(self.parameters[attribute])) + + # Try to modify LDAP client + try: + self.server.invoke_successfully(ldap_client_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as errcatch: + self.module.fail_json( + msg='Error modifying LDAP client %s: %s' % (self.parameters['name'], to_native(errcatch)), + exception=traceback.format_exc()) + + def get_ldap_client_rest(self): + """ + Retrives ldap client config with rest API. + """ + if not self.use_rest: + return self.get_ldap_client() + query = {'svm.name': self.parameters.get('vserver'), + 'fields': 'svm.uuid,' + 'ad_domain,' + 'servers,' + 'preferred_ad_servers,' + 'bind_dn,' + 'schema,' + 'port,' + 'base_dn,' + 'base_scope,' + 'min_bind_level,' + 'session_security,' + 'use_start_tls,'} + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 0): + query['fields'] += 'bind_as_cifs_server,query_timeout,referral_enabled,ldaps_enabled' + record, error = rest_generic.get_one_record(self.rest_api, 'name-services/ldap', query) + if error: + self.module.fail_json(msg="Error on getting idap client info: %s" % error) + if record: + return { + 'svm': {'uuid': self.na_helper.safe_get(record, ['svm', 'uuid'])}, + 'ad_domain': self.na_helper.safe_get(record, ['ad_domain']), + 'preferred_ad_servers': self.na_helper.safe_get(record, ['preferred_ad_servers']), + 'servers': self.na_helper.safe_get(record, ['servers']), + 'schema': self.na_helper.safe_get(record, ['schema']), + 'port': self.na_helper.safe_get(record, ['port']), + 'ldaps_enabled': self.na_helper.safe_get(record, ['ldaps_enabled']), + 
'min_bind_level': self.na_helper.safe_get(record, ['min_bind_level']), + 'bind_dn': self.na_helper.safe_get(record, ['bind_dn']), + 'base_dn': self.na_helper.safe_get(record, ['base_dn']), + 'base_scope': self.na_helper.safe_get(record, ['base_scope']), + 'use_start_tls': self.na_helper.safe_get(record, ['use_start_tls']), + 'session_security': self.na_helper.safe_get(record, ['session_security']), + 'referral_enabled': self.na_helper.safe_get(record, ['referral_enabled']), + 'bind_as_cifs_server': self.na_helper.safe_get(record, ['bind_as_cifs_server']), + 'query_timeout': self.na_helper.safe_get(record, ['query_timeout']) + } + return None + + def create_ldap_client_body_rest(self, modify=None): + """ + ldap client config body for create and modify with rest API. + """ + config_options = ['ad_domain', 'servers', 'preferred_ad_servers', 'bind_dn', 'schema', 'port', 'base_dn', 'referral_enabled', 'ldaps_enabled', + 'base_scope', 'bind_as_cifs_server', 'bind_password', 'min_bind_level', 'query_timeout', 'session_security', 'use_start_tls'] + processing_options = ['skip_config_validation'] + body = {} + for key in config_options: + if not modify and key in self.parameters: + body[key] = self.parameters[key] + elif modify and key in modify: + body[key] = modify[key] + for key in processing_options: + if body and key in self.parameters: + body[key] = self.parameters[key] + return body + + def create_ldap_client_rest(self): + """ + create ldap client config with rest API. + """ + if not self.use_rest: + return self.create_ldap_client() + body = self.create_ldap_client_body_rest() + body['svm.name'] = self.parameters['vserver'] + api = 'name-services/ldap' + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error is not None: + self.module.fail_json(msg="Error on creating ldap client: %s" % error) + + def delete_ldap_client_rest(self, current): + """ + delete ldap client config with rest API. 
+ """ + if not self.use_rest: + return self.delete_ldap_client() + api = 'name-services/ldap' + dummy, error = rest_generic.delete_async(self.rest_api, api, current['svm']['uuid'], body=None) + if error is not None: + self.module.fail_json(msg="Error on deleting ldap client rest: %s" % error) + + def modify_ldap_client_rest(self, current, modify): + """ + modif ldap client config with rest API. + """ + if not self.use_rest: + return self.modify_ldap_client(modify) + body = self.create_ldap_client_body_rest(modify) + if body: + api = 'name-services/ldap' + dummy, error = rest_generic.patch_async(self.rest_api, api, current['svm']['uuid'], body) + if error is not None: + self.module.fail_json(msg="Error on modifying ldap client config: %s" % error) + + def apply(self): + '''Call create/modify/delete operations.''' + current = self.get_ldap_client_rest() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + # state is present, either servers or ad_domain is required + if self.parameters['state'] == 'present' and not self.parameters.get('servers') \ + and self.parameters.get('ad_domain') is None: + self.module.fail_json(msg='Required one of servers or ad_domain') + # REST retrives only data svm ldap configuration, error if try to use non data svm. 
+ if cd_action == "create" and self.use_rest: + rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_ldap_client_rest() + elif cd_action == 'delete': + self.delete_ldap_client_rest(current) + elif modify: + self.modify_ldap_client_rest(current, modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + '''ONTAP LDAP client configuration''' + ldapclient = NetAppOntapLDAPClient() + ldapclient.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py new file mode 100644 index 000000000..1ed628dbb --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_license.py @@ -0,0 +1,708 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_license +''' +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_license + +short_description: NetApp ONTAP protocol and feature license packages +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Add or remove license packages on NetApp ONTAP. + - Note that the module is asymmetrical. + - It requires license codes to add packages and the package name is not visible. + - It requires package names and as serial number to remove packages. + +options: + state: + description: + - Whether the specified license packages should be installed or removed. 
+ choices: ['present', 'absent'] + type: str + default: present + + remove_unused: + description: + - Remove license packages that have no controller affiliation in the cluster. + - Not supported with REST. + type: bool + + remove_expired: + description: + - Remove license packages that have expired in the cluster. + - Not supported with REST. + type: bool + + serial_number: + description: + - Serial number of the node or cluster associated with the license package. + - This parameter is required when removing a license package. + - With REST, '*' is accepted and matches any serial number. + type: str + + license_names: + type: list + elements: str + description: + - List of license package names to remove. + suboptions: + base: + description: + - Cluster Base License + nfs: + description: + - NFS License + cifs: + description: + - CIFS License + iscsi: + description: + - iSCSI License + fcp: + description: + - FCP License + cdmi: + description: + - CDMI License + snaprestore: + description: + - SnapRestore License + snapmirror: + description: + - SnapMirror License + flexclone: + description: + - FlexClone License + snapvault: + description: + - SnapVault License + snaplock: + description: + - SnapLock License + snapmanagersuite: + description: + - SnapManagerSuite License + snapprotectapps: + description: + - SnapProtectApp License + v_storageattach: + description: + - Virtual Attached Storage License + + license_codes: + description: + - List of license codes to be installed. + type: list + elements: str + +notes: + - Partially supports check_mode - some changes cannot be detected until an add or remove action is performed. + - Supports 28 character key licenses with ZAPI and REST. + - Supports NetApp License File Version 2 (NLFv2) with REST. + - NetApp License File Version 1 (NLFv1) with REST is not supported at present but may work. + - Ansible attempts to reformat license files as the contents are python-like. 
+ Use the string filter in case of problem to disable this behavior. + - This module requires the python ast and json packages when the string filter is not used. + - This module requires the json package to check for idempotency, and to remove licenses using a NLFv2 file. + - This module requires the deepdiff package to check for idempotency. + - None of these packages are required when the string filter is used, but the module will not be idempotent. +''' + + +EXAMPLES = """ +- name: Add licenses - 28 character keys + netapp.ontap.na_ontap_license: + state: present + serial_number: ################# + license_codes: CODE1,CODE2 + +- name: Remove licenses + netapp.ontap.na_ontap_license: + state: absent + remove_unused: false + remove_expired: true + serial_number: ################# + license_names: nfs,cifs + +- name: Add NLF licenses + netapp.ontap.na_ontap_license: + state: present + license_codes: + - "{{ lookup('file', nlf_filepath) | string }}" + +- name: Remove NLF license bundle - using license file + netapp.ontap.na_ontap_license: + state: absent + license_codes: + - "{{ lookup('file', nlf_filepath) | string }}" + +- name: Remove NLF license bundle - using bundle name + netapp.ontap.na_ontap_license: + state: absent + remove_unused: false + remove_expired: true + serial_number: ################# + license_names: "Enterprise Edition" +""" + +RETURN = """ +updated_licenses: + description: return list of updated package names + returned: always + type: dict + sample: "['nfs']" +""" + +HAS_AST = True +HAS_DEEPDIFF = True +HAS_JSON = True +IMPORT_ERRORS = [] + +try: + import ast +except ImportError as exc: + HAS_AST = False + IMPORT_ERRORS.append(exc) + +try: + from deepdiff import DeepDiff +except (ImportError, SyntaxError) as exc: + # With Ansible 2.9, python 2.6 reports a SyntaxError + HAS_DEEPDIFF = False + IMPORT_ERRORS.append(exc) + +try: + import json +except ImportError as exc: + HAS_JSON = False + IMPORT_ERRORS.append(exc) + +import re +import sys 
+import time +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +if sys.version_info < (3, 5): + # not defined in earlier versions + RecursionError = RuntimeError + + +def local_cmp(a, b): + """ + compares with only values and not keys, keys should be the same for both dicts + :param a: dict 1 + :param b: dict 2 + :return: difference of values in both dicts + """ + return [key for key in a if a[key] != b[key]] + + +class NetAppOntapLicense: + '''ONTAP license class''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + serial_number=dict(required=False, type='str'), + remove_unused=dict(default=None, type='bool'), + remove_expired=dict(default=None, type='bool'), + license_codes=dict(default=None, type='list', elements='str'), + license_names=dict(default=None, type='list', elements='str'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=False, + required_if=[ + ('state', 'absent', ['license_codes', 'license_names'], True)], + required_together=[ + ('serial_number', 'license_names')], + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.license_status = {} + # list of tuples - original licenses (license_code or NLF contents), and dict of NLF contents (empty dict for legacy codes) + self.nlfs = [] + # when using REST, just keep a list as returned by GET to use with deepdiff + 
self.previous_records = [] + + # Set up REST API + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = ['remove_unused', 'remove_expired'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + self.validate_nlfs() + + def get_licensing_status(self): + """ + Check licensing status + + :return: package (key) and licensing status (value) + :rtype: dict + """ + if self.use_rest: + return self.get_licensing_status_rest() + license_status = netapp_utils.zapi.NaElement( + 'license-v2-status-list-info') + result = None + try: + result = self.server.invoke_successfully(license_status, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error checking license status: %s" % + to_native(error), exception=traceback.format_exc()) + + return_dictionary = {} + license_v2_status = result.get_child_by_name('license-v2-status') + if license_v2_status: + for license_v2_status_info in license_v2_status.get_children(): + package = license_v2_status_info.get_child_content('package') + status = license_v2_status_info.get_child_content('method') + return_dictionary[package] = status + return return_dictionary, None + + def get_licensing_status_rest(self): + api = 'cluster/licensing/licenses' + # By default, the GET method only returns licensed packages. + # To retrieve all the available package state details, below query is used. 
+ query = {'state': 'compliant, noncompliant, unlicensed, unknown'} + fields = 'name,state,licenses' + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg=error) + current = {'installed_licenses': {}} + if records: + for package in records: + current[package['name']] = package['state'] + if 'licenses' in package: + for license in package['licenses']: + installed_license = license.get('installed_license') + serial_number = license.get('serial_number') + if serial_number and installed_license: + if serial_number not in current: + current['installed_licenses'][serial_number] = set() + current['installed_licenses'][serial_number].add(installed_license) + return current, records + + def remove_licenses(self, package_name, nlf_dict=None): + """ + Remove requested licenses + :param: + package_name: Name of the license to be deleted + """ + if self.use_rest: + return self.remove_licenses_rest(package_name, nlf_dict or {}) + license_delete = netapp_utils.zapi.NaElement('license-v2-delete') + license_delete.add_new_child('serial-number', self.parameters['serial_number']) + license_delete.add_new_child('package', package_name) + try: + self.server.invoke_successfully(license_delete, + enable_tunneling=False) + return True + except netapp_utils.zapi.NaApiError as error: + # Error 15661 - Object not found + if to_native(error.code) == "15661": + return False + else: + self.module.fail_json(msg="Error removing license %s" % + to_native(error), exception=traceback.format_exc()) + + def remove_licenses_rest(self, package_name, nlf_dict): + """ + This is called either with a package name or a NLF dict + We already validated product and serialNumber are present in nlf_dict + """ + p_serial_number = self.parameters.get('serial_number') + n_serial_number = nlf_dict.get('serialNumber') + n_product = nlf_dict.get('product') + serial_number = n_serial_number or p_serial_number + if not serial_number: + 
self.module.fail_json(msg='Error: serial_number is required to delete a license.') + if n_product: + error = self.remove_one_license_rest(None, n_product, serial_number) + elif package_name.endswith(('Bundle', 'Edition')): + error = self.remove_one_license_rest(None, package_name, serial_number) + else: + error = self.remove_one_license_rest(package_name, None, serial_number) + if error and "entry doesn't exist" in error: + return False + if error: + self.module.fail_json(msg="Error removing license for serial number %s and %s: %s" + % (serial_number, n_product or package_name, error)) + return True + + def remove_one_license_rest(self, package_name, product, serial_number): + api = 'cluster/licensing/licenses' + query = {'serial_number': serial_number} + if product: + query['licenses.installed_license'] = product.replace(' ', '*') + # since this is a query, we need to specify state, or only active licenses are removed + query['state'] = '*' + dummy, error = rest_generic.delete_async(self.rest_api, api, package_name, query) + return error + + def remove_unused_licenses(self): + """ + Remove unused licenses + """ + remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused') + try: + self.server.invoke_successfully(remove_unused, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error removing unused licenses: %s" % + to_native(error), exception=traceback.format_exc()) + + def remove_expired_licenses(self): + """ + Remove expired licenses + """ + remove_expired = netapp_utils.zapi.NaElement( + 'license-v2-delete-expired') + try: + self.server.invoke_successfully(remove_expired, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error removing expired licenses: %s" % + to_native(error), exception=traceback.format_exc()) + + def add_licenses(self): + """ + Add licenses + """ + if self.use_rest: + return self.add_licenses_rest() + license_add = 
netapp_utils.zapi.NaElement('license-v2-add') + codes = netapp_utils.zapi.NaElement('codes') + for code in self.parameters['license_codes']: + codes.add_new_child('license-code-v2', str(code.strip().lower())) + license_add.add_child_elem(codes) + try: + self.server.invoke_successfully(license_add, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error adding licenses: %s" % + to_native(error), exception=traceback.format_exc()) + + def add_licenses_rest(self): + api = 'cluster/licensing/licenses' + body = {'keys': [x[0] for x in self.nlfs]} + headers = None + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + # request nested errors + headers = {'X-Dot-Error-Arguments': 'true'} + dummy, error = rest_generic.post_async(self.rest_api, api, body, headers=headers) + if error: + error = self.format_post_error(error, body) + if 'conflicts' in error: + return error + self.module.fail_json(msg="Error adding license: %s - previous license status: %s" % (error, self.license_status)) + return None + + def compare_license_status(self, previous_license_status): + changed_keys = [] + for __ in range(5): + error = None + new_license_status, records = self.get_licensing_status() + try: + changed_keys = local_cmp(previous_license_status, new_license_status) + break + except KeyError as exc: + # when a new license is added, it seems REST may not report all licenses + # wait for things to stabilize + error = exc + time.sleep(5) + if error: + self.module.fail_json(msg='Error: mismatch in license package names: %s. Expected: %s, found: %s.' 
+ % (error, previous_license_status.keys(), new_license_status.keys())) + if 'installed_licenses' in changed_keys: + changed_keys.remove('installed_licenses') + if records and self.previous_records: + deep_changed_keys = self.deep_compare(records) + for key in deep_changed_keys: + if key not in changed_keys: + changed_keys.append(key) + return changed_keys + + def deep_compare(self, records): + """ look for any change in license details, capacity, expiration, ... + this is run after apply, so we don't know for sure in check_mode + """ + if not HAS_DEEPDIFF: + self.module.warn('deepdiff is required to identify detailed changes') + return [] + diffs = DeepDiff(self.previous_records, records) + self.rest_api.log_debug('diffs', diffs) + roots = set(re.findall(r'root\[(\d+)\]', str(diffs))) + result = [records[int(index)]['name'] for index in roots] + self.rest_api.log_debug('deep_changed_keys', result) + return result + + def reformat_nlf(self, license_code): + # Ansible converts double quotes into single quotes if the input is python-like + # and we can't use json loads with single quotes! + if not HAS_AST or not HAS_JSON: + return None, "ast and json packages are required to install NLF license files. Import error(s): %s." % IMPORT_ERRORS + try: + nlf_dict = ast.literal_eval(license_code) + except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError) as exc: + return None, "malformed input: %s, exception: %s" % (license_code, exc) + try: + license_code = json.dumps(nlf_dict, separators=(',', ':')) + except Exception as exc: + return None, "unable to encode input: %s - evaluated as %s, exception: %s" % (license_code, nlf_dict, exc) + return license_code, None + + def get_nlf_dict(self, license_code): + nlf_dict = {} + is_nlf = False + if '"statusResp"' in license_code: + if license_code.count('"statusResp"') > 1: + self.module.fail_json(msg="Error: NLF license files with multiple licenses are not supported, found %d in %s." 
+ % (license_code.count('"statusResp"'), license_code)) + if license_code.count('"serialNumber"') > 1: + self.module.fail_json(msg="Error: NLF license files with multiple serial numbers are not supported, found %d in %s." + % (license_code.count('"serialNumber"'), license_code)) + is_nlf = True + if not HAS_JSON: + return nlf_dict, is_nlf, "the json package is required to process NLF license files. Import error(s): %s." % IMPORT_ERRORS + try: + nlf_dict = json.loads(license_code) + except Exception as exc: + return nlf_dict, is_nlf, "the license contents cannot be read. Unable to decode input: %s - exception: %s." % (license_code, exc) + return nlf_dict, is_nlf, None + + def scan_license_codes_for_nlf(self, license_code): + more_info = "You %s seeing this error because the original NLF contents were modified by Ansible. You can use the string filter to keep the original." + transformed = False + original_license_code = license_code + + if "'statusResp'" in license_code: + license_code, error = self.reformat_nlf(license_code) + if error: + error = 'Error: %s %s' % (error, more_info % 'are') + self.module.fail_json(msg=error) + transformed = True + + # For an NLF license, extract fields, to later collect serial number and bundle name (product) + nlf_dict, is_nlf, error = self.get_nlf_dict(license_code) + if error and transformed: + error = 'Error: %s. Ansible input: %s %s' % (error, original_license_code, more_info % 'may be') + self.module.fail_json(msg=error) + + if error: + msg = "The license " + ( + "will be installed without checking for idempotency." 
if self.parameters['state'] == 'present' else "cannot be removed.") + msg += " You are seeing this warning because " + error + self.module.warn(msg) + + return license_code, nlf_dict, is_nlf + + def split_nlf(self, license_code): + """ A NLF file may contain several licenses + One license per line + Return a list of 1 or more licenses + """ + licenses = license_code.count('"statusResp"') + if licenses <= 1: + return [license_code] + nlfs = license_code.splitlines() + if len(nlfs) != licenses: + self.module.fail_json(msg="Error: unexpected format found %d entries and %d lines in %s" + % (licenses, len(nlfs), license_code)) + return nlfs + + def split_nlfs(self): + """ A NLF file may contain several licenses + Return a flattened list of license codes + """ + license_codes = [] + for license in self.parameters.get('license_codes', []): + license_codes.extend(self.split_nlf(license)) + return license_codes + + def validate_nlfs(self): + self.parameters['license_codes'] = self.split_nlfs() + nlf_count = 0 + for license in self.parameters['license_codes']: + nlf, nlf_dict, is_nlf = self.scan_license_codes_for_nlf(license) + if is_nlf and not self.use_rest: + self.module.fail_json(msg="Error: NLF license format is not supported with ZAPI.") + self.nlfs.append((nlf, nlf_dict)) + if is_nlf: + nlf_count += 1 + if nlf_count and nlf_count != len(self.parameters['license_codes']): + self.module.fail_json(msg="Error: cannot mix legacy licenses and NLF licenses; found %d NLF licenses out of %d license_codes." + % (nlf_count, len(self.parameters['license_codes']))) + + def get_key(self, error, body): + needle = r'Failed to install the license at index (\d+)' + matched = re.search(needle, error) + if matched: + index = int(matched.group(1)) + return body['keys'][index] + return None + + def format_post_error(self, error, body): + if 'The system received a licensing request with an invalid digital signature.' 
in error: + key = self.get_key(error, body) + if key and "'statusResp'" in key: + error = 'Original NLF contents were modified by Ansible. Make sure to use the string filter. REST error: %s' % error + return error + + def nlf_is_installed(self, nlf_dict): + """ return True if NLF with same SN, product (bundle) name and package list is present + return False otherwise + Even when present, the NLF may not be active, so this is only useful for delete + """ + n_serial_number, n_product = self.get_sn_and_product(nlf_dict) + if not n_product or not n_serial_number: + return False + if 'installed_licenses' not in self.license_status: + # nothing is installed + return False + if n_serial_number == '*' and self.parameters['state'] == 'absent': + # force a delete + return True + if n_serial_number not in self.license_status['installed_licenses']: + return False + return n_product in self.license_status['installed_licenses'][n_serial_number] + + def get_sn_and_product(self, nlf_dict): + # V2 and V1 formats + n_serial_number = self.na_helper.safe_get(nlf_dict, ['statusResp', 'serialNumber'])\ + or self.na_helper.safe_get(nlf_dict, ['statusResp', 'licenses', 'serialNumber']) + n_product = self.na_helper.safe_get(nlf_dict, ['statusResp', 'product'])\ + or self.na_helper.safe_get(nlf_dict, ['statusResp', 'licenses', 'product']) + return n_serial_number, n_product + + def validate_delete_action(self, nlf_dict): + """ make sure product and serialNumber are set at the top level (V2 format) """ + # product is required for delete + n_serial_number, n_product = self.get_sn_and_product(nlf_dict) + if nlf_dict and not n_product: + self.module.fail_json(msg='Error: product not found in NLF file %s.' 
% nlf_dict) + # if serial number is not present in the NLF, we could use a module parameter + p_serial_number = self.parameters.get('serial_number') + if p_serial_number and n_serial_number and p_serial_number != n_serial_number: + self.module.fail_json(msg='Error: mismatch is serial numbers %s vs %s' % (p_serial_number, n_serial_number)) + if nlf_dict and not n_serial_number and not p_serial_number: + self.module.fail_json(msg='Error: serialNumber not found in NLF file. It can be set in the module parameter.') + nlf_dict['serialNumber'] = n_serial_number or p_serial_number + nlf_dict['product'] = n_product + + def get_delete_actions(self): + packages_to_delete = [] + if self.parameters.get('license_names') is not None: + for package in list(self.parameters['license_names']): + if 'installed_licenses' in self.license_status and self.parameters['serial_number'] != '*'\ + and self.parameters['serial_number'] in self.license_status['installed_licenses']\ + and package in self.license_status['installed_licenses'][self.parameters['serial_number']]: + packages_to_delete.append(package) + if package in self.license_status: + packages_to_delete.append(package) + + for dummy, nlf_dict in self.nlfs: + if nlf_dict: + self.validate_delete_action(nlf_dict) + nlfs_to_delete = [ + nlf_dict + for dummy, nlf_dict in self.nlfs + if self.nlf_is_installed(nlf_dict) + ] + return bool(nlfs_to_delete) or bool(self.parameters.get('license_names')), packages_to_delete, nlfs_to_delete + + def get_add_actions(self): + """ add licenses unconditionally + for legacy licenses we don't know if they are already installed + for NLF licenses we don't know if some details have changed (eg capacity, expiration date) + """ + return bool(self.nlfs), [license_code for license_code, dummy in self.nlfs] + + def get_actions(self): + changed = False + licenses_to_add = [] + nlfs_to_delete = [] + remove_license = False + packages_to_delete = [] + nlfs_to_delete = [] + # Add / Update licenses. 
+ self.license_status, self.previous_records = self.get_licensing_status() + if self.parameters['state'] == 'absent': # delete + changed, packages_to_delete, nlfs_to_delete = self.get_delete_actions() + else: # add or update + changed, licenses_to_add = self.get_add_actions() + if self.parameters.get('remove_unused') is not None: + remove_license = True + changed = True + if self.parameters.get('remove_expired') is not None: + remove_license = True + changed = True + return changed, licenses_to_add, remove_license, packages_to_delete, nlfs_to_delete + + def apply(self): + '''Call add, delete or modify methods''' + changed, licenses_to_add, remove_license, packages_to_delete, nlfs_to_delete = self.get_actions() + error, changed_keys = None, [] + if changed and not self.module.check_mode: + if self.parameters['state'] == 'present': # execute create + if licenses_to_add: + error = self.add_licenses() + if self.parameters.get('remove_unused') is not None: + self.remove_unused_licenses() + if self.parameters.get('remove_expired') is not None: + self.remove_expired_licenses() + # not able to detect that a new license is required until we try to install it. 
+ if licenses_to_add or remove_license: + changed_keys = self.compare_license_status(self.license_status) + # delete actions + else: + if nlfs_to_delete: + changed_keys.extend([nlf_dict.get("product") for nlf_dict in nlfs_to_delete if self.remove_licenses(None, nlf_dict)]) + if packages_to_delete: + changed_keys.extend([package for package in self.parameters['license_names'] if self.remove_licenses(package)]) + if not changed_keys: + changed = False + + if error: + error = 'Error: ' + ( + 'some licenses were updated, but others were in conflict: ' + if changed_keys + else 'adding licenses: ' + ) + error + self.module.fail_json(msg=error, changed=changed, updated_licenses=changed_keys) + self.module.exit_json(changed=changed, updated_licenses=changed_keys) + + +def main(): + '''Apply license operations''' + obj = NetAppOntapLicense() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_local_hosts.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_local_hosts.py new file mode 100644 index 000000000..5b895bc7b --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_local_hosts.py @@ -0,0 +1,197 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +module: na_ontap_local_hosts +short_description: NetApp ONTAP local hosts +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 22.0.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create or delete or modify local hosts in ONTAP. +options: + state: + description: + - Whether the specified local hosts should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + owner: + description: + - Name of the data SVM or cluster. 
+ required: True + type: str + aliases: + description: + - The list of aliases. + type: list + elements: str + host: + description: + - Canonical hostname. + - minimum length is 1 and maximum length is 255. + type: str + address: + description: + - IPv4/IPv6 address in dotted form. + required: True + type: str +""" + +EXAMPLES = """ + - name: Create IP to host mapping + netapp.ontap.na_ontap_local_hosts: + state: present + address: 10.10.10.10 + host: example.com + aliases: ['ex1.com', 'ex2.com'] + owner: svm1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Modify IP to host mapping + netapp.ontap.na_ontap_local_hosts: + state: present + address: 10.10.10.10 + owner: svm1 + host: example1.com + aliases: ['ex1.com', 'ex2.com', 'ex3.com'] + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Delete host object + netapp.ontap.na_ontap_local_hosts: + state: absent + address: 10.10.10.10 + owner: svm1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, netapp_ipaddress + + +class NetAppOntapLocalHosts: + """ object initialize and class methods """ + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + owner=dict(required=True, type='str'), + address=dict(required=True, type='str'), + aliases=dict(required=False, 
type='list', elements='str'), + host=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.parameters['address'] = netapp_ipaddress.validate_and_compress_ip_address(self.parameters['address'], self.module) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.owner_uuid = None + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_local_hosts', 9, 10, 1) + + def get_local_host_rest(self): + ''' + Retrieves IP to hostname mapping for SVM of the cluster. + ''' + api = 'name-services/local-hosts' + query = {'owner.name': self.parameters['owner'], + 'address': self.parameters['address'], + 'fields': 'address,hostname,owner.name,owner.uuid,aliases'} + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg='Error fetching IP to hostname mappings for %s: %s' % (self.parameters['owner'], to_native(error)), + exception=traceback.format_exc()) + if record: + self.owner_uuid = record['owner']['uuid'] + return { + 'address': self.na_helper.safe_get(record, ['address']), + 'host': self.na_helper.safe_get(record, ['hostname']), + 'aliases': self.na_helper.safe_get(record, ['aliases']) + } + return record + + def create_local_host_rest(self): + ''' + Creates a new IP to hostname mapping. 
+ ''' + api = 'name-services/local-hosts' + body = {'owner.name': self.parameters.get('owner'), + 'address': self.parameters.get('address'), + 'hostname': self.parameters.get('host')} + if 'aliases' in self.parameters: + body['aliases'] = self.parameters.get('aliases') + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating IP to hostname mappings for %s: %s' % (self.parameters['owner'], to_native(error)), + exception=traceback.format_exc()) + + def modify_local_host_rest(self, modify): + ''' + For a specified SVM and IP address, modifies the corresponding IP to hostname mapping. + ''' + body = {} + if 'aliases' in modify: + body['aliases'] = self.parameters['aliases'] + if 'host' in modify: + body['hostname'] = self.parameters['host'] + api = 'name-services/local-hosts/%s/%s' % (self.owner_uuid, self.parameters['address']) + if body: + dummy, error = rest_generic.patch_async(self.rest_api, api, None, body) + if error: + self.module.fail_json(msg='Error updating IP to hostname mappings for %s: %s' % (self.parameters['owner'], to_native(error)), + exception=traceback.format_exc()) + + def delete_local_host_rest(self): + ''' + vserver services name-service dns hosts delete. 
+ ''' + api = 'name-services/local-hosts/%s/%s' % (self.owner_uuid, self.parameters['address']) + dummy, error = rest_generic.delete_async(self.rest_api, api, None) + if error: + self.module.fail_json(msg='Error deleting IP to hostname mappings for %s: %s' % (self.parameters['owner'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + cd_action = None + current = self.get_local_host_rest() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_local_host_rest() + elif cd_action == 'delete': + self.delete_local_host_rest() + elif modify: + self.modify_local_host_rest(modify) + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + """ Create object and call apply """ + hosts_obj = NetAppOntapLocalHosts() + hosts_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_log_forward.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_log_forward.py new file mode 100644 index 000000000..2ad66eb0b --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_log_forward.py @@ -0,0 +1,312 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: na_ontap_log_forward +short_description: NetApp ONTAP Log Forward Configuration +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.2.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete or modify the log 
forward configuration +options: + state: + description: + - Whether the log forward configuration should exist or not + choices: ['present', 'absent'] + default: present + type: str + + destination: + description: + - Destination address that the log messages will be forwarded to. Can be a hostname or IP address. + required: true + type: str + + port: + description: + - The destination port used to forward the message. + required: true + type: int + + facility: + description: + - Facility code used to indicate the type of software that generated the message. + type: str + choices: ['kern', 'user', 'local0', 'local1', 'local2', 'local3', 'local4', 'local5', 'local6', 'local7'] + + force: + description: + - Skip the Connectivity Test + type: bool + + protocol: + description: + - Log Forwarding Protocol + choices: ['udp_unencrypted', 'tcp_unencrypted', 'tcp_encrypted'] + type: str + + verify_server: + description: + - Verify Destination Server Identity + type: bool +''' + +EXAMPLES = """ +- name: Create log forward configuration + na_ontap_log_forward: + state: present + destination: 10.11.12.13 + port: 514 + protocol: udp_unencrypted + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + +- name: Modify log forward configuration + na_ontap_log_forward: + state: present + destination: 10.11.12.13 + port: 514 + protocol: tcp_unencrypted + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + +- name: Delete log forward configuration + na_ontap_log_forward: + state: absent + destination: 10.11.12.13 + port: 514 + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module 
import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapLogForward(object): + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(choices=['present', 'absent'], default='present'), + destination=dict(required=True, type='str'), + port=dict(required=True, type='int'), + facility=dict(required=False, type='str', choices=['kern', 'user', 'local0', 'local1', 'local2', 'local3', 'local4', 'local5', 'local6', 'local7']), + force=dict(required=False, type='bool'), + protocol=dict(required=False, type='str', choices=['udp_unencrypted', 'tcp_unencrypted', 'tcp_encrypted']), + verify_server=dict(required=False, type='bool') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def get_log_forward_config(self): + """ + gets log forward configuration + :return: dict of log forward properties if exist, None if not + """ + + if self.use_rest: + log_forward_config = None + api = "security/audit/destinations" + query = {'fields': 'port,protocol,facility,address,verify_server', + 'address': self.parameters['destination'], + 'port': self.parameters['port']} + + message, error = self.rest_api.get(api, query) + if error: + self.module.fail_json(msg=error) + if len(message.keys()) == 0: + return None + elif 'records' in message and len(message['records']) == 0: + return None + elif 'records' not in message: + error = "Unexpected response in 
get_security_key_manager from %s: %s" % (api, repr(message)) + self.module.fail_json(msg=error) + log_forward_config = { + 'destination': message['records'][0]['address'], + 'facility': message['records'][0]['facility'], + 'port': message['records'][0]['port'], + 'protocol': message['records'][0]['protocol'], + 'verify_server': message['records'][0]['verify_server'] + } + + return log_forward_config + + else: + log_forward_config = None + + log_forward_get = netapp_utils.zapi.NaElement('cluster-log-forward-get') + log_forward_get.add_new_child('destination', self.parameters['destination']) + log_forward_get.add_new_child('port', self.na_helper.get_value_for_int(False, self.parameters['port'])) + + try: + result = self.server.invoke_successfully(log_forward_get, True) + except netapp_utils.zapi.NaApiError as error: + if to_native(error.code) == "15661": + # config doesnt exist + return None + else: + self.module.fail_json( + msg='Error getting log forward configuration for destination %s on port %s: %s' % + (self.parameters['destination'], self.na_helper.get_value_for_int(False, self.parameters['port']), to_native(error)), + exception=traceback.format_exc() + ) + + if result.get_child_by_name('attributes'): + log_forward_attributes = result.get_child_by_name('attributes') + cluster_log_forward_info = log_forward_attributes.get_child_by_name('cluster-log-forward-info') + log_forward_config = { + 'destination': cluster_log_forward_info.get_child_content('destination'), + 'facility': cluster_log_forward_info.get_child_content('facility'), + 'port': self.na_helper.get_value_for_int(True, cluster_log_forward_info.get_child_content('port')), + 'protocol': cluster_log_forward_info.get_child_content('protocol'), + 'verify_server': self.na_helper.get_value_for_bool(True, cluster_log_forward_info.get_child_content('verify-server')) + } + + return log_forward_config + + def create_log_forward_config(self): + """ + Creates a log forward config + :return: nothing + """ + + if 
self.use_rest: + api = "security/audit/destinations" + body = dict() + body['address'] = self.parameters['destination'] + body['port'] = self.parameters['port'] + + for attr in ('protocol', 'facility', 'verify_server', 'force'): + if attr in self.parameters: + body[attr] = self.parameters[attr] + + dummy, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg=error) + + else: + log_forward_config_obj = netapp_utils.zapi.NaElement('cluster-log-forward-create') + log_forward_config_obj.add_new_child('destination', self.parameters['destination']) + log_forward_config_obj.add_new_child('port', self.na_helper.get_value_for_int(False, self.parameters['port'])) + + if 'facility' in self.parameters: + log_forward_config_obj.add_new_child('facility', self.parameters['facility']) + + if 'force' in self.parameters: + log_forward_config_obj.add_new_child('force', self.na_helper.get_value_for_bool(False, self.parameters['force'])) + + if 'protocol' in self.parameters: + log_forward_config_obj.add_new_child('protocol', self.parameters['protocol']) + + if 'verify_server' in self.parameters: + log_forward_config_obj.add_new_child('verify-server', self.na_helper.get_value_for_bool(False, self.parameters['verify_server'])) + + try: + self.server.invoke_successfully(log_forward_config_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating log forward config with destination %s on port %s: %s' % + (self.parameters['destination'], self.na_helper.get_value_for_int(False, self.parameters['port']), to_native(error)), + exception=traceback.format_exc()) + + def modify_log_forward_config(self): + # need to recreate as protocol can't be changed + self.destroy_log_forward_config() + self.create_log_forward_config() + + def destroy_log_forward_config(self): + """ + Delete a log forward configuration + :return: nothing + """ + if self.use_rest: + + api = "security/audit/destinations/%s/%s" % (self.parameters['destination'], 
self.parameters['port']) + body = None + query = {'return_timeout': 3} + dummy, error = self.rest_api.delete(api, body, query) + if error: + self.module.fail_json(msg=error) + + else: + log_forward_config_obj = netapp_utils.zapi.NaElement('cluster-log-forward-destroy') + log_forward_config_obj.add_new_child('destination', self.parameters['destination']) + log_forward_config_obj.add_new_child('port', self.na_helper.get_value_for_int(False, self.parameters['port'])) + + try: + self.server.invoke_successfully(log_forward_config_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error destroying log forward destination %s on port %s: %s' % + (self.parameters['destination'], self.na_helper.get_value_for_int(False, self.parameters['port']), to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + current = self.get_log_forward_config() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = None + + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed: + if not self.module.check_mode: + if cd_action == 'create': + self.create_log_forward_config() + elif cd_action == 'delete': + self.destroy_log_forward_config() + elif modify: + self.modify_log_forward_config() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapLogForward() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py new file mode 100644 index 000000000..099cea8b9 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_login_messages.py @@ -0,0 +1,307 @@ +#!/usr/bin/python + +# 
(c) 2020-2023, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_login_messages +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: na_ontap_login_messages +author: NetApp Ansible Team (@carchi8py) +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '20.1.0' +short_description: Setup login banner and message of the day +description: + - This module allows you to manipulate login banner and motd for a vserver +options: + banner: + description: + - Login banner Text message. + type: str + vserver: + description: + - The name of the SVM login messages should be set for. + - With ZAPI, this option is required. This a cluster or data SVM. + - With REST, this is a data SVM. + - With REST, cluster scope is assumed when this option is absent. + type: str + motd_message: + description: + - MOTD Text message. + - message is deprecated and will be removed to avoid a conflict with an Ansible internal variable. 
+ type: str + aliases: + - message + show_cluster_motd: + description: + - Set to I(false) if Cluster-level Message of the Day should not be shown + type: bool + default: True +''' + +EXAMPLES = """ + + - name: modify banner vserver + netapp.ontap.na_ontap_login_messages: + vserver: trident_svm + banner: this is trident vserver + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + + - name: modify motd vserver + netapp.ontap.na_ontap_login_messages: + vserver: trident_svm + motd_message: this is trident vserver + show_cluster_motd: True + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + + - name: modify motd cluster - REST + netapp.ontap.na_ontap_login_messages: + motd_message: this is a cluster motd with REST + show_cluster_motd: True + username: "{{ username }}" + password: "{{ password }}" + hostname: "{{ hostname }}" + +""" + +RETURN = """ + +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver + + +class NetAppOntapLoginMessages: + """ + modify and delete login banner and motd + """ + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + vserver=dict(type='str'), + banner=dict(type='str'), + motd_message=dict(type='str', aliases=['message']), + show_cluster_motd=dict(default=True, type='bool') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + 
required_one_of=[['show_cluster_motd', 'banner', 'motd_message']] + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + if self.rest_api.is_rest(): + self.use_rest = True + else: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + if not self.parameters.get('vserver'): + self.module.fail_json(msg="Error: vserver is a required parameter when using ZAPI.") + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + if 'message' in self.parameters: + self.module.warn('Error: "message" option conflicts with Ansible internal variable - please use "motd_message".') + + def get_banner_motd(self): + if self.use_rest: + api = 'security/login/messages' + query = { + 'fields': 'banner,message,show_cluster_message,uuid', + 'scope': 'cluster' + } + vserver = self.parameters.get('vserver') + if vserver: + query['scope'] = 'svm' + query['svm.name'] = vserver + + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg='Error fetching login_banner info: %s' % error) + if record is None and vserver is None: + self.module.fail_json(msg='Error fetching login_banner info for cluster - no data.') + return self.form_current(record) + + # ZAPI + motd, show_cluster_motd = self.get_motd_zapi() + return { + 'banner': self.get_login_banner_zapi(), + 'motd_message': motd, + 'show_cluster_motd': show_cluster_motd + } + + def form_current(self, record): + return_result = { + 'banner': '', + 'motd_message': '', + # we need the SVM UUID to add banner or motd if they are not present + 'uuid': record['uuid'] if record else self.get_svm_uuid(self.parameters.get('vserver')), + 'show_cluster_motd': record.get('show_cluster_message') if record else None + } + # by default REST adds a trailing \n if no trailing \n set in desired message/banner. 
+ # rstip \n only when desired message/banner does not have trailing \n to preserve idempotency. + if record and record.get('banner'): + if self.parameters.get('banner', '').endswith('\n'): + return_result['banner'] = record['banner'] + else: + return_result['banner'] = record['banner'].rstrip('\n') + if record and record.get('message'): + if self.parameters.get('motd_message', '').endswith('\n'): + return_result['motd_message'] = record['message'] + else: + return_result['motd_message'] = record['message'].rstrip('\n') + return return_result + + def get_login_banner_zapi(self): + login_banner_get_iter = netapp_utils.zapi.NaElement('vserver-login-banner-get-iter') + query = netapp_utils.zapi.NaElement('query') + login_banner_info = netapp_utils.zapi.NaElement('vserver-login-banner-info') + login_banner_info.add_new_child('vserver', self.parameters['vserver']) + query.add_child_elem(login_banner_info) + login_banner_get_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(login_banner_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching login_banner info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + login_banner_info = result.get_child_by_name('attributes-list').get_child_by_name( + 'vserver-login-banner-info') + banner = login_banner_info.get_child_content('message') + banner = str(banner).rstrip() + # if the message is '-' that means the banner doesn't exist. 
+ if banner in ('-', 'None'): + banner = '' + return banner + return None + + def get_motd_zapi(self): + motd_get_iter = netapp_utils.zapi.NaElement('vserver-motd-get-iter') + query = netapp_utils.zapi.NaElement('query') + motd_info = netapp_utils.zapi.NaElement('vserver-motd-info') + motd_info.add_new_child('vserver', self.parameters['vserver']) + query.add_child_elem(motd_info) + motd_get_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(motd_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching motd info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) > 0: + motd_info = result.get_child_by_name('attributes-list').get_child_by_name( + 'vserver-motd-info') + motd_message = motd_info.get_child_content('message') + motd_message = str(motd_message).rstrip() + if motd_message == 'None': + motd_message = '' + show_cluster_motd = motd_info.get_child_content('is-cluster-message-enabled') == 'true' + return motd_message, show_cluster_motd + return '', False + + def modify_rest(self, modify, uuid): + body = { + } + if 'banner' in modify: + body['banner'] = modify['banner'] + if 'motd_message' in modify: + body['message'] = modify['motd_message'] + if modify.get('show_cluster_motd') is not None: + body['show_cluster_message'] = modify['show_cluster_motd'] + if body: + api = 'security/login/messages' + dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body) + if error: + keys = list(body.keys()) + self.module.fail_json(msg='Error modifying %s: %s' % (', '.join(keys), error)) + + def modify_banner(self, modify): + login_banner_modify = netapp_utils.zapi.NaElement('vserver-login-banner-modify-iter') + login_banner_modify.add_new_child('message', modify['banner']) + query = netapp_utils.zapi.NaElement('query') + login_banner_info = 
netapp_utils.zapi.NaElement('vserver-login-banner-info') + login_banner_info.add_new_child('vserver', self.parameters['vserver']) + query.add_child_elem(login_banner_info) + login_banner_modify.add_child_elem(query) + try: + self.server.invoke_successfully(login_banner_modify, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as err: + self.module.fail_json(msg="Error modifying login_banner: %s" % (to_native(err)), + exception=traceback.format_exc()) + + def modify_motd(self, modify): + motd_create = netapp_utils.zapi.NaElement('vserver-motd-modify-iter') + if modify.get('motd_message') is not None: + motd_create.add_new_child('message', modify['motd_message']) + if modify.get('show_cluster_motd') is not None: + motd_create.add_new_child('is-cluster-message-enabled', 'true' if modify['show_cluster_motd'] is True else 'false') + query = netapp_utils.zapi.NaElement('query') + motd_info = netapp_utils.zapi.NaElement('vserver-motd-info') + motd_info.add_new_child('vserver', self.parameters['vserver']) + query.add_child_elem(motd_info) + motd_create.add_child_elem(query) + try: + self.server.invoke_successfully(motd_create, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as err: + self.module.fail_json(msg="Error modifying motd: %s" % (to_native(err)), + exception=traceback.format_exc()) + + def get_svm_uuid(self, vserver): + """ + Get a svm's uuid + :return: uuid of the svm + """ + uuid, error = rest_vserver.get_vserver_uuid(self.rest_api, vserver) + if error is not None: + self.module.fail_json(msg="Error fetching vserver %s: %s" % (vserver, error)) + if uuid is None: + self.module.fail_json(msg="Error fetching vserver %s. Please make sure vserver name is correct. For cluster vserver, don't set vserver." 
+ % vserver) + return uuid + + def apply(self): + current = self.get_banner_motd() + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if self.use_rest: + self.modify_rest(modify, current['uuid']) + else: + if modify.get('banner') is not None: + self.modify_banner(modify) + if modify.get('show_cluster_motd') is not None or modify.get('motd_message') is not None: + self.modify_motd(modify) + result = netapp_utils.generate_result(self.na_helper.changed, modify=modify) + self.module.exit_json(**result) + + +def main(): + '''Execute action from playbook''' + messages_obj = NetAppOntapLoginMessages() + messages_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py new file mode 100644 index 000000000..c0fb796f7 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_lun.py @@ -0,0 +1,1270 @@ +#!/usr/bin/python + +# (c) 2017-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_lun + +short_description: NetApp ONTAP manage LUNs +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create, destroy, resize LUNs on NetApp ONTAP. + +options: + + state: + description: + - Whether the specified LUN should exist or not. + choices: ['present', 'absent'] + type: str + default: present + + name: + description: + - The name of the LUN to manage. + - Or LUN group name (volume name) when san_application_template is used. + required: true + type: str + + from_name: + description: + - The name of the LUN to be renamed. 
+ type: str + version_added: 20.12.0 + + flexvol_name: + description: + - The name of the FlexVol the LUN should exist on. + - Required if san_application_template is not present. + - Not allowed if san_application_template is present. + type: str + + size: + description: + - The size of the LUN in C(size_unit). + - Required when creating a single LUN if application template is not used. + type: int + + size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: 'gb' + type: str + + comment: + description: + - Optional descriptive comment for the LUN. + type: str + version_added: 21.2.0 + + force_resize: + description: + - Forcibly reduce the size. This is required for reducing the size of the LUN to avoid accidentally + reducing the LUN size. + type: bool + + force_remove: + description: + - If "true", override checks that prevent a LUN from being destroyed if it is online and mapped. + - If "false", destroying an online and mapped LUN will fail. + type: bool + default: false + + force_remove_fenced: + description: + - If "true", override checks that prevent a LUN from being destroyed while it is fenced. + - If "false", attempting to destroy a fenced LUN will fail. + - The default if not specified is "false". This field is available in Data ONTAP 8.2 and later. + type: bool + + vserver: + required: true + description: + - The name of the vserver to use. + type: str + + os_type: + description: + - The os type for the LUN. + type: str + aliases: ['ostype'] + + qos_policy_group: + description: + - The QoS policy group to be set on the LUN. + - With REST, qos_policy_group and qos_adaptive_policy_group are handled as QOS policy. + type: str + version_added: 20.12.0 + + qos_adaptive_policy_group: + description: + - The adaptive QoS policy group to be set on the LUN. 
+ - Defines measurable service level objectives (SLOs) and service level agreements (SLAs) that adjust based on the LUN's allocated space or used space. + - Requires ONTAP 9.4 or later. + - With REST, qos_policy_group and qos_adaptive_policy_group are handled as QOS policy. + type: str + version_added: 21.2.0 + + space_reserve: + description: + - This can be set to "false" which will create a LUN without any space being reserved. + type: bool + default: true + + space_allocation: + description: + - This enables support for the SCSI Thin Provisioning features. If the Host and file system do + not support this do not enable it. + type: bool + version_added: 2.7.0 + + use_exact_size: + description: + - This can be set to "false" which will round the LUN >= 450g. + type: bool + default: true + version_added: 20.11.0 + + san_application_template: + description: + - additional options when using the application/applications REST API to create LUNs. + - the module is using ZAPI by default, and switches to REST if san_application_template is present. + - create one or more LUNs (and the associated volume as needed). + - operations at the LUN level are supported, they require to know the LUN short name. + - this requires ONTAP 9.8 or higher. + - The module partially supports ONTAP 9.7 for create and delete operations, but not for modify (API limitations). + type: dict + version_added: 20.12.0 + suboptions: + name: + description: name of the SAN application. + type: str + required: true + igroup_name: + description: name of the initiator group through which the contents of this application will be accessed. + type: str + lun_count: + description: number of LUNs in the application component (1 to 32). + type: int + protection_type: + description: + - The snapshot policy for the volume supporting the LUNs. + type: dict + suboptions: + local_policy: + description: + - The snapshot copy policy for the volume. 
+ type: str + storage_service: + description: + - The performance service level (PSL) for this volume + type: str + choices: ['value', 'performance', 'extreme'] + tiering: + description: + - Cloud tiering policy. + type: dict + suboptions: + control: + description: Storage tiering placement rules for the container. + choices: ['required', 'best_effort', 'disallowed'] + type: str + policy: + description: + - Cloud tiering policy. + choices: ['all', 'auto', 'none', 'snapshot-only'] + type: str + object_stores: + description: list of object store names for tiering. + type: list + elements: str + total_size: + description: + - The total size of the application component, split across the member LUNs in C(total_size_unit). + - Recommended when C(lun_count) is present. + - Required when C(lun_count) is present and greater than 1. + - Note - if lun_count is equal to 1, and total_size is not present, size is used to maintain backward compatibility. + type: int + version_added: 21.1.0 + total_size_unit: + description: + - The unit used to interpret the total_size parameter. + - Defaults to size_unit if not present. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + type: str + version_added: 21.1.0 + use_san_application: + description: + - Whether to use the application/applications REST/API to create LUNs. + - This will default to true if any other suboption is present. + type: bool + default: true + scope: + description: + - whether the top level name identifies a single LUN or a LUN group (application). + - By default, the module will try to make the right choice, but can report extra warnings. + - Setting scope to 'application' is required to convert an existing volume to a smart container. + - The module reports an error when 'lun' or 'application' is used and the desired action cannot be completed. + - The module issues warnings when the default 'auto' is used, and there is ambiguity regarding the desired actions. 
+ type: str + choices: ['application', 'auto', 'lun'] + default: auto + version_added: 21.2.0 + exclude_aggregates: + description: + - The list of aggregate names to exclude when creating a volume. + - Requires ONTAP 9.9.1 GA or better. + type: list + elements: str + version_added: 21.7.0 +''' + +EXAMPLES = """ +- name: Create LUN + netapp.ontap.na_ontap_lun: + state: present + name: ansibleLUN + flexvol_name: ansibleVolume + vserver: ansibleVServer + size: 5 + size_unit: mb + os_type: linux + space_reserve: true + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Resize LUN + netapp.ontap.na_ontap_lun: + state: present + name: ansibleLUN + force_resize: true + flexvol_name: ansibleVolume + vserver: ansibleVServer + size: 5 + size_unit: gb + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Create LUNs using SAN application + tags: create + netapp.ontap.na_ontap_lun: + state: present + name: ansibleLUN + size: 15 + size_unit: mb + os_type: linux + space_reserve: false + san_application_template: + name: san-ansibleLUN + igroup_name: testme_igroup + lun_count: 3 + protection_type: + local_policy: default + exclude_aggregates: aggr0 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Convert existing volume to SAN application + tags: create + netapp.ontap.na_ontap_lun: + state: present + name: someVolume + size: 22 + size_unit: mb + os_type: linux + space_reserve: false + san_application_template: + name: san-ansibleLUN + igroup_name: testme_igroup + lun_count: 3 + protection_type: + local_policy: default + scope: application + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ + +""" + +import copy +import traceback + +from ansible.module_utils.basic import AnsibleModule +from 
ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.rest_application import RestApplication +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_volume +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapLUN: + ''' create, modify, delete LUN ''' + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + size=dict(type='int'), + size_unit=dict(default='gb', + choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', + 'pb', 'eb', 'zb', 'yb'], type='str'), + comment=dict(required=False, type='str'), + force_resize=dict(type='bool'), + force_remove=dict(required=False, type='bool', default=False), + force_remove_fenced=dict(type='bool'), + flexvol_name=dict(type='str'), + vserver=dict(required=True, type='str'), + os_type=dict(required=False, type='str', aliases=['ostype']), + qos_policy_group=dict(required=False, type='str'), + qos_adaptive_policy_group=dict(required=False, type='str'), + space_reserve=dict(required=False, type='bool', default=True), + space_allocation=dict(required=False, type='bool'), + use_exact_size=dict(required=False, type='bool', default=True), + san_application_template=dict(type='dict', options=dict( + use_san_application=dict(type='bool', default=True), + exclude_aggregates=dict(type='list', elements='str'), + name=dict(required=True, type='str'), + igroup_name=dict(type='str'), + 
lun_count=dict(type='int'), + protection_type=dict(type='dict', options=dict( + local_policy=dict(type='str'), + )), + storage_service=dict(type='str', choices=['value', 'performance', 'extreme']), + tiering=dict(type='dict', options=dict( + control=dict(type='str', choices=['required', 'best_effort', 'disallowed']), + policy=dict(type='str', choices=['all', 'auto', 'none', 'snapshot-only']), + object_stores=dict(type='list', elements='str') # create only + )), + total_size=dict(type='int'), + total_size_unit=dict(choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', + 'pb', 'eb', 'zb', 'yb'], type='str'), + scope=dict(type='str', choices=['application', 'auto', 'lun'], default='auto'), + )) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[('qos_policy_group', 'qos_adaptive_policy_group')] + ) + + # set up state variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + if self.parameters.get('size') is not None: + self.parameters['size'] *= netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']] + if self.na_helper.safe_get(self.parameters, ['san_application_template', 'total_size']) is not None: + unit = self.na_helper.safe_get(self.parameters, ['san_application_template', 'total_size_unit']) + if unit is None: + unit = self.parameters['size_unit'] + self.parameters['san_application_template']['total_size'] *= netapp_utils.POW2_BYTE_MAP[unit] + + self.debug = {} + self.uuid = None + # self.debug['got'] = 'empty' # uncomment to enable collecting data + + self.rest_api = OntapRestAPI(self.module) + # use_exact_size is defaulted to true, but not supported with REST. To get around this we will ignore the variable in rest. 
+ unsupported_rest_properties = ['force_resize', 'force_remove_fenced'] + partially_supported_rest_properties = [['san_application_template', (9, 7)], + ['space_allocation', (9, 10)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, + partially_supported_rest_properties) + if self.use_rest: + self.parameters.pop('use_exact_size') + if self.parameters.get('qos_adaptive_policy_group') is not None: + self.parameters['qos_policy_group'] = self.parameters.pop('qos_adaptive_policy_group') + else: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + # set default value for ZAPI only supported options. + if self.parameters.get('force_resize') is None: + self.parameters['force_resize'] = False + if self.parameters.get('force_remove_fenced') is None: + self.parameters['force_remove_fenced'] = False + + # REST API for application/applications if needed + self.rest_app = self.setup_rest_application() + + def setup_rest_application(self): + use_application_template = self.na_helper.safe_get(self.parameters, ['san_application_template', 'use_san_application']) + rest_app = None + if self.use_rest: + if use_application_template: + if self.parameters.get('flexvol_name') is not None: + self.module.fail_json(msg="'flexvol_name' option is not supported when san_application_template is present") + name = self.na_helper.safe_get(self.parameters, ['san_application_template', 'name'], allow_sparse_dict=False) + rest_app = RestApplication(self.rest_api, self.parameters['vserver'], name) + elif self.parameters.get('flexvol_name') is None: + self.module.fail_json(msg="flexvol_name option is required when san_application_template is not present") + else: + if use_application_template: + self.module.fail_json(msg="Error: using san_application_template requires ONTAP 9.7 or 
later and REST must be enabled.") + if self.parameters.get('flexvol_name') is None: + self.module.fail_json(msg="Error: 'flexvol_name' option is required when using ZAPI.") + return rest_app + + def get_luns(self, lun_path=None): + """ + Return list of LUNs matching vserver and volume names. + + :return: list of LUNs in XML format. + :rtype: list + """ + if self.use_rest: + return self.get_luns_rest(lun_path) + luns = [] + tag = None + + query_details = netapp_utils.zapi.NaElement('lun-info') + query_details.add_new_child('vserver', self.parameters['vserver']) + if lun_path is not None: + query_details.add_new_child('lun_path', lun_path) + else: + query_details.add_new_child('volume', self.parameters['flexvol_name']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + + while True: + lun_info = netapp_utils.zapi.NaElement('lun-get-iter') + lun_info.add_child_elem(query) + if tag: + lun_info.add_new_child('tag', tag, True) + + try: + result = self.server.invoke_successfully(lun_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg="Error fetching luns for %s: %s" % + (self.parameters['flexvol_name'] if lun_path is None else lun_path, to_native(exc)), + exception=traceback.format_exc()) + + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + luns.extend(attr_list.get_children()) + tag = result.get_child_content('next-tag') + if tag is None: + break + return luns + + def get_lun_details(self, lun): + """ + Extract LUN details, from XML to python dict + + :return: Details about the lun + :rtype: dict + """ + if self.use_rest: + return lun + return_value = {'size': int(lun.get_child_content('size'))} + bool_attr_map = { + 'is-space-alloc-enabled': 'space_allocation', + 'is-space-reservation-enabled': 'space_reserve' + } + for attr in bool_attr_map: + value = 
lun.get_child_content(attr) + if value is not None: + return_value[bool_attr_map[attr]] = self.na_helper.get_value_for_bool(True, value) + str_attr_map = { + 'comment': 'comment', + 'multiprotocol-type': 'os_type', + 'name': 'name', + 'path': 'path', + 'qos-policy-group': 'qos_policy_group', + 'qos-adaptive-policy-group': 'qos_adaptive_policy_group', + } + for attr in str_attr_map: + value = lun.get_child_content(attr) + if value is None and attr in ('comment', 'qos-policy-group', 'qos-adaptive-policy-group'): + value = '' + if value is not None: + return_value[str_attr_map[attr]] = value + + return return_value + + def find_lun(self, luns, name, lun_path=None): + """ + Return lun record matching name or path + + :return: lun record + :rtype: XML for ZAPI, dict for REST, or None if not found + """ + if luns: + for lun in luns: + path = lun['path'] + if lun_path is None: + if name == path: + return lun + _rest, _splitter, found_name = path.rpartition('/') + if found_name == name: + return lun + elif lun_path == path: + return lun + return None + + def get_lun(self, name, lun_path=None): + """ + Return details about the LUN + + :return: Details about the lun + :rtype: dict + """ + luns = self.get_luns(lun_path) + lun = self.find_lun(luns, name, lun_path) + if lun is not None: + return self.get_lun_details(lun) + return None + + def get_luns_from_app(self): + app_details, error = self.rest_app.get_application_details() + self.fail_on_error(error) + if app_details is not None: + app_details['paths'] = self.get_lun_paths_from_app() + return app_details + + def get_lun_paths_from_app(self): + """Get luns path for SAN application""" + backing_storage, error = self.rest_app.get_application_component_backing_storage() + self.fail_on_error(error) + # {'luns': [{'path': '/vol/ansibleLUN/ansibleLUN_1', ... 
+ if backing_storage is not None: + return [lun['path'] for lun in backing_storage.get('luns', [])] + return None + + def get_lun_path_from_backend(self, name): + """returns lun path matching name if found in backing_storage + returns None if not found + """ + lun_paths = self.get_lun_paths_from_app() + match = "/%s" % name + return next((path for path in lun_paths if path.endswith(match)), None) + + def create_san_app_component(self, modify): + '''Create SAN application component''' + if modify: + required_options = ['name'] + action = 'modify' + if 'lun_count' in modify: + required_options.append('total_size') + else: + required_options = ('name', 'total_size') + action = 'create' + for option in required_options: + if self.parameters.get(option) is None: + self.module.fail_json(msg="Error: '%s' is required to %s a san application." % (option, action)) + + application_component = dict(name=self.parameters['name']) + if not modify: + application_component['lun_count'] = 1 # default value for create, may be overridden below + + for attr in ('igroup_name', 'lun_count', 'storage_service'): + if not modify or attr in modify: + value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr]) + if value is not None: + application_component[attr] = value + for attr in ('os_type', 'qos_policy_group', 'qos_adaptive_policy_group', 'total_size'): + if not self.rest_api.meets_rest_minimum_version(True, 9, 8, 0) and attr in ( + 'os_type', + 'qos_policy_group', + 'qos_adaptive_policy_group', + ): + # os_type and qos are not supported in 9.7 for the SAN application_component + continue + if not modify or attr in modify: + value = self.na_helper.safe_get(self.parameters, [attr]) + if value is not None: + # only one of them can be present at most + if attr in ('qos_policy_group', 'qos_adaptive_policy_group'): + attr = 'qos' + value = dict(policy=dict(name=value)) + application_component[attr] = value + tiering = self.na_helper.safe_get(self.parameters, 
['san_application_template', 'tiering']) + if tiering is not None and not modify: + application_component['tiering'] = {} + for attr in ('control', 'policy', 'object_stores'): + value = tiering.get(attr) + if attr == 'object_stores' and value is not None: + value = [dict(name=x) for x in value] + if value is not None: + application_component['tiering'][attr] = value + return application_component + + def create_san_app_body(self, modify=None): + '''Create body for san template''' + # TODO: + # Should we support new_igroups? + # It may raise idempotency issues if the REST call fails if the igroup already exists. + # And we already have na_ontap_igroups. + san = { + 'application_components': [self.create_san_app_component(modify)], + } + for attr in ('protection_type',): + if not modify or attr in modify: + value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr]) + if value is not None: + # we expect value to be a dict, but maybe an empty dict + value = self.na_helper.filter_out_none_entries(value) + if value: + san[attr] = value + for attr in ('exclude_aggregates',): + if modify is None: # only used for create + values = self.na_helper.safe_get(self.parameters, ['san_application_template', attr]) + if values: + san[attr] = [dict(name=name) for name in values] + for attr in ('os_type',): + if not modify: # not supported for modify operation, but required at application component level for create + value = self.na_helper.safe_get(self.parameters, [attr]) + if value is not None: + san[attr] = value + body, error = self.rest_app.create_application_body('san', san) + return body, error + + def create_san_application(self): + '''Use REST application/applications san template to create one or more LUNs''' + body, error = self.create_san_app_body() + self.fail_on_error(error) + dummy, error = self.rest_app.create_application(body) + self.fail_on_error(error) + + def modify_san_application(self, modify): + '''Use REST application/applications san 
template to add one or more LUNs''' + body, error = self.create_san_app_body(modify) + self.fail_on_error(error) + # these cannot be present when using PATCH + body.pop('name') + body.pop('svm') + body.pop('smart_container') + dummy, error = self.rest_app.patch_application(body) + self.fail_on_error(error) + + def convert_to_san_application(self, scope): + '''First convert volume to smart container using POST + Second modify app to add new luns using PATCH + ''' + # dummy modify, so that we don't fill in the body + modify = dict(dummy='dummy') + body, error = self.create_san_app_body(modify) + self.fail_on_error(error) + dummy, error = self.rest_app.create_application(body) + self.fail_on_error(error) + app_current, error = self.rest_app.get_application_uuid() + self.fail_on_error(error) + if app_current is None: + self.module.fail_json(msg='Error: failed to create smart container for %s' % self.parameters['name']) + app_modify, app_modify_warning = self.app_changes(scope) + if app_modify_warning is not None: + self.module.warn(app_modify_warning) + if app_modify: + self.modify_san_application(app_modify) + + def delete_san_application(self): + '''Use REST application/applications san template to delete one or more LUNs''' + dummy, error = self.rest_app.delete_application() + self.fail_on_error(error) + + def create_lun(self): + """ + Create LUN with requested name and size + """ + if self.use_rest: + return self.create_lun_rest() + path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name']) + options = {'path': path, + 'size': str(self.parameters['size']), + 'space-reservation-enabled': self.na_helper.get_value_for_bool(False, self.parameters['space_reserve']), + 'use-exact-size': str(self.parameters['use_exact_size'])} + if self.parameters.get('space_allocation') is not None: + options['space-allocation-enabled'] = self.na_helper.get_value_for_bool(False, self.parameters['space_allocation']) + if self.parameters.get('comment') is not None: + 
options['comment'] = self.parameters['comment'] + if self.parameters.get('os_type') is not None: + options['ostype'] = self.parameters['os_type'] + if self.parameters.get('qos_policy_group') is not None: + options['qos-policy-group'] = self.parameters['qos_policy_group'] + if self.parameters.get('qos_adaptive_policy_group') is not None: + options['qos-adaptive-policy-group'] = self.parameters['qos_adaptive_policy_group'] + lun_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'lun-create-by-size', **options) + + try: + self.server.invoke_successfully(lun_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg="Error provisioning lun %s of size %s: %s" + % (self.parameters['name'], self.parameters['size'], to_native(exc)), + exception=traceback.format_exc()) + + def delete_lun(self, path): + """ + Delete requested LUN + """ + if self.use_rest: + return self.delete_lun_rest() + lun_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'lun-destroy', **{'path': path, + 'force': str(self.parameters['force_remove']), + 'destroy-fenced-lun': + str(self.parameters['force_remove_fenced'])}) + + try: + self.server.invoke_successfully(lun_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(exc)), + exception=traceback.format_exc()) + + def resize_lun(self, path): + """ + Resize requested LUN + + :return: True if LUN was actually re-sized, false otherwise. 
+ :rtype: bool + """ + if self.use_rest: + return self.resize_lun_rest() + lun_resize = netapp_utils.zapi.NaElement.create_node_with_children( + 'lun-resize', **{'path': path, + 'size': str(self.parameters['size']), + 'force': str(self.parameters['force_resize'])}) + try: + self.server.invoke_successfully(lun_resize, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + if to_native(exc.code) == "9042": + # Error 9042 denotes the new LUN size being the same as the + # old LUN size. This happens when there's barely any difference + # in the two sizes. For example, from 8388608 bytes to + # 8194304 bytes. This should go away if/when the default size + # requested/reported to/from the controller is changed to a + # larger unit (MB/GB/TB). + return False + else: + self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(exc)), + exception=traceback.format_exc()) + + return True + + def set_lun_value(self, path, key, value): + key_to_zapi = dict( + comment=('lun-set-comment', 'comment'), + # The same ZAPI is used for both QOS attributes + qos_policy_group=('lun-set-qos-policy-group', 'qos-policy-group'), + qos_adaptive_policy_group=('lun-set-qos-policy-group', 'qos-adaptive-policy-group'), + space_allocation=('lun-set-space-alloc', 'enable'), + space_reserve=('lun-set-space-reservation-info', 'enable') + ) + if key in key_to_zapi: + zapi, option = key_to_zapi[key] + else: + self.module.fail_json(msg="option %s cannot be modified to %s" % (key, value)) + options = dict(path=path) + if option == 'enable': + options[option] = self.na_helper.get_value_for_bool(False, value) + else: + options[option] = value + + lun_set = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options) + try: + self.server.invoke_successfully(lun_set, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg="Error setting lun option %s: %s" % (key, to_native(exc)), + exception=traceback.format_exc()) + return + + 
def modify_lun(self, path, modify): + """ + update LUN properties (except size or name) + """ + if self.use_rest: + return self.modify_lun_rest(modify) + for key in sorted(modify): + self.set_lun_value(path, key, modify[key]) + + def rename_lun(self, path, new_path): + """ + rename LUN + """ + if self.use_rest: + return self.rename_lun_rest(new_path) + lun_move = netapp_utils.zapi.NaElement.create_node_with_children( + 'lun-move', **{'path': path, + 'new-path': new_path}) + try: + self.server.invoke_successfully(lun_move, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg="Error moving lun %s: %s" % (path, to_native(exc)), + exception=traceback.format_exc()) + + def fail_on_error(self, error, stack=False): + if error is None: + return + elements = dict(msg="Error: %s" % error) + if stack: + elements['stack'] = traceback.format_stack() + self.module.fail_json(**elements) + + def set_total_size(self, validate): + # fix total_size attribute, report error if total_size is missing (or size is missing) + attr = 'total_size' + value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr]) + if value is not None or not validate: + self.parameters[attr] = value + return + lun_count = self.na_helper.safe_get(self.parameters, ['san_application_template', 'lun_count']) + value = self.parameters.get('size') + if value is not None and (lun_count is None or lun_count == 1): + self.parameters[attr] = value + return + self.module.fail_json(msg="Error: 'total_size' is a required SAN application template attribute when creating a LUN application") + + def validate_app_create(self): + # fix total_size attribute + self.set_total_size(validate=True) + + def validate_app_changes(self, modify, warning): + saved_modify = dict(modify) + errors = [ + "Error: the following application parameter cannot be modified: %s. Received: %s." 
+ % (key, str(modify)) + for key in modify + if key not in ('igroup_name', 'os_type', 'lun_count', 'total_size') + ] + + extra_attrs = tuple() + if 'lun_count' in modify: + extra_attrs = ('total_size', 'os_type', 'igroup_name') + else: + ignored_keys = [key for key in modify if key not in ('total_size',)] + for key in ignored_keys: + self.module.warn( + "Ignoring: %s. This application parameter is only relevant when increasing the LUN count. Received: %s." + % (key, str(saved_modify))) + modify.pop(key) + for attr in extra_attrs: + value = self.parameters.get(attr) + if value is None: + value = self.na_helper.safe_get(self.parameters['san_application_template'], [attr]) + if value is None: + errors.append('Error: %s is a required parameter when increasing lun_count.' % attr) + else: + modify[attr] = value + if errors: + self.module.fail_json(msg='\n'.join(errors)) + if 'total_size' in modify: + self.set_total_size(validate=False) + if warning and 'lun_count' not in modify: + # can't change total_size, let's ignore it + self.module.warn(warning) + modify.pop('total_size') + saved_modify.pop('total_size') + if modify and not self.rest_api.meets_rest_minimum_version(True, 9, 8): + self.module.fail_json( + msg='Error: modifying %s is not supported on ONTAP 9.7' % ', '.join(saved_modify.keys())) + + def fail_on_large_size_reduction(self, app_current, desired, provisioned_size): + """ Error if a reduction of size > 10% is requested. + Warn for smaller reduction and ignore it, to protect against 'rounding' errors. 
+ """ + total_size = app_current['total_size'] + desired_size = desired.get('total_size') + warning = None + if desired_size is not None: + details = "total_size=%d, provisioned=%d, requested=%d" % (total_size, provisioned_size, desired_size) + if desired_size < total_size: + # * 100 to get a percentage, and .0 to force float conversion + reduction = round((total_size - desired_size) * 100.0 / total_size, 1) + if reduction > 10: + self.module.fail_json(msg="Error: can't reduce size: %s" % details) + else: + warning = "Ignoring small reduction (%.1f %%) in total size: %s" % (reduction, details) + elif desired_size > total_size and desired_size < provisioned_size: + # we can't increase, but we can't say it is a problem, as the size is already bigger! + warning = "Ignoring increase: requested size is too small: %s" % details + return warning + + def get_luns_rest(self, lun_path=None): + if lun_path is None and self.parameters.get('flexvol_name') is None: + return [] + api = 'storage/luns' + query = { + 'svm.name': self.parameters['vserver'], + 'fields': "comment,lun_maps,name,os_type,qos_policy.name,space"} + if lun_path is not None: + query['name'] = lun_path + else: + query['location.volume.name'] = self.parameters['flexvol_name'] + record, error = rest_generic.get_0_or_more_records(self.rest_api, api, query) + if error: + if lun_path is not None: + self.module.fail_json(msg="Error getting lun_path %s: %s" % (lun_path, to_native(error)), + exception=traceback.format_exc()) + else: + self.module.fail_json( + msg="Error getting LUN's for flexvol %s: %s" % (self.parameters['flexvol_name'], to_native(error)), + exception=traceback.format_exc()) + return self.format_get_luns(record) + + def format_get_luns(self, records): + luns = [] + if not records: + return None + for record in records: + # TODO: Check that path and name are the same in Rest + lun = { + 'uuid': self.na_helper.safe_get(record, ['uuid']), + 'name': self.na_helper.safe_get(record, ['name']), + 'path': 
self.na_helper.safe_get(record, ['name']), + 'size': self.na_helper.safe_get(record, ['space', 'size']), + 'comment': self.na_helper.safe_get(record, ['comment']), + 'flexvol_name': self.na_helper.safe_get(record, ['location', 'volume', 'name']), + 'os_type': self.na_helper.safe_get(record, ['os_type']), + 'qos_policy_group': self.na_helper.safe_get(record, ['qos_policy', 'name']), + 'space_reserve': self.na_helper.safe_get(record, ['space', 'guarantee', 'requested']), + 'space_allocation': self.na_helper.safe_get(record, + ['space', 'scsi_thin_provisioning_support_enabled']), + } + luns.append(lun) + return luns + + def create_lun_rest(self): + name = self.create_lun_path_rest() + api = 'storage/luns' + body = { + 'svm.name': self.parameters['vserver'], + 'name': name, + } + if self.parameters.get('flexvol_name') is not None: + body['location.volume.name'] = self.parameters['flexvol_name'] + if self.parameters.get('os_type') is not None: + body['os_type'] = self.parameters['os_type'] + if self.parameters.get('size') is not None: + body['space.size'] = self.parameters['size'] + if self.parameters.get('space_reserve') is not None: + body['space.guarantee.requested'] = self.parameters['space_reserve'] + if self.parameters.get('space_allocation') is not None: + body['space.scsi_thin_provisioning_support_enabled'] = self.parameters['space_allocation'] + if self.parameters.get('comment') is not None: + body['comment'] = self.parameters['comment'] + if self.parameters.get('qos_policy_group') is not None: + body['qos_policy.name'] = self.parameters['qos_policy_group'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error creating LUN %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def create_lun_path_rest(self): + """ ZAPI accepts just a name, while REST expects a path. 
We need to convert a name in to a path for backward compatibility + If the name start with a slash we will assume it a path and use it as the name + """ + if not self.parameters['name'].startswith('/') and self.parameters.get('flexvol_name') is not None: + # if it dosn't start with a slash and we have a flexvol name we will use it to build the path + return '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name']) + return self.parameters['name'] + + def delete_lun_rest(self): + if self.uuid is None: + self.module.fail_json(msg="Error deleting LUN %s: UUID not found" % self.parameters['name']) + api = 'storage/luns' + query = {'allow_delete_while_mapped': self.parameters['force_remove']} + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid, query) + if error: + self.module.fail_json(msg="Error deleting LUN %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def rename_lun_rest(self, new_path): + if self.uuid is None: + self.module.fail_json(msg="Error renaming LUN %s: UUID not found" % self.parameters['name']) + api = 'storage/luns' + body = {'name': new_path} + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body) + if error: + self.module.fail_json(msg="Error renaming LUN %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def resize_lun_rest(self): + if self.uuid is None: + self.module.fail_json(msg="Error resizing LUN %s: UUID not found" % self.parameters['name']) + api = 'storage/luns' + body = {'space.size': self.parameters['size']} + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body) + if error: + if 'New LUN size is the same as the old LUN size' in error: + return False + self.module.fail_json(msg="Error resizing LUN %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + return True + + def modify_lun_rest(self, modify): + local_modify = modify.copy() 
    def modify_lun_rest(self, modify):
        """Apply the attribute changes in `modify` to the LUN over REST.

        Works on a copy of `modify`, popping each supported option as it is
        translated into a REST field; anything left over is an unsupported
        change and is fatal.
        """
        local_modify = modify.copy()
        if self.uuid is None:
            self.module.fail_json(msg="Error modifying LUN %s: UUID not found" % self.parameters['name'])
        api = 'storage/luns'
        body = {}
        if local_modify.get('space_reserve') is not None:
            body['space.guarantee.requested'] = local_modify.pop('space_reserve')
        if local_modify.get('space_allocation') is not None:
            body['space.scsi_thin_provisioning_support_enabled'] = local_modify.pop('space_allocation')
        if local_modify.get('comment') is not None:
            body['comment'] = local_modify.pop('comment')
        if local_modify.get('qos_policy_group') is not None:
            body['qos_policy.name'] = local_modify.pop('qos_policy_group')
        # anything not popped above cannot be modified through this API
        if local_modify != {}:
            self.module.fail_json(
                msg="Error modifying LUN %s: Unknown parameters: %s" % (self.parameters['name'], local_modify))
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body)
        if error:
            self.module.fail_json(msg="Error modifying LUN %s: %s" % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def check_for_errors(self, lun_cd_action, current, modify):
        """Validate option combinations for create/modify, failing with a clear message.

        NOTE(review): the size check uses an immediate fail_json while the other
        create checks are accumulated in `errors` — stylistically inconsistent,
        but harmless since every invalid path ends in fail_json.
        """
        errors = []
        if lun_cd_action == 'create':
            if self.parameters.get('flexvol_name') is None:
                errors.append("The flexvol_name parameter is required for creating a LUN.")
            if self.use_rest and self.parameters.get('os_type') is None:
                errors.append("The os_type parameter is required for creating a LUN with REST.")
            if self.parameters.get('size') is None:
                self.module.fail_json(msg="size is a required parameter for create.")
        elif modify and 'os_type' in modify:
            # os_type is immutable on an existing LUN
            self.module.fail_json(msg="os_type cannot be modified: current: %s, desired: %s" % (current['os_type'], modify['os_type']))
        if errors:
            self.module.fail_json(msg=' '.join(errors))

    def set_uuid(self, current):
        # remember the LUN uuid (REST only) for subsequent patch/delete calls
        if self.use_rest and current is not None and current.get('uuid') is not None:
            self.uuid = current['uuid']
    def app_changes(self, scope):
        # find and validate app changes
        """Compare the existing SAN application with the desired state.

        Returns (app_modify, error_msg): app_modify is None when there is
        nothing to change; error_msg is only set when the component name does
        not match and scope is not 'application'.
        """
        app_current, error = self.rest_app.get_application_details('san')
        self.fail_on_error(error)
        # save application name, as it is overridden in the flattening operation
        app_name = app_current['name']
        # there is an issue with total_size not reflecting the real total_size, and some additional overhead
        provisioned_size = self.na_helper.safe_get(app_current, ['statistics', 'space', 'provisioned'])
        if provisioned_size is None:
            provisioned_size = 0
        if self.debug:
            self.debug['app_current'] = app_current  # will be updated below as it is mutable
            self.debug['got'] = copy.deepcopy(app_current)  # fixed copy
        # flatten the nested application record so it can be diffed field by field
        app_current = app_current['san']  # app template
        app_current.update(app_current['application_components'][0])  # app component
        del app_current['application_components']
        # if component name does not match, assume a change at LUN level
        comp_name = app_current['name']
        if comp_name != self.parameters['name']:
            msg = "desired component/volume name: %s does not match existing component name: %s" % (self.parameters['name'], comp_name)
            if scope == 'application':
                self.module.fail_json(msg='Error: ' + msg + ". scope=%s" % scope)
            return None, msg + ". scope=%s, assuming 'lun' scope." % scope
        # restore app name
        app_current['name'] = app_name

        # ready to compare, except for a quirk in size handling
        desired = dict(self.parameters['san_application_template'])
        warning = self.fail_on_large_size_reduction(app_current, desired, provisioned_size)

        # preserve change state before calling modify in case an ignorable total_size change is the only change
        changed = self.na_helper.changed
        app_modify = self.na_helper.get_modified_attributes(app_current, desired)
        self.validate_app_changes(app_modify, warning)
        if not app_modify:
            self.na_helper.changed = changed
            app_modify = None
        return app_modify, None
    def get_app_apply(self):
        """Return (scope, application record) for the SAN application.

        Fails when scope is 'lun' but the named application does not exist.
        """
        scope = self.na_helper.safe_get(self.parameters, ['san_application_template', 'scope'])
        app_current, error = self.rest_app.get_application_uuid()
        self.fail_on_error(error)
        if scope == 'lun' and app_current is None:
            self.module.fail_json(msg='Application not found: %s. scope=%s.' %
                                      (self.na_helper.safe_get(self.parameters, ['san_application_template', 'name']),
                                       scope))
        return scope, app_current

    def app_actions(self, app_current, scope, actions, results):
        """Compute create/convert/modify actions at the SAN application level.

        Appends the chosen action names to `actions`, records modify details in
        `results`, and returns (app_cd_action, app_modify, app_modify_warning).
        """
        app_modify, app_modify_warning = None, None
        app_cd_action = self.na_helper.get_cd_action(app_current, self.parameters)
        if app_cd_action == 'create':
            # check if target volume already exists
            cp_volume_name = self.parameters['name']
            volume, error = rest_volume.get_volume(self.rest_api, self.parameters['vserver'], cp_volume_name)
            self.fail_on_error(error)
            if volume is not None:
                if scope == 'application':
                    # volume already exists, but not as part of this application
                    app_cd_action = 'convert'
                    if not self.rest_api.meets_rest_minimum_version(True, 9, 8, 0):
                        msg = 'Error: converting a LUN volume to a SAN application container requires ONTAP 9.8 or better.'
                        self.module.fail_json(msg=msg)
                else:
                    # default name already in use, ask user to clarify intent
                    msg = "Error: volume '%s' already exists. Please use a different group name, or use 'application' scope. scope=%s"
                    self.module.fail_json(msg=msg % (cp_volume_name, scope))
        if app_cd_action is not None:
            actions.append('app_%s' % app_cd_action)
        if app_cd_action == 'create':
            self.validate_app_create()
        if app_cd_action is None and app_current is not None:
            app_modify, app_modify_warning = self.app_changes(scope)
            if app_modify:
                actions.append('app_modify')
                results['app_modify'] = dict(app_modify)
        return app_cd_action, app_modify, app_modify_warning
Please use a different group name, or use 'application' scope. scope=%s" + self.module.fail_json(msg=msg % (cp_volume_name, scope)) + if app_cd_action is not None: + actions.append('app_%s' % app_cd_action) + if app_cd_action == 'create': + self.validate_app_create() + if app_cd_action is None and app_current is not None: + app_modify, app_modify_warning = self.app_changes(scope) + if app_modify: + actions.append('app_modify') + results['app_modify'] = dict(app_modify) + return app_cd_action, app_modify, app_modify_warning + + def lun_actions(self, app_current, actions, results, scope, app_modify, app_modify_warning): + # actions at LUN level + lun_cd_action, lun_modify, lun_rename = None, None, None + lun_path, from_lun_path = None, None + from_name = self.parameters.get('from_name') + if self.rest_app and app_current: + # For LUNs created using a SAN application, we're getting lun paths from the backing storage + lun_path = self.get_lun_path_from_backend(self.parameters['name']) + if from_name is not None: + from_lun_path = self.get_lun_path_from_backend(from_name) + current = self.get_lun(self.parameters['name'], lun_path) + self.set_uuid(current) + if current is not None and lun_path is None: + lun_path = current['path'] + lun_cd_action = self.na_helper.get_cd_action(current, self.parameters) + if lun_cd_action == 'create' and from_name is not None: + # create by renaming existing LUN, if it exists + old_lun = self.get_lun(from_name, from_lun_path) + lun_rename = self.na_helper.is_rename_action(old_lun, current) + if lun_rename is None: + self.module.fail_json(msg="Error renaming lun: %s does not exist" % from_name) + if lun_rename: + current = old_lun + if from_lun_path is None: + from_lun_path = current['path'] + head, _sep, tail = from_lun_path.rpartition(from_name) + if tail: + self.module.fail_json( + msg="Error renaming lun: %s does not match lun_path %s" % (from_name, from_lun_path)) + self.set_uuid(current) + lun_path = head + self.parameters['name'] + 
lun_cd_action = None + actions.append('lun_rename') + app_modify_warning = None # reset warning as we found a match + if lun_cd_action is not None: + actions.append('lun_%s' % lun_cd_action) + if lun_cd_action is None and self.parameters['state'] == 'present': + # we already handled rename if required + current.pop('name', None) + lun_modify = self.na_helper.get_modified_attributes(current, self.parameters) + if lun_modify: + actions.append('lun_modify') + results['lun_modify'] = dict(lun_modify) + app_modify_warning = None # reset warning as we found a match + if lun_cd_action and self.rest_app and app_current: + msg = 'This module does not support %s a LUN by name %s a SAN application.' % \ + ('adding', 'to') if lun_cd_action == 'create' else ('removing', 'from') + if scope == 'auto': + # ignore LUN not found, as name can be a group name + self.module.warn(msg + ". scope=%s, assuming 'application'" % scope) + if not app_modify: + self.na_helper.changed = False + elif scope == 'lun': + self.module.fail_json(msg=msg + ". scope=%s." 
    def lun_modify_after_app_update(self, lun_path, results):
        # modify at LUN level, as app modify does not set some LUN level options (eg space_reserve)
        """Recompute and return LUN-level modifications after an application update."""
        if lun_path is None:
            lun_path = self.get_lun_path_from_backend(self.parameters['name'])
        current = self.get_lun(self.parameters['name'], lun_path)
        self.set_uuid(current)
        # we already handled rename if required
        current.pop('name', None)
        lun_modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if lun_modify:
            results['lun_modify_after_app_update'] = dict(lun_modify)
        self.check_for_errors(None, current, lun_modify)
        return lun_modify

    def apply(self):
        """Orchestrate application-level and LUN-level actions, then exit the module."""
        results = {}
        app_cd_action, app_modify, lun_cd_action, lun_modify, lun_rename = None, None, None, None, None
        app_modify_warning, app_current, lun_path, from_lun_path = None, None, None, None
        actions = []
        if self.rest_app:
            scope, app_current = self.get_app_apply()
        else:
            # no application template, fall back to LUN only
            scope = 'lun'
        if self.rest_app and scope != 'lun':
            app_cd_action, app_modify, app_modify_warning = self.app_actions(app_current, scope, actions, results)
        if app_cd_action is None and scope != 'application':
            lun_path, from_lun_path, lun_cd_action, lun_rename, lun_modify, app_modify_warning = \
                self.lun_actions(app_current, actions, results, scope, app_modify, app_modify_warning)
        if self.na_helper.changed and not self.module.check_mode:
            if app_cd_action == 'create':
                self.create_san_application()
            elif app_cd_action == 'convert':
                self.convert_to_san_application(scope)
            elif app_cd_action == 'delete':
                self.rest_app.delete_application()
            elif lun_cd_action == 'create':
                self.create_lun()
            elif lun_cd_action == 'delete':
                self.delete_lun(lun_path)
            else:
                if app_modify:
                    self.modify_san_application(app_modify)
                if lun_rename:
                    self.rename_lun(from_lun_path, lun_path)
                if app_modify:
                    # space_reserve will be set to True
                    # To match input parameters, lun_modify is recomputed.
                    lun_modify = self.lun_modify_after_app_update(lun_path, results)
                size_changed = False
                if lun_modify and 'size' in lun_modify:
                    # Ensure that size was actually changed. Please
                    # read notes in 'resize_lun' function for details.
                    size_changed = self.resize_lun(lun_path)
                    lun_modify.pop('size')
                if lun_modify:
                    self.modify_lun(lun_path, lun_modify)
                if not lun_modify and not lun_rename and not app_modify:
                    # size may not have changed
                    self.na_helper.changed = size_changed

        if app_modify_warning:
            self.module.warn(app_modify_warning)
        result = netapp_utils.generate_result(self.na_helper.changed, actions,
                                              extra_responses={'debug': self.debug} if self.debug else None)
        self.module.exit_json(**result)


def main():
    lun = NetAppOntapLUN()
    lun.apply()
class NetAppOntapLUNCopy:
    """Copy a LUN to a new path; inter-vserver copies are supported with ZAPI only."""

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present'], default='present'),
            destination_vserver=dict(required=True, type='str', aliases=['vserver']),
            destination_path=dict(required=True, type='str'),
            source_path=dict(required=True, type='str'),
            source_vserver=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # if source_vserver not present, set destination_vserver value for intra-vserver copy operation.
        if not self.parameters.get('source_vserver'):
            self.parameters['source_vserver'] = self.parameters['destination_vserver']
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
            msg = 'REST requires ONTAP 9.10.1 or later for na_ontap_lun_copy'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module,
                                                           vserver=self.parameters['destination_vserver'])

    def get_lun(self):
        """
        Check if the LUN exists at the destination

        :return: True if it exists, False otherwise
        :rtype: bool
        """
        if self.use_rest:
            return self.get_lun_rest()
        return_value = False
        lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
        query_details = netapp_utils.zapi.NaElement('lun-info')

        query_details.add_new_child('path', self.parameters['destination_path'])
        query_details.add_new_child('vserver', self.parameters['destination_vserver'])

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)

        lun_info.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(lun_info, True)
        except netapp_utils.zapi.NaApiError as e:
            # typo fix: error message used to read 'verver' instead of 'vserver'
            self.module.fail_json(msg="Error getting lun info %s for vserver %s: %s" %
                                      (self.parameters['destination_path'], self.parameters['destination_vserver'], to_native(e)),
                                  exception=traceback.format_exc())

        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            return_value = True
        return return_value

    def copy_lun(self):
        """
        Copy LUN with requested path and vserver
        """
        if self.use_rest:
            return self.copy_lun_rest()
        lun_copy = netapp_utils.zapi.NaElement.create_node_with_children(
            'lun-copy-start', **{'source-vserver': self.parameters['source_vserver']})
        path_obj = netapp_utils.zapi.NaElement('paths')
        pair = netapp_utils.zapi.NaElement('lun-path-pair')
        pair.add_new_child('destination-path', self.parameters['destination_path'])
        pair.add_new_child('source-path', self.parameters['source_path'])
        path_obj.add_child_elem(pair)
        lun_copy.add_child_elem(path_obj)

        try:
            self.server.invoke_successfully(lun_copy, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error copying lun from %s to vserver %s: %s" %
                                      (self.parameters['source_vserver'], self.parameters['destination_vserver'], to_native(e)),
                                  exception=traceback.format_exc())

    def get_lun_rest(self):
        """Check over REST whether the destination LUN already exists."""
        api = 'storage/luns'
        params = {
            'svm.name': self.parameters['destination_vserver'],
            'name': self.parameters['destination_path']
        }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            # typo fix: error message used to read 'verver' instead of 'vserver'
            self.module.fail_json(msg="Error getting lun info %s for vserver %s: %s" %
                                      (self.parameters['destination_path'], self.parameters['destination_vserver'], to_native(error)))
        return True if record else False

    def copy_lun_rest(self):
        """Start the LUN copy over REST (intra-vserver only)."""
        api = 'storage/luns'
        body = {
            'copy': {'source': {'name': self.parameters['source_path']}},
            'name': self.parameters['destination_path'],
            'svm.name': self.parameters['destination_vserver']
        }
        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            self.module.fail_json(msg="Error copying lun from %s to vserver %s: %s" %
                                      (self.parameters['source_vserver'], self.parameters['destination_vserver'],
                                       to_native(error)))

    def apply(self):
        """Copy the LUN if the destination does not already exist."""
        if self.get_lun():  # lun already exists at destination
            changed = False
        else:
            if self.use_rest and self.parameters['source_vserver'] != self.parameters['destination_vserver']:
                # grammar fix: message used to read 'does not supports'
                self.module.fail_json(msg="Error: REST does not support inter-Vserver lun copy.")
            changed = True
            if not self.module.check_mode:
                # need to copy lun
                if self.parameters['state'] == 'present':
                    self.copy_lun()

        self.module.exit_json(changed=changed)


def main():
    v = NetAppOntapLUNCopy()
    v.apply()
+ type: str +""" + +EXAMPLES = """ +- name: Create LUN mapping + na_ontap_lun_map: + state: present + initiator_group_name: ansibleIgroup3234 + path: /vol/iscsi_path/iscsi_lun + vserver: ci_dev + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Unmap LUN + na_ontap_lun_map: + state: absent + initiator_group_name: ansibleIgroup3234 + path: /vol/iscsi_path/iscsi_lun + vserver: ci_dev + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +lun_node: + description: NetApp controller that is hosting the LUN. (Note Not returned with REST) + returned: success + type: str + sample: node01 +lun_ostype: + description: Specifies the OS of the host accessing the LUN. + returned: success + type: str + sample: vmware +lun_serial: + description: A unique, 12-byte, ASCII string used to identify the LUN. + returned: success + type: str + sample: 80E7/]LZp1Tt +lun_naa_id: + description: The Network Address Authority (NAA) identifier for the LUN. + returned: success + type: str + sample: 600a0980383045372f5d4c5a70315474 +lun_state: + description: Online or offline status of the LUN. + returned: success + type: str + sample: online +lun_size: + description: Size of the LUN in bytes. 
+ returned: success + type: int + sample: 2199023255552 +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +import codecs +from ansible.module_utils._text import to_text, to_bytes +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapLUNMap: + """ + Class with LUN map methods + """ + + def __init__(self): + self.lun_uuid, self.igroup_uuid = None, None + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + initiator_group_name=dict(required=True, type='str'), + path=dict(required=True, type='str'), + vserver=dict(required=True, type='str'), + lun_id=dict(required=False, type='str', default=None), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['path']) + ], + supports_check_mode=True + ) + self.result = dict( + changed=False, + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + if not self.use_rest: + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_lun_map(self): + """ + Return details about the LUN map + + :return: Details about the lun map + :rtype: dict + """ + if self.use_rest: + return self.get_lun_map_rest() 
    def get_lun_map(self):
        """
        Return details about the LUN map

        :return: Details about the lun map
        :rtype: dict
        """
        if self.use_rest:
            return self.get_lun_map_rest()
        lun_info = netapp_utils.zapi.NaElement('lun-map-list-info')
        lun_info.add_new_child('path', self.parameters['path'])
        result = self.server.invoke_successfully(lun_info, True)
        return_value = None
        igroups = result.get_child_by_name('initiator-groups')
        if igroups:
            # scan every igroup mapped to this path for the one this module manages
            for igroup_info in igroups.get_children():
                initiator_group_name = igroup_info.get_child_content('initiator-group-name')
                lun_id = igroup_info.get_child_content('lun-id')
                if initiator_group_name == self.parameters['initiator_group_name']:
                    return_value = {
                        'lun_id': lun_id
                    }
                    break

        return return_value

    def get_lun(self):
        """
        Return details about the LUN

        :return: Details about the lun
        :rtype: dict
        """
        if self.use_rest:
            return self.get_lun_rest()
        # build the lun query
        query_details = netapp_utils.zapi.NaElement('lun-info')
        query_details.add_new_child('path', self.parameters['path'])

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)

        lun_query = netapp_utils.zapi.NaElement('lun-get-iter')
        lun_query.add_child_elem(query)

        # find lun using query
        result = self.server.invoke_successfully(lun_query, True)
        return_value = None
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            lun = result.get_child_by_name('attributes-list').get_child_by_name('lun-info')

            return_value = {
                'lun_node': lun.get_child_content('node'),
                'lun_ostype': lun.get_child_content('multiprotocol-type'),
                'lun_serial': lun.get_child_content('serial-number'),
                'lun_naa_id': self.return_naa_id(lun.get_child_content('serial-number')),
                'lun_state': lun.get_child_content('state'),
                'lun_size': lun.get_child_content('size'),
            }

        return return_value

    def return_naa_id(self, serial_number):
        """Build the NAA identifier: the '600a0980' prefix followed by the hex-encoded serial number."""
        hexlify = codecs.getencoder('hex')
        return '600a0980' + to_text(hexlify(to_bytes(serial_number))[0])
self.create_lun_map_rest() + options = {'path': self.parameters['path'], 'initiator-group': self.parameters['initiator_group_name']} + if self.parameters['lun_id'] is not None: + options['lun-id'] = self.parameters['lun_id'] + lun_map_create = netapp_utils.zapi.NaElement.create_node_with_children('lun-map', **options) + + try: + self.server.invoke_successfully(lun_map_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error mapping lun %s of initiator_group_name %s: %s" % + (self.parameters['path'], self.parameters['initiator_group_name'], to_native(e)), + exception=traceback.format_exc()) + + def delete_lun_map(self): + """ + Unmap LUN map + """ + if self.use_rest: + return self.delete_lun_map_rest() + lun_map_delete = netapp_utils.zapi.NaElement.create_node_with_children('lun-unmap', **{ + 'path': self.parameters['path'], 'initiator-group': self.parameters['initiator_group_name']}) + + try: + self.server.invoke_successfully(lun_map_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg="Error unmapping lun %s of initiator_group_name %s: %s" % + (self.parameters['path'], self.parameters['initiator_group_name'], to_native(e)), + exception=traceback.format_exc()) + + def get_lun_rest(self): + api = 'storage/luns' + params = {'name': self.parameters['path'], + 'svm.name': self.parameters['vserver'], + 'fields': 'name,' + 'os_type,' + 'serial_number,' + 'status.state,' + 'space.size,' + 'uuid,' + 'lun_maps' + } + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error getting lun %s: %s' % (self.parameters['path'], error)) + if record: + return {'lun_ostype': self.na_helper.safe_get(record, ['os_type']), + 'lun_serial': self.na_helper.safe_get(record, ['serial_number']), + 'lun_naa_id': self.return_naa_id(self.na_helper.safe_get(record, ['serial_number'])), + 'lun_state': self.na_helper.safe_get(record, 
    def get_lun_rest(self):
        """Fetch LUN facts over REST; return a dict of lun_* fields, or None when absent."""
        api = 'storage/luns'
        params = {'name': self.parameters['path'],
                  'svm.name': self.parameters['vserver'],
                  'fields': 'name,'
                            'os_type,'
                            'serial_number,'
                            'status.state,'
                            'space.size,'
                            'uuid,'
                            'lun_maps'
                  }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error getting lun %s: %s' % (self.parameters['path'], error))
        if record:
            return {'lun_ostype': self.na_helper.safe_get(record, ['os_type']),
                    'lun_serial': self.na_helper.safe_get(record, ['serial_number']),
                    'lun_naa_id': self.return_naa_id(self.na_helper.safe_get(record, ['serial_number'])),
                    'lun_state': self.na_helper.safe_get(record, ['status', 'state']),
                    'lun_size': self.na_helper.safe_get(record, ['space', 'size']),
                    }
        return None

    def get_lun_map_rest(self):
        """Fetch the LUN map for (path, igroup) over REST; captures uuids used by delete."""
        api = 'protocols/san/lun-maps'
        params = {'lun.name': self.parameters['path'],
                  'svm.name': self.parameters['vserver'],
                  'igroup.name': self.parameters['initiator_group_name'],
                  'fields': 'logical_unit_number,igroup.uuid,lun.uuid,lun.name,igroup.name'
                  }
        record, error = rest_generic.get_one_record(self.rest_api, api, params)
        if error:
            self.module.fail_json(msg='Error getting lun_map %s: %s' % (self.parameters['path'], error))
        if record:
            # lun_id is stringified to match the ZAPI path and the lun_id option type
            return {'lun_id': str(self.na_helper.safe_get(record, ['logical_unit_number'])),
                    'igroup_uuid': self.na_helper.safe_get(record, ['igroup', 'uuid']),
                    'initiator_group_name': self.na_helper.safe_get(record, ['igroup', 'name']),
                    'lun_uuid': self.na_helper.safe_get(record, ['lun', 'uuid']),
                    'path': self.na_helper.safe_get(record, ['lun', 'name']),
                    }
        return None

    def create_lun_map_rest(self):
        """Create the LUN map over REST."""
        api = 'protocols/san/lun-maps'
        body = {'svm.name': self.parameters['vserver'],
                'igroup.name': self.parameters['initiator_group_name'],
                'lun.name': self.parameters['path']}
        if self.parameters.get('lun_id') is not None:
            body['logical_unit_number'] = self.parameters['lun_id']
        dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120)
        if error:
            self.module.fail_json(msg='Error creating lun_map %s: %s' % (self.parameters['path'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_lun_map_rest(self):
        """Delete the LUN map over REST, addressing it by '<lun_uuid>/<igroup_uuid>'."""
        api = 'protocols/san/lun-maps'
        both_uuids = '%s/%s' % (self.lun_uuid, self.igroup_uuid)
        dummy, error = rest_generic.delete_async(self.rest_api, api, both_uuids, job_timeout=120)
        if error:
            self.module.fail_json(msg='Error deleting lun_map %s: %s' % (self.parameters['path'], to_native(error)),
                                  exception=traceback.format_exc())
    def apply(self):
        """Create or delete the LUN map and enrich the module result with LUN facts."""
        # lun_details does not drive the create/delete decision; it is only merged
        # into self.result below so the module returns lun_ostype, lun_serial,
        # lun_naa_id, etc. alongside the changed status.
        lun_details = self.get_lun()
        current = self.get_lun_map()
        if self.use_rest and current:
            # remember uuids needed by delete_lun_map_rest()
            self.lun_uuid = current.get('lun_uuid', None)
            self.igroup_uuid = current.get('igroup_uuid', None)
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            # an existing map cannot be altered (e.g. lun_id change) -- only created or deleted
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            if modify:
                self.module.fail_json(msg="Modification of lun_map not allowed")
        if self.parameters['state'] == 'present' and lun_details:
            self.result.update(lun_details)
        self.result['changed'] = self.na_helper.changed
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_lun_map()
            if cd_action == 'delete':
                self.delete_lun_map()
        self.module.exit_json(**self.result)


def main():
    v = NetAppOntapLUNMap()
    v.apply()
+ +options: + state: + description: + - Whether to add or remove reporting nodes + choices: ['present', 'absent'] + type: str + default: present + + initiator_group_name: + description: + - Initiator group to map to the given LUN. + required: true + type: str + + path: + description: + - Path of the LUN. + required: true + type: str + + vserver: + required: true + description: + - The name of the vserver owning the LUN. + type: str + + nodes: + required: true + description: + - List of reporting nodes to add or remove + type: list + elements: str + +notes: + - supports ZAPI and REST. REST requires ONTAP 9.10.1 or later. + - supports check mode. +""" + +EXAMPLES = """ + - name: Create Lun Map reporting nodes + netapp.ontap.na_ontap_lun_map_reporting_nodes: + hostname: 172.21.121.82 + username: admin + password: netapp1! + https: true + validate_certs: false + vserver: vs1 + state: present + initiator_group_name: carchigroup + path: /vol/carchiVolTest/carchiLunTest + nodes: [node2] + + - name: Delete Lun Map reporting nodes + netapp.ontap.na_ontap_lun_map_reporting_nodes: + hostname: 172.21.121.82 + username: admin + password: netapp1! 
+ https: true + validate_certs: false + vserver: vs1 + state: absent + initiator_group_name: carchigroup + path: /vol/carchiVolTest/carchiLunTest + nodes: [node2] + +""" + +RETURN = """ +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapLUNMapReportingNodes: + ''' add or remove reporting nodes from a lun map ''' + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + initiator_group_name=dict(required=True, type='str'), + path=dict(required=True, type='str'), + vserver=dict(required=True, type='str'), + nodes=dict(required=True, type='list', elements='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.lun_uuid, self.igroup_uuid, self.nodes_uuids = None, None, {} + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + msg = 'REST requires ONTAP 9.10.1 or later for na_ontap_lun_map_reporting_nodes' + self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def 
get_lun_map_reporting_nodes(self): + """ + Return list of reporting nodes from the LUN map + + :return: list of reporting nodes + :rtype: list + """ + if self.use_rest: + return self.get_lun_map_reporting_nodes_rest() + query_details = netapp_utils.zapi.NaElement('lun-map-info') + query_details.add_new_child('path', self.parameters['path']) + query_details.add_new_child('initiator-group', self.parameters['initiator_group_name']) + query_details.add_new_child('vserver', self.parameters['vserver']) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + + lun_query = netapp_utils.zapi.NaElement('lun-map-get-iter') + lun_query.add_child_elem(query) + + try: + result = self.server.invoke_successfully(lun_query, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting LUN map for %s: %s' % + (self.parameters['initiator_group_name'], to_native(error)), + exception=traceback.format_exc()) + try: + num_records = int(result.get_child_content('num-records')) + except TypeError: + self.module.fail_json(msg="Error: unexpected ZAPI response for lun-map-get-iter: %s" % result.to_string()) + if num_records == 0: + return None + alist = result.get_child_by_name('attributes-list') + info = alist.get_child_by_name('lun-map-info') + reporting_nodes = info.get_child_by_name('reporting-nodes') + node_list = [] + if reporting_nodes: + for node in reporting_nodes.get_children(): + node_list.append(node.get_content()) + return node_list + + def add_lun_map_reporting_nodes(self, nodes): + reporting_nodes_obj = netapp_utils.zapi.NaElement('lun-map-add-reporting-nodes') + reporting_nodes_obj.add_new_child('igroup', self.parameters['initiator_group_name']) + reporting_nodes_obj.add_new_child('path', self.parameters['path']) + nodes_obj = netapp_utils.zapi.NaElement('nodes') + for node in nodes: + nodes_obj.add_new_child('filer-id', node) + reporting_nodes_obj.add_child_elem(nodes_obj) + try: + 
self.server.invoke_successfully(reporting_nodes_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating LUN map reporting nodes for %s: %s' % + (self.parameters['initiator_group_name'], to_native(error)), + exception=traceback.format_exc()) + + def remove_lun_map_reporting_nodes(self, nodes): + reporting_nodes_obj = netapp_utils.zapi.NaElement('lun-map-remove-reporting-nodes') + reporting_nodes_obj.add_new_child('igroup', self.parameters['initiator_group_name']) + reporting_nodes_obj.add_new_child('path', self.parameters['path']) + nodes_obj = netapp_utils.zapi.NaElement('nodes') + for node in nodes: + nodes_obj.add_new_child('filer-id', node) + reporting_nodes_obj.add_child_elem(nodes_obj) + try: + self.server.invoke_successfully(reporting_nodes_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting LUN map reporting nodes for %s: %s' % + (self.parameters['initiator_group_name'], to_native(error)), + exception=traceback.format_exc()) + + def get_lun_map_reporting_nodes_rest(self): + api = 'protocols/san/lun-maps' + query = { + 'lun.name': self.parameters['path'], + 'igroup.name': self.parameters['initiator_group_name'], + 'svm.name': self.parameters['vserver'], + 'fields': 'reporting_nodes,lun.uuid,igroup.uuid' + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg='Error getting LUN map for %s: %s' % + (self.parameters['initiator_group_name'], to_native(error))) + if record: + self.lun_uuid = record['lun']['uuid'] + self.igroup_uuid = record['igroup']['uuid'] + node_list = [] + for node in record.get('reporting_nodes', []): + self.nodes_uuids[node['name']] = node['uuid'] + node_list.append(node['name']) + return node_list + return None + + def add_lun_map_reporting_nodes_rest(self, node): + api = 'protocols/san/lun-maps/%s/%s/reporting-nodes' % (self.lun_uuid, self.igroup_uuid) + dummy, error = 
rest_generic.post_async(self.rest_api, api, {'name': node}) + if error: + self.module.fail_json(msg='Error creating LUN map reporting node for %s: %s' % + (self.parameters['initiator_group_name'], to_native(error))) + + def remove_lun_map_reporting_nodes_rest(self, node): + api = 'protocols/san/lun-maps/%s/%s/reporting-nodes' % (self.lun_uuid, self.igroup_uuid) + dummy, error = rest_generic.delete_async(self.rest_api, api, self.nodes_uuids[node]) + if error: + self.module.fail_json(msg='Error deleting LUN map reporting nodes for %s: %s' % + (self.parameters['initiator_group_name'], to_native(error))) + + def apply(self): + reporting_nodes = self.get_lun_map_reporting_nodes() + if reporting_nodes is None: + self.module.fail_json(msg='Error: LUN map not found for vserver %s, LUN path: %s, igroup: %s' % + (self.parameters['vserver'], self.parameters['path'], self.parameters['initiator_group_name'])) + if self.parameters['state'] == 'present': + nodes_to_add = [node for node in self.parameters['nodes'] if node not in reporting_nodes] + nodes_to_delete = list() + else: + nodes_to_add = list() + nodes_to_delete = [node for node in self.parameters['nodes'] if node in reporting_nodes] + changed = len(nodes_to_add) > 0 or len(nodes_to_delete) > 0 + if changed and not self.module.check_mode: + if nodes_to_add: + if self.use_rest: + for node in nodes_to_add: + self.add_lun_map_reporting_nodes_rest(node) + else: + self.add_lun_map_reporting_nodes(nodes_to_add) + if nodes_to_delete: + if self.use_rest: + for node in nodes_to_delete: + self.remove_lun_map_reporting_nodes_rest(node) + else: + self.remove_lun_map_reporting_nodes(nodes_to_delete) + self.module.exit_json(changed=changed, reporting_nodes=reporting_nodes, nodes_to_add=nodes_to_add, nodes_to_delete=nodes_to_delete) + + +def main(): + na_module = NetAppOntapLUNMapReportingNodes() + na_module.apply() + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py new file mode 100644 index 000000000..bbacc8ce2 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_mcc_mediator.py @@ -0,0 +1,185 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# This module implements the operations for ONTAP MCC Mediator. +# The Mediator is supported for MCC IP configs from ONTAP 9.7 or later. +# This module requires REST APIs for Mediator which is supported from +# ONTAP 9.8 (DW) or later + +''' +na_ontap_mcc_mediator +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +module: na_ontap_mcc_mediator +short_description: NetApp ONTAP Add and Remove MetroCluster Mediator +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 20.9.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Add and remove ONTAP MCC Mediator +options: + state: + choices: ['present', 'absent'] + description: + - "Whether MCCIP Mediator is present or not." + default: present + type: str + + mediator_address: + description: + - ip address of the mediator + type: str + required: true + + mediator_user: + description: + - username of the mediator + type: str + required: true + + mediator_password: + description: + - password of the mediator + type: str + required: true + +''' + +EXAMPLES = """ + - name: Add ONTAP MCCIP Mediator + na_ontap_mcc_mediator: + state: present + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + mediator_address: mediator_ip + mediator_user: metrocluster_admin + mediator_password: netapp1! 
+ + - name: Delete ONTAP MCCIP Mediator + na_ontap_mcc_mediator: + state: absent + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + mediator_user: metrocluster_admin + mediator_password: netapp1! +""" + +RETURN = """ + +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule + + +class NetAppOntapMccipMediator(object): + """ + Mediator object for Add/Remove/Display + """ + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + mediator_address=dict(required=True, type='str'), + mediator_user=dict(required=True, type='str'), + mediator_password=dict(required=True, type='str', no_log=True), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_mcc_mediator')) + + def add_mediator(self): + """ + Adds an ONTAP Mediator to MCC configuration + """ + api = 'cluster/mediators' + params = { + 'ip_address': self.parameters['mediator_address'], + 'password': self.parameters['mediator_password'], + 'user': self.parameters['mediator_user'] + } + dummy, error = self.rest_api.post(api, params) + if error: + self.module.fail_json(msg=error) + + def remove_mediator(self, current_uuid): + """ + Removes the ONTAP Mediator from MCC configuration + """ + api = 'cluster/mediators/%s' 
% current_uuid + params = { + 'ip_address': self.parameters['mediator_address'], + 'password': self.parameters['mediator_password'], + 'user': self.parameters['mediator_user'] + } + dummy, error = self.rest_api.delete(api, params) + if error: + self.module.fail_json(msg=error) + + def get_mediator(self): + """ + Determine if the MCC configuration has added an ONTAP Mediator + """ + api = "cluster/mediators" + message, error = self.rest_api.get(api, None) + if error: + self.module.fail_json(msg=error) + if message['num_records'] > 0: + return message['records'][0]['uuid'] + return None + + def apply(self): + """ + Apply action to MCC Mediator + """ + current = self.get_mediator() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'create': + self.add_mediator() + elif cd_action == 'delete': + self.remove_mediator(current) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Add, Remove and display ONTAP MCC Mediator + """ + mediator_obj = NetAppOntapMccipMediator() + mediator_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py new file mode 100644 index 000000000..bc149267c --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +""" +(c) 2020, NetApp, Inc + # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +module: na_ontap_metrocluster +short_description: NetApp ONTAP set up 
a MetroCluster +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '20.9.0' +author: NetApp Ansible Team (@carchi8py) +requirements: + - ONTAP >= 9.8 + +description: + - Configure MetroCluster. +options: + state: + choices: ['present'] + description: + - Present to set up a MetroCluster + default: present + type: str + dr_pairs: + description: disaster recovery pair + type: list + required: true + elements: dict + suboptions: + node_name: + description: + - the name of the main node + required: true + type: str + partner_node_name: + description: + - the name of the main partner node + required: true + type: str + partner_cluster_name: + description: + - The name of the partner Cluster + required: true + type: str +''' + +EXAMPLES = ''' +- + name: Manage MetroCluster + hosts: localhost + collections: + - netapp.ontap + vars: + login: &login + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: True + validate_certs: False + tasks: + - name: Create MetroCluster + na_ontap_metrocluster: + <<: *login + dr_pairs: + - partner_node_name: rha17-a2 + node_name: rha17-b2 + partner_cluster_name: rha2-b2b1_siteB +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppONTAPMetroCluster(object): + ''' ONTAP metrocluster operations ''' + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(choices=['present'], default='present'), + dr_pairs=dict(required=True, type='list', elements='dict', options=dict( + node_name=dict(required=True, type='str'), + 
partner_node_name=dict(required=True, type='str') + )), + partner_cluster_name=dict(required=True, type='str') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_metrocluster')) + + def get_metrocluster(self): + attrs = None + api = 'cluster/metrocluster' + options = {'fields': '*'} + message, error = self.rest_api.get(api, options) + if error: + self.module.fail_json(msg=error) + if message is not None: + local = message['local'] + if local['configuration_state'] != "not_configured": + attrs = { + 'configuration_state': local['configuration_state'], + 'partner_cluster_reachable': local['partner_cluster_reachable'], + 'partner_cluster_name': local['cluster']['name'] + } + return attrs + + def create_metrocluster(self): + api = 'cluster/metrocluster' + options = {} + dr_pairs = [] + for pair in self.parameters['dr_pairs']: + dr_pairs.append({'node': {'name': pair['node_name']}, + 'partner': {'name': pair['partner_node_name']}}) + partner_cluster = {'name': self.parameters['partner_cluster_name']} + data = {'dr_pairs': dr_pairs, 'partner_cluster': partner_cluster} + message, error = self.rest_api.post(api, data, options) + if error is not None: + self.module.fail_json(msg="%s" % error) + message, error = self.rest_api.wait_on_job(message['job']) + if error: + self.module.fail_json(msg="%s" % error) + + def apply(self): + current = self.get_metrocluster() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'create': + self.create_metrocluster() + # Since there is no modify or delete, we will return no change + else: + 
self.module.fail_json(msg="Modify and Delete currently not support in API") + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + obj = NetAppONTAPMetroCluster() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py new file mode 100644 index 000000000..3794c9753 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_metrocluster_dr_group.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +""" +(c) 2020, NetApp, Inc + # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +module: na_ontap_metrocluster_dr_group +short_description: NetApp ONTAP manage MetroCluster DR Group +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 20.11.0 +author: NetApp Ansible Team (@carchi8py) +requirements: + - ONTAP >= 9.8 +description: + - Create/Delete MetroCluster DR Group + - Create only supports MCC IP + - Delete supports both MCC IP and MCC FC +options: + state: + choices: ['present', 'absent'] + description: + add or remove DR groups + default: present + type: str + dr_pairs: + description: disaster recovery pairs + type: list + required: true + elements: dict + suboptions: + node_name: + description: + - the name of the main node + required: true + type: str + partner_node_name: + description: + - the name of the main partner node + required: true + type: str + partner_cluster_name: + description: + - The name of the partner cluster + required: true + type: str +''' + +EXAMPLES = ''' +- + name: Manage MetroCluster DR 
group + hosts: localhost + collections: + - netapp.ontap + vars: + login: &login + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: True + validate_certs: False + tasks: + - name: Create MetroCluster DR group + na_ontap_metrocluster_dr_group: + <<: *login + dr_pairs: + - partner_name: carchi_cluster3_01 + node_name: carchi_cluster1_01 + partner_cluster_name: carchi_cluster3 + - name: Delete MetroCluster DR group + na_ontap_metrocluster_dr_group: + <<: *login + dr_pairs: + - partner_name: carchi_cluster3_01 + node_name: carchi_cluster1_01 + state: absent + partner_cluster_name: carchi_cluster3 +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI + + +class NetAppONTAPMetroClusterDRGroup(object): + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(choices=['present', 'absent'], default='present'), + dr_pairs=dict(required=True, type='list', elements='dict', options=dict( + node_name=dict(required=True, type='str'), + partner_node_name=dict(required=True, type='str') + )), + partner_cluster_name=dict(required=True, type='str') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_metrocluster_dr_group', + version='9.8')) + + def get_dr_group(self): + return_attrs = None + for pair in self.parameters['dr_pairs']: + api = 
'cluster/metrocluster/dr-groups' + options = {'fields': '*', + 'dr_pairs.node.name': pair['node_name'], + 'dr_pairs.partner.name': pair['partner_node_name'], + 'partner_cluster.name': self.parameters['partner_cluster_name']} + message, error = self.rest_api.get(api, options) + if error: + self.module.fail_json(msg=error) + if 'records' in message and message['num_records'] == 0: + continue + elif 'records' not in message or message['num_records'] != 1: + error = "Unexpected response from %s: %s" % (api, repr(message)) + self.module.fail_json(msg=error) + record = message['records'][0] + return_attrs = { + 'partner_cluster_name': record['partner_cluster']['name'], + 'dr_pairs': [], + 'id': record['id'] + } + for dr_pair in record['dr_pairs']: + return_attrs['dr_pairs'].append({'node_name': dr_pair['node']['name'], 'partner_node_name': dr_pair['partner']['name']}) + # if we have an return_dr_id we don't need to loop anymore + break + return return_attrs + + def get_dr_group_ids_from_nodes(self): + delete_ids = [] + for pair in self.parameters['dr_pairs']: + api = 'cluster/metrocluster/nodes' + options = {'fields': '*', + 'node.name': pair['node_name']} + message, error = self.rest_api.get(api, options) + if error: + self.module.fail_json(msg=error) + if 'records' in message and message['num_records'] == 0: + continue + elif 'records' not in message or message['num_records'] != 1: + error = "Unexpected response from %s: %s" % (api, repr(message)) + self.module.fail_json(msg=error) + record = message['records'][0] + if int(record['dr_group_id']) not in delete_ids: + delete_ids.append(int(record['dr_group_id'])) + return delete_ids + + def create_dr_group(self): + api = 'cluster/metrocluster/dr-groups' + dr_pairs = [] + for pair in self.parameters['dr_pairs']: + dr_pairs.append({'node': {'name': pair['node_name']}, + 'partner': {'name': pair['partner_node_name']}}) + partner_cluster = {'name': self.parameters['partner_cluster_name']} + data = {'dr_pairs': dr_pairs, 
'partner_cluster': partner_cluster} + message, error = self.rest_api.post(api, data) + if error is not None: + self.module.fail_json(msg="%s" % error) + message, error = self.rest_api.wait_on_job(message['job']) + if error: + self.module.fail_json(msg="%s" % error) + + def delete_dr_groups(self, dr_ids): + for dr_id in dr_ids: + api = 'cluster/metrocluster/dr-groups/' + str(dr_id) + message, error = self.rest_api.delete(api) + if error: + self.module.fail_json(msg=error) + message, error = self.rest_api.wait_on_job(message['job']) + if error: + self.module.fail_json(msg="%s" % error) + + def apply(self): + current = self.get_dr_group() + delete_ids = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and current is None and self.parameters['state'] == 'absent': + # check if there is some FC group to delete + delete_ids = self.get_dr_group_ids_from_nodes() + if delete_ids: + cd_action = 'delete' + self.na_helper.changed = True + elif cd_action == 'delete': + delete_ids = [current['id']] + if cd_action and not self.module.check_mode: + if cd_action == 'create': + self.create_dr_group() + if cd_action == 'delete': + self.delete_dr_groups(delete_ids) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + obj = NetAppONTAPMetroClusterDRGroup() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py new file mode 100644 index 000000000..ed363692e --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_motd.py @@ -0,0 +1,210 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# (c) 2018 Piotr Olczak +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + 
+DOCUMENTATION = ''' +module: na_ontap_motd +author: + - Piotr Olczak (@dprts) + - NetApp Ansible Team (@carchi8py) +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +short_description: Setup motd +description: + - This module allows you to manipulate motd for a vserver + - It also allows to manipulate motd at the cluster level by using the cluster vserver (cserver) +version_added: 2.7.0 +options: + state: + description: + - If C(state=present) sets MOTD given in I(message) C(state=absent) removes it. + choices: ['present', 'absent'] + type: str + default: present + motd_message: + description: + - MOTD Text message. + - message is deprecated and will be removed to avoid a conflict with an Ansible internal variable. + type: str + default: '' + aliases: + - message + vserver: + description: + - The name of the SVM motd should be set for. + required: true + type: str + show_cluster_motd: + description: + - Set to I(false) if Cluster-level Message of the Day should not be shown + type: bool + default: True + +notes: + - This module is deprecated and only supports ZAPI. + - Please use netapp.ontap.na_ontap_login_messages both for ZAPI and REST. 
+ +''' + +EXAMPLES = ''' + +- name: Set Cluster-Level MOTD + netapp.ontap.na_ontap_motd: + vserver: my_ontap_cluster + motd_message: "Cluster wide MOTD" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + state: present + https: true + +- name: Set MOTD for I(rhev_nfs_krb) SVM, do not show Cluster-Level MOTD + netapp.ontap.na_ontap_motd: + vserver: rhev_nfs_krb + motd_message: "Access to rhev_nfs_krb is also restricted" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + state: present + show_cluster_motd: False + https: true + +- name: Remove Cluster-Level MOTD + netapp.ontap.na_ontap_motd: + vserver: my_ontap_cluster + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + state: absent + https: true +''' + +RETURN = ''' + +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule + + +class NetAppONTAPMotd: + + def __init__(self): + argument_spec = netapp_utils.na_ontap_zapi_only_spec() + argument_spec.update(dict( + state=dict(required=False, type='str', default='present', choices=['present', 'absent']), + vserver=dict(required=True, type='str'), + motd_message=dict(default='', type='str', aliases=['message']), + show_cluster_motd=dict(default=True, type='bool') + )) + + self.module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.na_helper.module_replaces('na_ontap_login_messages', self.module) + + msg = 'netapp.ontap.na_ontap_motd is deprecated and only supports ZAPI. 
Please use netapp.ontap.na_ontap_login_messages.' + if self.parameters['use_rest'].lower() == 'never': + self.module.warn(msg) + else: + self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + if 'message' in self.parameters: + self.module.warn('Error: "message" option conflicts with Ansible internal variable - please use "motd_message".') + + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def motd_get_iter(self): + """ + Compose NaElement object to query current motd + :return: NaElement object for vserver-motd-get-iter + """ + motd_get_iter = netapp_utils.zapi.NaElement('vserver-motd-get-iter') + query = netapp_utils.zapi.NaElement('query') + motd_info = netapp_utils.zapi.NaElement('vserver-motd-info') + motd_info.add_new_child('is-cluster-message-enabled', str(self.parameters['show_cluster_motd'])) + motd_info.add_new_child('vserver', self.parameters['vserver']) + query.add_child_elem(motd_info) + motd_get_iter.add_child_elem(query) + return motd_get_iter + + def motd_get(self): + """ + Get current motd + :return: Dictionary of current motd details if query successful, else None + """ + motd_get_iter = self.motd_get_iter() + motd_result = {} + try: + result = self.server.invoke_successfully(motd_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching motd info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) > 0: + motd_info = result.get_child_by_name('attributes-list').get_child_by_name( + 'vserver-motd-info') + motd_result['motd_message'] = motd_info.get_child_content('message') + motd_result['motd_message'] = str(motd_result['motd_message']).rstrip() + motd_result['show_cluster_motd'] = 
motd_info.get_child_content('is-cluster-message-enabled') == 'true' + motd_result['vserver'] = motd_info.get_child_content('vserver') + return motd_result + return None + + def modify_motd(self): + motd_create = netapp_utils.zapi.NaElement('vserver-motd-modify-iter') + motd_create.add_new_child('message', self.parameters['motd_message']) + motd_create.add_new_child( + 'is-cluster-message-enabled', 'true' if self.parameters['show_cluster_motd'] is True else 'false') + query = netapp_utils.zapi.NaElement('query') + motd_info = netapp_utils.zapi.NaElement('vserver-motd-info') + motd_info.add_new_child('vserver', self.parameters['vserver']) + query.add_child_elem(motd_info) + motd_create.add_child_elem(query) + try: + self.server.invoke_successfully(motd_create, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as err: + self.module.fail_json(msg="Error creating motd: %s" % (to_native(err)), exception=traceback.format_exc()) + return motd_create + + def apply(self): + """ + Applies action from playbook + """ + current = self.motd_get() + if self.parameters['state'] == 'absent': + # Just make sure it is empty + self.parameters['motd_message'] = '' + if current and current['motd_message'] == 'None': + current = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed and not self.module.check_mode: + self.modify_motd() + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + motd_obj = NetAppONTAPMotd() + motd_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_mappings.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_mappings.py new file mode 100644 index 000000000..3aa4f2df5 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_mappings.py 
@@ -0,0 +1,286 @@ +#!/usr/bin/python + +# (c) 2022-2023, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +''' +na_ontap_name_mappings +''' + + +DOCUMENTATION = ''' +module: na_ontap_name_mappings +short_description: NetApp ONTAP name mappings +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 22.0.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete/Modify name mappings for an SVM on ONTAP. +options: + state: + description: + - Whether the specified name mappings should exist or not. + choices: ['present', 'absent'] + type: str + default: present + vserver: + description: + - Name of the vserver to use. + required: true + type: str + client_match: + description: + - Client workstation IP Address which is matched when searching for the pattern. + - Example '10.254.101.111/28' + - Client match value can be in any of the following formats, + - As an IPv4 address with a subnet mask expressed as a number of bits; for instance, 10.1.12.0/24 + - As an IPv6 address with a subnet mask expressed as a number of bits; for instance, fd20:8b1e:b255:4071::/64 + - As an IPv4 address with a network mask; for instance, 10.1.16.0/255.255.255.0 + - As a hostname + type: str + direction: + description: + - Direction in which the name mapping is applied. + - The possible values are, + krb_unix - Kerberos principal name to UNIX user name + win_unix - Windows user name to UNIX user name + unix_win - UNIX user name to Windows user name mapping + s3_unix - S3 user name to UNIX user name mapping + s3_win - S3 user name to Windows user name mapping + - s3_unix and s3_win requires ONTAP 9.12.1 or later. + choices: ['krb_unix', 'win_unix', 'unix_win', 's3_unix', 's3_win'] + required: true + type: str + index: + description: + - Position in the list of name mappings. 
+ - Minimum value is 1 and maximum is 2147483647. + required: true + type: int + pattern: + description: + - Pattern used to match the name while searching for a name that can be used as a replacement. + - The pattern is a UNIX-style regular expression. + - Regular expressions are case-insensitive when mapping from Windows to UNIX, + and they are case-sensitive for mappings from Kerberos to UNIX and UNIX to Windows. + - Minimum length is 1 and maximum length is 256. + - Pattern should be unique for each index of vserver. + - Example ENGCIFS_AD_USER. + type: str + replacement: + description: + - The name that is used as a replacement, if the pattern associated with this entry matches. + - Minimum length is 1 and maximum length is 256. + - Example unix_user1. + type: str + from_index: + description: + - If no entry with index is found, it is created by reindexing the entry for from_index. + - If no entry is found for index and from_index, an error is reported. + - Minimum value is 1 and maximum is 2147483647. + - Requires ONTAP version 9.7 or later. 
+ type: int + +''' + +EXAMPLES = ''' + - name: create name mappings configuration + netapp.ontap.na_ontap_name_mappings: + vserver: vserverName + direction: win_unix + index: 1 + pattern: ENGCIFS_AD_USER + replacement: unix_user + client_match: 10.254.101.111/28 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: modify name mappings configuration + netapp.ontap.na_ontap_name_mappings: + vserver: vserverName + direction: win_unix + index: 1 + pattern: ENGCIFS_AD_USERS + replacement: unix_user1 + client_match: 10.254.101.112/28 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Swap name mappings position + netapp.ontap.na_ontap_name_mappings: + vserver: vserverName + direction: win_unix + index: 1 + pattern: ENGCIFS_AD_USERS + replacement: unix_user1 + from_index: 2 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete name mappings configuration + netapp.ontap.na_ontap_name_mappings: + vserver: vserverName + direction: win_unix + index: 1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +''' + +RETURN = """ +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapNameMappings: + """ object initialize and class methods """ + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + client_match=dict(required=False, 
type='str'), + direction=dict(required=True, type='str', choices=['krb_unix', 'win_unix', 'unix_win', 's3_unix', 's3_win']), + index=dict(required=True, type='int'), + from_index=dict(required=False, type='int'), + pattern=dict(required=False, type='str'), + replacement=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule(self) + self.parameters = self.na_helper.set_parameters(self.module.params) + self.svm_uuid = None + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_name_mappings', 9, 6) + self.rest_api.is_rest_supported_properties(self.parameters, None, [['from_index', (9, 7)]]) + if self.parameters['direction'] in ['s3_unix', 's3_win'] and not self.rest_api.meets_rest_minimum_version(True, 9, 12, 1): + self.module.fail_json(msg="Error: direction %s requires ONTAP 9.12.1 or later version." % self.parameters['direction']) + + def get_name_mappings_rest(self, index=None): + ''' + Retrieves the name mapping configuration for SVM with rest API. 
+ ''' + if index is None: + index = self.parameters['index'] + query = {'svm.name': self.parameters.get('vserver'), + 'index': index, # the existing current index or from_index to be swapped + 'direction': self.parameters.get('direction'), # different directions can have same index + 'fields': 'svm.uuid,' + 'client_match,' + 'direction,' + 'index,' + 'pattern,' + 'replacement,'} + api = 'name-services/name-mappings' + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg=error) + if record: + self.svm_uuid = record['svm']['uuid'] + return { + 'pattern': self.na_helper.safe_get(record, ['pattern']), + 'direction': self.na_helper.safe_get(record, ['direction']), + 'replacement': self.na_helper.safe_get(record, ['replacement']), + 'client_match': record.get('client_match', None), + } + return None + + def create_name_mappings_rest(self): + """ + Creates name mappings for an SVM with REST API. + """ + body = {'svm.name': self.parameters.get('vserver'), + 'index': self.parameters.get('index'), + 'direction': self.parameters.get('direction'), + 'pattern': self.parameters.get('pattern'), + 'replacement': self.parameters.get('replacement')} + if 'client_match' in self.parameters: + body['client_match'] = self.parameters['client_match'] + api = 'name-services/name-mappings' + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error is not None: + self.module.fail_json(msg="Error on creating name mappings rest: %s" % error) + + def modify_name_mappings_rest(self, modify=None, reindex=False): + """ + Updates the name mapping configuration of an SVM with rest API. + Swap the position with new position(new_index). + """ + body = {} + query = None + if modify: + for option in ['pattern', 'replacement', 'client_match']: + if option in modify: + body[option] = self.parameters[option] + # Cannot swap entries which have hostname or address configured. 
+ # Delete and recreate the new entry at the specified position. + index = self.parameters['index'] + if reindex: + query = {'new_index': self.parameters.get('index')} + index = self.parameters['from_index'] + + api = 'name-services/name-mappings/%s/%s/%s' % (self.svm_uuid, self.parameters['direction'], index) + dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, query) + if error is not None: + self.module.fail_json(msg="Error on modifying name mappings rest: %s" % error) + + def delete_name_mappings_rest(self): + """ + Delete the name mapping configuration of an SVM with rest API. + """ + api = 'name-services/name-mappings/%s/%s/%s' % (self.svm_uuid, self.parameters['direction'], self.parameters['index']) + dummy, error = rest_generic.delete_async(self.rest_api, api, None) + if error is not None: + self.module.fail_json(msg="Error on deleting name mappings rest: %s" % error) + + def apply(self): + reindex = False + current = self.get_name_mappings_rest() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + # Throws error when trying to swap with non existing index + if cd_action == 'create': + if self.parameters.get('from_index') is not None: + current = self.get_name_mappings_rest(self.parameters['from_index']) + if not current: + self.module.fail_json(msg="Error from_index entry does not exist") + reindex = True + cd_action = None + else: + # pattern and replacement are required when creating name mappings. 
+ if not self.parameters.get('pattern') or not self.parameters.get('replacement'): + self.module.fail_json(msg="Error creating name mappings for an SVM, pattern and replacement are required in create.") + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_name_mappings_rest() + elif cd_action == 'delete': + self.delete_name_mappings_rest() + elif modify or reindex: + self.modify_name_mappings_rest(modify, reindex) + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + """ Create object and call apply """ + mapping_obj = NetAppOntapNameMappings() + mapping_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py new file mode 100644 index 000000000..edd8accb1 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_name_service_switch.py @@ -0,0 +1,250 @@ +#!/usr/bin/python + +# (c) 2019-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete/Modify Name Service Switch. + - Deleting name service switch not supported in REST. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_name_service_switch +options: + state: + choices: ['present', 'absent'] + description: + - Whether the specified ns-switch should exist or not. + default: present + type: str + vserver: + description: + - Name of the vserver to use. + required: true + type: str + database_type: + description: + - Name services switch database. 
+ choices: ['hosts','group', 'passwd', 'netgroup', 'namemap'] + required: true + type: str + sources: + description: + - Type of sources. + - Possible values include files,dns,ldap,nis. + type: list + elements: str + +short_description: "NetApp ONTAP Manage name service switch" +''' + +EXAMPLES = """ + - name: create name service database + netapp.ontap.na_ontap_name_service_switch: + state: present + database_type: namemap + sources: files,ldap + vserver: "{{ Vserver name }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + + - name: modify name service database sources + netapp.ontap.na_ontap_name_service_switch: + state: present + database_type: namemap + sources: files + vserver: "{{ Vserver name }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver + + +class NetAppONTAPNsswitch: + """ + Class with NVMe service methods + """ + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + database_type=dict(required=True, type='str', choices=['hosts', 'group', 'passwd', 'netgroup', 'namemap']), + sources=dict(required=False, type='list', elements='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = 
self.na_helper.set_parameters(self.module.params) + if self.parameters.get('sources') is not None: + self.parameters['sources'] = [source.strip() for source in self.parameters['sources']] + if '' in self.parameters['sources']: + self.module.fail_json(msg="Error: Invalid value '' specified for sources") + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.svm_uuid = None + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_name_service_switch(self): + """ + get current name service switch config + :return: dict of current name service switch + """ + if self.use_rest: + return self.get_name_service_switch_rest() + nss_iter = netapp_utils.zapi.NaElement('nameservice-nsswitch-get-iter') + nss_info = netapp_utils.zapi.NaElement('namservice-nsswitch-config-info') + db_type = netapp_utils.zapi.NaElement('nameservice-database') + db_type.set_content(self.parameters['database_type']) + query = netapp_utils.zapi.NaElement('query') + nss_info.add_child_elem(db_type) + query.add_child_elem(nss_info) + nss_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(nss_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching name service switch info for %s: %s' % + (self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()) + return_value = None + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1: + nss_sources = result.get_child_by_name('attributes-list').get_child_by_name( + 'namservice-nsswitch-config-info').get_child_by_name('nameservice-sources') + # nameservice-sources will not present in result if the value is '-' + if nss_sources: + sources = [sources.get_content() for sources in 
nss_sources.get_children()] + return_value = {'sources': sources} + else: + return_value = {'sources': []} + return return_value + + def create_name_service_switch(self): + """ + create name service switch config + :return: None + """ + nss_create = netapp_utils.zapi.NaElement('nameservice-nsswitch-create') + nss_create.add_new_child('nameservice-database', self.parameters['database_type']) + nss_sources = netapp_utils.zapi.NaElement('nameservice-sources') + nss_create.add_child_elem(nss_sources) + for source in self.parameters['sources']: + nss_sources.add_new_child('nss-source-type', source) + try: + self.server.invoke_successfully(nss_create, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error on creating name service switch config on vserver %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def delete_name_service_switch(self): + """ + delete name service switch + :return: None + """ + nss_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'nameservice-nsswitch-destroy', **{'nameservice-database': self.parameters['database_type']}) + try: + self.server.invoke_successfully(nss_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error on deleting name service switch config on vserver %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def modify_name_service_switch(self, modify): + """ + modify name service switch + :param modify: dict of modify attributes + :return: None + """ + if self.use_rest: + return self.modify_name_service_switch_rest() + nss_modify = netapp_utils.zapi.NaElement('nameservice-nsswitch-modify') + nss_modify.add_new_child('nameservice-database', self.parameters['database_type']) + nss_sources = netapp_utils.zapi.NaElement('nameservice-sources') + nss_modify.add_child_elem(nss_sources) + if 'sources' in modify: + for 
source in self.parameters['sources']: + nss_sources.add_new_child('nss-source-type', source) + try: + self.server.invoke_successfully(nss_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error on modifying name service switch config on vserver %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def get_name_service_switch_rest(self): + record, error = rest_vserver.get_vserver(self.rest_api, self.parameters['vserver'], 'nsswitch,uuid') + if error: + self.module.fail_json(msg='Error fetching name service switch info for %s: %s' % + (self.parameters['vserver'], to_native(error))) + if not record: + self.module.fail_json(msg="Error: Specified vserver %s not found" % self.parameters['vserver']) + self.svm_uuid = record['uuid'] + # if database type is already deleted by ZAPI call, REST will not have the database key. + # setting it to [] help to set the value in REST patch call. + database_type = self.na_helper.safe_get(record, ['nsswitch', self.parameters['database_type']]) + return {'sources': database_type if database_type else []} + + def modify_name_service_switch_rest(self): + api = 'svm/svms' + body = { + 'nsswitch': { + self.parameters['database_type']: self.parameters['sources'] + } + } + dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body) + if error: + self.module.fail_json(msg='Error on modifying name service switch config on vserver %s: %s' + % (self.parameters['vserver'], to_native(error))) + + def apply(self): + current = self.get_name_service_switch() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'delete' and self.use_rest: + self.module.fail_json(msg="Error: deleting name service switch not supported in REST.") + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + 
def main():
    """Apply operations from playbook"""
    # Fix: docstring previously read "Applyoperations" (missing space).
    nss = NetAppONTAPNsswitch()
    nss.apply()


if __name__ == '__main__':
    main()
+ type: str + + dump_detailed_stats: + description: + - Enable logging of VM stats for dump. + type: bool + + dump_logical_find: + description: + - Enable logical find for dump. + type: str + + enable: + description: + - Enable NDMP on vserver. + type: bool + + fh_dir_retry_interval: + description: + - FH throttle value for dir. + type: int + + fh_node_retry_interval: + description: + - FH throttle value for node. + type: int + + ignore_ctime_enabled: + description: + - Ignore ctime. + type: bool + + is_secure_control_connection_enabled: + description: + - Is secure control connection enabled. + type: bool + + offset_map_enable: + description: + - Enable offset map. + type: bool + + per_qtree_exclude_enable: + description: + - Enable per qtree exclusion. + type: bool + + preferred_interface_role: + description: + - Preferred interface role. + type: list + elements: str + + restore_vm_cache_size: + description: + - Restore VM file cache size. Value range [4-1024] + type: int + + secondary_debug_filter: + description: + - Secondary debug filter. + type: str + + tcpnodelay: + description: + - Enable TCP nodelay. + type: bool + + tcpwinsize: + description: + - TCP window size. 
+ type: int +''' + +EXAMPLES = ''' + - name: modify ndmp + na_ontap_ndmp: + vserver: ansible + hostname: "{{ hostname }}" + abort_on_disk_error: true + authtype: plaintext,challenge + backup_log_enable: true + data_port_range: 8000-9000 + debug_enable: true + debug_filter: filter + dump_detailed_stats: true + dump_logical_find: default + enable: true + fh_dir_retry_interval: 100 + fh_node_retry_interval: 100 + ignore_ctime_enabled: true + is_secure_control_connection_enabled: true + offset_map_enable: true + per_qtree_exclude_enable: true + preferred_interface_role: node_mgmt,intercluster + restore_vm_cache_size: 1000 + secondary_debug_filter: filter + tcpnodelay: true + tcpwinsize: 10000 + username: user + password: pass + https: False +''' + +RETURN = ''' +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppONTAPNdmp(object): + ''' + modify vserver cifs security + ''' + def __init__(self): + self.use_rest = False + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.modifiable_options = dict( + abort_on_disk_error=dict(required=False, type='bool'), + authtype=dict(required=False, type='list', elements='str'), + backup_log_enable=dict(required=False, type='bool'), + data_port_range=dict(required=False, type='str'), + debug_enable=dict(required=False, type='bool'), + debug_filter=dict(required=False, type='str'), + dump_detailed_stats=dict(required=False, type='bool'), + dump_logical_find=dict(required=False, type='str'), + enable=dict(required=False, type='bool'), + fh_dir_retry_interval=dict(required=False, type='int'), + 
fh_node_retry_interval=dict(required=False, type='int'), + ignore_ctime_enabled=dict(required=False, type='bool'), + is_secure_control_connection_enabled=dict(required=False, type='bool'), + offset_map_enable=dict(required=False, type='bool'), + per_qtree_exclude_enable=dict(required=False, type='bool'), + preferred_interface_role=dict(required=False, type='list', elements='str'), + restore_vm_cache_size=dict(required=False, type='int'), + secondary_debug_filter=dict(required=False, type='str'), + tcpnodelay=dict(required=False, type='bool'), + tcpwinsize=dict(required=False, type='int') + ) + self.argument_spec.update(dict( + vserver=dict(required=True, type='str') + )) + + self.argument_spec.update(self.modifiable_options) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # API should be used for ONTAP 9.6 or higher, ZAPI for lower version + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = ['abort_on_disk_error', 'backup_log_enable', 'data_port_range', + 'debug_enable', 'debug_filter', 'dump_detailed_stats', + 'dump_logical_find', 'fh_dir_retry_interval', 'fh_node_retry_interval', + 'ignore_ctime_enabled', 'is_secure_control_connection_enabled', + 'offset_map_enable', 'per_qtree_exclude_enable', 'preferred_interface_role', + 'restore_vm_cache_size', 'secondary_debug_filter', 'tcpnodelay', 'tcpwinsize'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties) + if not self.use_rest: + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_ndmp_svm_uuid(self): + + """ + Get a svm's UUID + :return: uuid of the node + """ + params = {'svm.name': 
self.parameters['vserver']} + api = "protocols/ndmp/svms" + message, error = self.rest_api.get(api, params) + if error is not None: + self.module.fail_json(msg=error) + if 'records' in message and len(message['records']) == 0: + self.module.fail_json(msg='Error fetching uuid for vserver %s: ' % (self.parameters['vserver'])) + if len(message.keys()) == 0: + error = "No information collected from %s: %s" % (api, repr(message)) + self.module.fail_json(msg=error) + elif 'records' not in message: + error = "Unexpected response from %s: %s" % (api, repr(message)) + self.module.fail_json(msg=error) + return message['records'][0]['svm']['uuid'] + + def ndmp_get_iter(self, uuid=None): + """ + get current vserver ndmp attributes. + :return: a dict of ndmp attributes. + """ + if self.use_rest: + data = dict() + params = {'fields': 'authentication_types,enabled'} + api = '/protocols/ndmp/svms/' + uuid + message, error = self.rest_api.get(api, params) + data['enable'] = message['enabled'] + data['authtype'] = message['authentication_types'] + + if error: + self.module.fail_json(msg=error) + return data + else: + ndmp_get = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-get-iter') + query = netapp_utils.zapi.NaElement('query') + ndmp_info = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-info') + ndmp_info.add_new_child('vserver', self.parameters['vserver']) + query.add_child_elem(ndmp_info) + ndmp_get.add_child_elem(query) + ndmp_details = dict() + try: + result = self.server.invoke_successfully(ndmp_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching ndmp from %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + ndmp_attributes = result.get_child_by_name('attributes-list').get_child_by_name('ndmp-vserver-attributes-info') + 
self.get_ndmp_details(ndmp_details, ndmp_attributes) + return ndmp_details + + def get_ndmp_details(self, ndmp_details, ndmp_attributes): + """ + :param ndmp_details: a dict of current ndmp. + :param ndmp_attributes: ndmp returned from api call in xml format. + :return: None + """ + for option in self.modifiable_options: + option_type = self.modifiable_options[option]['type'] + if option_type == 'bool': + ndmp_details[option] = self.str_to_bool(ndmp_attributes.get_child_content(self.attribute_to_name(option))) + elif option_type == 'int': + ndmp_details[option] = int(ndmp_attributes.get_child_content(self.attribute_to_name(option))) + elif option_type == 'list': + child_list = ndmp_attributes.get_child_by_name(self.attribute_to_name(option)) + values = [child.get_content() for child in child_list.get_children()] + ndmp_details[option] = values + else: + ndmp_details[option] = ndmp_attributes.get_child_content(self.attribute_to_name(option)) + + def modify_ndmp(self, modify): + """ + :param modify: A list of attributes to modify + :return: None + """ + if self.use_rest: + ndmp = dict() + uuid = self.get_ndmp_svm_uuid() + if self.parameters.get('enable'): + ndmp['enabled'] = self.parameters['enable'] + if self.parameters.get('authtype'): + ndmp['authentication_types'] = self.parameters['authtype'] + api = "protocols/ndmp/svms/" + uuid + dummy, error = self.rest_api.patch(api, ndmp) + if error: + self.module.fail_json(msg=error) + else: + + ndmp_modify = netapp_utils.zapi.NaElement('ndmp-vserver-attributes-modify') + for attribute in modify: + if attribute == 'authtype': + authtypes = netapp_utils.zapi.NaElement('authtype') + types = self.parameters['authtype'] + for authtype in types: + authtypes.add_new_child('ndmpd-authtypes', authtype) + ndmp_modify.add_child_elem(authtypes) + elif attribute == 'preferred_interface_role': + preferred_interface_roles = netapp_utils.zapi.NaElement('preferred-interface-role') + roles = self.parameters['preferred_interface_role'] + 
for role in roles: + preferred_interface_roles.add_new_child('netport-role', role) + ndmp_modify.add_child_elem(preferred_interface_roles) + else: + ndmp_modify.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute])) + try: + self.server.invoke_successfully(ndmp_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error modifying ndmp on %s: %s' + % (self.parameters['vserver'], to_native(exc)), + exception=traceback.format_exc()) + + @staticmethod + def attribute_to_name(attribute): + return str.replace(attribute, '_', '-') + + @staticmethod + def str_to_bool(value): + return value == 'true' + + def apply(self): + """Call modify operations.""" + uuid = None + if self.use_rest: + # we only have the svm name, we need to the the uuid for the svm + uuid = self.get_ndmp_svm_uuid() + current = self.ndmp_get_iter(uuid=uuid) + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if modify: + self.modify_ndmp(modify) + result = netapp_utils.generate_result(self.na_helper.changed, modify=modify) + self.module.exit_json(**result) + + +def main(): + obj = NetAppONTAPNdmp() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py new file mode 100644 index 000000000..6ba4083e5 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_ifgrp.py @@ -0,0 +1,546 @@ +#!/usr/bin/python + +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_net_ifgrp +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_net_ifgrp +short_description: NetApp Ontap modify network interface 
group +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create, modify ports, destroy the network interface group +options: + state: + description: + - Whether the specified network interface group should exist or not. + choices: ['present', 'absent'] + type: str + default: present + + distribution_function: + description: + - Specifies the traffic distribution function for the ifgrp. + choices: ['mac', 'ip', 'sequential', 'port'] + type: str + + name: + description: + - Specifies the interface group name. + - Not supported with REST, use C(ports) or C(from_lag_ports). + - Required with ZAPI. + type: str + + mode: + description: + - Specifies the link policy for the ifgrp. + type: str + + node: + description: + - Specifies the name of node. + required: true + type: str + + ports: + aliases: + - port + description: + - List of expected ports to be present in the interface group. + - If a port is present in this list, but not on the target, it will be added. + - If a port is not in the list, but present on the target, it will be removed. + - Make sure the list contains all ports you want to see on the target. + - With REST, ports in this list are used to find the current LAG port. + - If LAG is not found or only partial port matches, then C(from_lag_port) are used to get the current LAG. + - With REST, when C(state=absent) is set, all of the ports in ifgrp should be provided to delete it. + - Example C(ports=['e0c','e0a']) will delete ifgrp that has ports C(['e0c','e0a']). + version_added: 2.8.0 + type: list + elements: str + + from_lag_ports: + description: + - Only supported with REST and is ignored with ZAPI. + - Specify all the ports to find current LAG port. + - Ignored if LAG found with exact match of C(ports). + - Example if current LAG has ports C(['e0c','e0d']) and C(ports=['e0c','e0d']), then from_lag_ports will be ignored. 
+ - If LAG not found with C(ports), then ports in this list are used to find the current LAG. + - Ports in this list are used only for finding current LAG, provide exact match of all the ports in the current LAG. + - Ignored when C(state=absent). + version_added: 2.14.0 + type: list + elements: str + + broadcast_domain: + description: + - Specify the broadcast_domain name. + - Only supported with REST and is ignored with ZAPI. + - Required with ONTAP 9.6 and 9.7, but optional with 9.8 or later. + type: str + version_added: 21.14.0 + + ipspace: + description: + - Specify the ipspace for the broadcast domain. + - Only supported with REST and is ignored with ZAPI. + - Required with ONTAP 9.6 and 9.7, but optional with 9.8 or later. + type: str + version_added: 21.14.0 +""" + +EXAMPLES = """ + - name: create ifgrp + netapp.ontap.na_ontap_net_ifgrp: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + distribution_function: ip + name: a0c + ports: [e0a] + mode: multimode + node: "{{ Vsim node name }}" + - name: modify ports in an ifgrp + netapp.ontap.na_ontap_net_ifgrp: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + distribution_function: ip + name: a0c + port: [e0a, e0c] + mode: multimode + node: "{{ Vsim node name }}" + - name: delete ifgrp + netapp.ontap.na_ontap_net_ifgrp: + state: absent + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + name: a0c + node: "{{ Vsim node name }}" + - name: create ifgrp - REST + netapp.ontap.na_ontap_net_ifgrp: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + distribution_function: ip + ports: [e0a,e0b] + mode: multimode + node: "{{ Vsim node name }}" + broadcast_domain: Default + ipspace: Default + - name: Remove e0a and add port e0d to above 
def __init__(self):
    """
    Initialize the Ontap IfGrp class.

    Builds the argument spec, enforces option interdependencies and selects
    the transport: REST when supported (and 'name' is not used), ZAPI otherwise.
    """
    self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
    self.argument_spec.update(dict(
        state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
        distribution_function=dict(required=False, type='str', choices=['mac', 'ip', 'sequential', 'port']),
        name=dict(required=False, type='str'),
        mode=dict(required=False, type='str'),
        node=dict(required=True, type='str'),
        ports=dict(required=False, type='list', elements='str', aliases=["port"]),
        from_lag_ports=dict(required=False, type='list', elements='str'),
        broadcast_domain=dict(required=False, type='str'),
        ipspace=dict(required=False, type='str')
    ))

    self.module = AnsibleModule(
        argument_spec=self.argument_spec,
        required_if=[
            ('state', 'present', ['distribution_function', 'mode'])
        ],
        required_together=[['broadcast_domain', 'ipspace']],
        supports_check_mode=True
    )

    # cache of LAG records fetched via REST so repeated lookups reuse one GET
    self.current_records = []

    # set up variables
    self.na_helper = NetAppModule()
    self.parameters = self.na_helper.set_parameters(self.module.params)

    # Set up Rest API
    self.rest_api = OntapRestAPI(self.module)

    # if rest and use_rest: auto and name is present, revert to zapi
    # if rest and use_rest: always and name is present, throw error.
    unsupported_rest_properties = ['name']
    self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
    if self.use_rest:
        # if rest and ports is not present, throw error as ports is a required field with REST
        if 'ports' not in self.parameters:
            error_msg = "Error: ports is a required field with REST"
            self.module.fail_json(msg=error_msg)

        required_options = ['broadcast_domain', 'ipspace']
        min_ontap_98 = self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0)
        if not min_ontap_98 and not any(x in self.parameters for x in required_options):
            error_msg = "'%s' are mandatory fields with ONTAP 9.6 and 9.7" % ', '.join(required_options)
            self.module.fail_json(msg=error_msg)
    else:
        if not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
        if 'name' not in self.parameters:
            # fix: pass the message via msg= — the original positional call is
            # inconsistent with every other fail_json call in this module and
            # raises TypeError on older ansible-core where fail_json accepts
            # keyword arguments only.
            self.module.fail_json(msg="Error: name is a required field with ZAPI.")
        if 'broadcast_domain' in self.parameters or 'ipspace' in self.parameters or 'from_lag_ports' in self.parameters:
            msg = 'Using ZAPI and ignoring options - broadcast_domain, ipspace and from_lag_ports'
            self.module.warn(msg)
            self.parameters.pop('broadcast_domain', None)
            self.parameters.pop('ipspace', None)
            self.parameters.pop('from_lag_ports', None)
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_if_grp_rest(self, ports, allow_partial_match):
    """
    Look up, via REST, the LAG containing any of the given ports.

    Returns a (current, exact_match) tuple: current is the LAG detail dict
    (or None when no acceptable match exists) and exact_match says whether
    the LAG's member ports equal `ports` exactly. A partial match is only
    returned when allow_partial_match is True.
    """
    api = 'network/ethernet/ports'
    query = {
        'type': 'lag',
        'node.name': self.parameters['node'],
    }
    fields = 'name,node,uuid,broadcast_domain,lag'
    # fetch the LAG records only once; later calls reuse the cached list
    if not self.current_records:
        self.current_records, fetch_error = rest_generic.get_0_or_more_records(self.rest_api, api, query, fields)
        if fetch_error:
            self.module.fail_json(msg=fetch_error)
    if self.current_records:
        found = self.get_if_grp_current(self.current_records, ports)
        if found:
            is_exact = self.check_exact_match(ports, found['ports'])
            if is_exact or allow_partial_match:
                return found, is_exact
    return None, None
def get_if_grp_detail(self, record, current_port_list):
    """Build a LAG detail dict (node, uuid, ports — plus broadcast domain info when present) from a REST record."""
    details = {
        'node': record['node']['name'],
        'uuid': record['uuid'],
        'ports': current_port_list,
    }
    domain = record.get('broadcast_domain')
    if domain:
        details['broadcast_domain'] = domain['name']
        details['ipspace'] = domain['ipspace']['name']
    return details
def create_if_grp(self):
    """
    Create a new ifgrp (LAG).

    Delegates to REST when enabled; otherwise issues the ZAPI
    net-port-ifgrp-create call and then adds each requested member port.
    """
    if self.use_rest:
        return self.create_if_grp_rest()
    ifgrp_create = netapp_utils.zapi.NaElement("net-port-ifgrp-create")
    for zapi_child, option in (
            ("distribution-function", 'distribution_function'),
            ("ifgrp-name", 'name'),
            ("mode", 'mode'),
            ("node", 'node'),
    ):
        ifgrp_create.add_new_child(zapi_child, self.parameters[option])
    try:
        self.server.invoke_successfully(ifgrp_create, True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(msg='Error creating if_group %s: %s' % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
    members = self.parameters.get('ports')
    if members is not None:
        for member in members:
            self.add_port_to_if_grp(member)
def modify_ports(self, current_ports):
    """Reconcile ZAPI ifgrp membership: add desired ports that are missing, drop ports no longer wanted."""
    desired = set(self.parameters['ports'])
    existing = set(current_ports)
    for missing in desired - existing:
        self.add_port_to_if_grp(missing)
    for extra in existing - desired:
        self.remove_port_to_if_grp(extra)
def build_member_ports(self):
    """Return the REST lag.member_ports payload built from the desired ports list."""
    node_name = self.parameters['node']
    return [{'name': port, 'node': {'name': node_name}} for port in self.parameters['ports']]
def apply(self):
    """Run the module: create, delete or update the LAG so it matches the desired state."""
    # for a LAG, rename is equivalent to adding/removing ports from an existing LAG.
    current, exact_match, modify, rename = None, True, None, None
    if self.use_rest:
        current, exact_match = self.get_if_grp_rest(self.parameters.get('ports'), allow_partial_match=True)
    else:
        current = self.get_if_grp()
    cd_action = self.na_helper.get_cd_action(current if exact_match else None, self.parameters)
    if cd_action == 'create' and self.use_rest:
        # if we could not find a lag, or only a lag with a partial match, do a new query using from_lag_ports.
        if self.parameters.get('from_lag_ports') is not None:
            from_ifgrp, dummy = self.get_if_grp_rest(self.parameters['from_lag_ports'], allow_partial_match=False)
            if not from_ifgrp:
                self.module.fail_json(msg="Error: cannot find LAG matching from_lag_ports: '%s'." % self.parameters['from_lag_ports'])
            rename, current = True, from_ifgrp
        # if we have a partial match with an existing LAG, we will update the ports.
        elif not exact_match and current:
            rename = True
    if rename:
        cd_action = None
    if cd_action is None and self.parameters['state'] == 'present':
        # with rest, current already carries the port details
        current_ports = current if self.use_rest else self.get_if_grp_ports()
        modify = self.na_helper.get_modified_attributes(current_ports, self.parameters)
    if self.na_helper.changed and not self.module.check_mode:
        uuid = current['uuid'] if current and self.use_rest else None
        if cd_action == 'create':
            self.create_if_grp()
        elif cd_action == 'delete':
            self.delete_if_grp(uuid)
        elif modify:
            if self.use_rest:
                self.modify_ports_rest(modify, uuid)
            else:
                self.modify_ports(current_ports['ports'])
    result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
    self.module.exit_json(**result)
a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py new file mode 100644 index 000000000..e8f045103 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_port.py @@ -0,0 +1,309 @@ +#!/usr/bin/python + +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +''' +na_ontap_net_port +''' + +DOCUMENTATION = """ +module: na_ontap_net_port +short_description: NetApp ONTAP network ports. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Modify a ONTAP network port. +options: + state: + description: + - Whether the specified net port should exist or not. + choices: ['present'] + type: str + default: present + node: + description: + - Specifies the name of node. + required: true + type: str + ports: + aliases: + - port + description: + - Specifies the name of port(s). + required: true + type: list + elements: str + mtu: + description: + - Specifies the maximum transmission unit (MTU) reported by the port. + - Not supported with REST. + type: int + autonegotiate_admin: + description: + - Enables or disables Ethernet auto-negotiation of speed, + duplex and flow control. + - Not supported with REST. + type: bool + duplex_admin: + description: + - Specifies the user preferred duplex setting of the port. + - Valid values auto, half, full + - Not supported with REST. + type: str + speed_admin: + description: + - Specifies the user preferred speed setting of the port. + - Not supported with REST. + type: str + flowcontrol_admin: + description: + - Specifies the user preferred flow control setting of the port. + - Not supported with REST. 
+ type: str + ipspace: + description: + - Specifies the port's associated IPspace name. + - The 'Cluster' ipspace is reserved for cluster ports. + - Not supported with REST. + - use netapp.ontap.na_ontap_ports to modify ipspace with REST. + type: str + up_admin: + description: + - Enables or disables the port. + type: bool + version_added: 21.8.0 +""" + +EXAMPLES = """ + - name: Modify Net Port + netapp.ontap.na_ontap_net_port: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + node: "{{ node_name }}" + ports: e0d,e0c + autonegotiate_admin: true + up_admin: true + mtu: 1500 +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapNetPort: + """ + Modify a Net port + """ + + def __init__(self): + """ + Initialize the Ontap Net Port Class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present'], default='present'), + node=dict(required=True, type="str"), + ports=dict(required=True, type='list', elements='str', aliases=['port']), + mtu=dict(required=False, type="int", default=None), + autonegotiate_admin=dict(required=False, type="bool", default=None), + up_admin=dict(required=False, type="bool", default=None), + duplex_admin=dict(required=False, type="str", default=None), + speed_admin=dict(required=False, type="str", default=None), + flowcontrol_admin=dict(required=False, type="str", default=None), + ipspace=dict(required=False, 
def set_playbook_zapi_key_map(self):
    """Register the Ansible-option -> ZAPI-element name maps on the helper, grouped by value type."""
    string_pairs = (
        ('duplex_admin', 'administrative-duplex'),
        ('speed_admin', 'administrative-speed'),
        ('flowcontrol_admin', 'administrative-flowcontrol'),
        ('ipspace', 'ipspace'),
    )
    bool_pairs = (
        ('up_admin', 'is-administrative-up'),
        ('autonegotiate_admin', 'is-administrative-auto-negotiate'),
    )
    self.na_helper.zapi_string_keys = dict(string_pairs)
    self.na_helper.zapi_bool_keys = dict(bool_pairs)
    self.na_helper.zapi_int_keys = {'mtu': 'mtu'}
def get_net_port_rest(self, port):
    """
    Fetch one Ethernet port via REST.

    Returns a dict with name, node, uuid and up_admin (mapped from the REST
    'enabled' field), or None when the port does not exist.
    """
    api = 'network/ethernet/ports'
    query = {
        'name': port,
        'node.name': self.parameters['node'],
    }
    record, error = rest_generic.get_one_record(self.rest_api, api, query, 'name,node,uuid,enabled')
    if error:
        self.module.fail_json(msg=error)
    if not record:
        return None
    return {
        'name': record['name'],
        'node': record['node']['name'],
        'uuid': record['uuid'],
        'up_admin': record['enabled'],
    }
def modify_net_port_rest(self, uuid, modify):
    """
    Modify broadcast domain, ipspace and enable/disable port.

    With REST only up_admin (the 'enabled' flag) is modifiable here; the
    port is addressed by its uuid.
    """
    patch_body = {'enabled': modify['up_admin']}
    dummy, error = rest_generic.patch_async(self.rest_api, 'network/ethernet/ports', uuid, patch_body)
    if error:
        self.module.fail_json(msg=error)
'', '.' + if len(missing_ports) == len(self.parameters['ports']): + suffix = ' - check node name.' + if len(missing_ports) > 1: + plural = 's' + self.module.fail_json(changed=self.na_helper.changed, modify=modified, + msg='Error: port%s: %s not found on node: %s%s' + % (plural, ', '.join(missing_ports), self.parameters['node'], suffix)) + result = netapp_utils.generate_result(self.na_helper.changed, modify=modified) + self.module.exit_json(**result) + + +def main(): + """ + Create the NetApp Ontap Net Port Object and modify it + """ + obj = NetAppOntapNetPort() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py new file mode 100644 index 000000000..9881755b3 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_routes.py @@ -0,0 +1,354 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_net_routes +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: na_ontap_net_routes +short_description: NetApp ONTAP network routes +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Modify ONTAP network routes. +options: + state: + description: + - Whether you want to create or delete a network route. + choices: ['present', 'absent'] + type: str + default: present + vserver: + description: + - The name of the vserver. + - Required when using ZAPI. + - When using REST, omit this parameter for cluster scoped routes, or set it to NULL. + type: str + destination: + description: + - Specify the route destination. + - Example 10.7.125.5/20, fd20:13::/64. 
+ required: true + type: str + gateway: + description: + - Specify the route gateway. + - Example 10.7.125.1, fd20:13::1. + required: true + type: str + metric: + description: + - Specify the route metric. If this field is not provided, ONTAP will default to 20. + - Supported from ONTAP 9.11.0 in REST. + - With REST, trying to modify destination or gateway will also reset metric to 20 in ONTAP 9.10.1 or earlier. + type: int + from_destination: + description: + - Specify the route destination that should be changed. + version_added: 2.8.0 + type: str + from_gateway: + description: + - Specify the route gateway that should be changed. + version_added: 2.8.0 + type: str + from_metric: + description: + - Specify the route metric that should be changed. + - This parameter is ignored, as the value is read from ONTAP. + - Not supported with REST, ignored with ZAPI. + version_added: 2.8.0 + type: int +''' + +EXAMPLES = """ + - name: create route + netapp.ontap.na_ontap_net_routes: + state: present + vserver: "{{ Vserver name }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + destination: 10.7.125.5/20 + gateway: 10.7.125.1 + metric: 30 + + - name: create route - cluster scope, using REST + netapp.ontap.na_ontap_net_routes: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + destination: 10.7.125.5/20 + gateway: 10.7.125.1 + + - name: create route - vserver scope, using REST + netapp.ontap.na_ontap_net_routes: + state: present + vserver: "{{ Vserver name }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + destination: 10.7.125.5/20 + gateway: 10.7.125.1 +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as 
netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapNetRoutes: + """ + Create, Modifies and Destroys a Net Route + """ + + def __init__(self): + """ + Initialize the Ontap Net Route class + """ + self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=False, type='str'), + destination=dict(required=True, type='str'), + gateway=dict(required=True, type='str'), + metric=dict(required=False, type='int'), + from_destination=dict(required=False, type='str', default=None), + from_gateway=dict(required=False, type='str', default=None), + from_metric=dict(required=False, type='int', default=None), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + + # metric supported from ONTAP 9.11.0 version. 
+ self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, ['from_metric'], [['metric', (9, 11, 0)]]) + self.validate_options() + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def validate_options(self): + errors = [] + example = '' + if not self.use_rest and 'vserver' not in self.parameters: + # self.module.fail_json(msg="Error: vserver is a required parameter when using ZAPI") + errors.append("vserver is a required parameter when using ZAPI") + for attr in ('destination', 'from_destination'): + value = self.parameters.get(attr) + if value is not None and '/' not in value: + errors.append("Expecting '/' in '%s'" % value) + example = 'Examples: 10.7.125.5/20, fd20:13::/64' + if errors: + if example: + errors.append(example) + self.module.fail_json(msg="Error: %s." % '. '.join(errors)) + + @staticmethod + def sanitize_exception(action, exc): + if action == 'create' and to_native(exc.code) == '13001' and 'already exists' in to_native(exc.message): + return None + if action == 'get' and to_native(exc.code) == "15661": + # Error 15661 denotes a route doesn't exist. 
+ return None + return to_native(exc) + + def create_net_route(self, current=None, fail=True): + """ + Creates a new Route + """ + if current is None: + current = self.parameters + if self.use_rest: + api = 'network/ip/routes' + body = {'gateway': current['gateway']} + dest = current['destination'] + if isinstance(dest, dict): + body['destination'] = dest + else: + dest = current['destination'].split('/') + body['destination'] = {'address': dest[0], 'netmask': dest[1]} + if current.get('vserver') is not None: + body['svm.name'] = current['vserver'] + if current.get('metric') is not None: + body['metric'] = current['metric'] + __, error = rest_generic.post_async(self.rest_api, api, body) + else: + route_obj = netapp_utils.zapi.NaElement('net-routes-create') + route_obj.add_new_child("destination", current['destination']) + route_obj.add_new_child("gateway", current['gateway']) + metric = current.get('metric') + if metric is not None: + route_obj.add_new_child("metric", str(metric)) + try: + self.server.invoke_successfully(route_obj, True) + error = None + except netapp_utils.zapi.NaApiError as exc: + # return if desired route already exists + error = self.sanitize_exception('create', exc) + if error: + error = 'Error creating net route: %s' % error + if fail: + self.module.fail_json(msg=error) + return error + + def delete_net_route(self, current): + """ + Deletes a given Route + """ + if self.use_rest: + uuid = current['uuid'] + api = 'network/ip/routes' + dummy, error = rest_generic.delete_async(self.rest_api, api, uuid) + if error: + self.module.fail_json(msg='Error deleting net route - %s' % error) + else: + route_obj = netapp_utils.zapi.NaElement('net-routes-destroy') + route_obj.add_new_child("destination", current['destination']) + route_obj.add_new_child("gateway", current['gateway']) + try: + self.server.invoke_successfully(route_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting net route: %s' + % 
(to_native(error)), + exception=traceback.format_exc()) + + def recreate_net_route(self, current): + """ + Modify a net route + Since we cannot modify a route, we are deleting the existing route, and creating a new one. + """ + self.delete_net_route(current) + # use existing metric if not specified + if current.get('metric') is not None and self.parameters.get('metric') is None: + self.parameters['metric'] = current['metric'] + error = self.create_net_route(fail=False) + if error: + # restore the old route, create the route with the existing values + self.create_net_route(current) + # Invalid value specified for any of the attributes + self.module.fail_json(msg='Error modifying net route: %s' % error, + exception=traceback.format_exc()) + + def get_net_route(self, params=None): + """ + Checks to see if a route exist or not + :return: NaElement object if a route exists, None otherwise + """ + if params is None: + params = self.parameters + if self.use_rest: + api = "network/ip/routes" + fields = 'destination,gateway,svm,scope' + if self.parameters.get('metric') is not None: + fields += ',metric' + query = {'destination.address': params['destination'].split('/')[0], + 'gateway': params['gateway']} + if params.get('vserver') is None: + query['scope'] = 'cluster' + else: + query['scope'] = 'svm' + query['svm.name'] = params['vserver'] + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg='Error fetching net route: %s' % error) + # even if metric not set, 20 is set by default. + if record and 'metric' not in record: + record['metric'] = None + return record + else: + route_obj = netapp_utils.zapi.NaElement('net-routes-get') + for attr in ('destination', 'gateway'): + route_obj.add_new_child(attr, params[attr]) + try: + result = self.server.invoke_successfully(route_obj, True) + except netapp_utils.zapi.NaApiError as exc: + # Error 15661 denotes a route doesn't exist. 
+ error = self.sanitize_exception('get', exc) + if error is None: + return None + self.module.fail_json(msg='Error fetching net route: %s' % error, + exception=traceback.format_exc()) + if result.get_child_by_name('attributes') is not None: + route_info = result.get_child_by_name('attributes').get_child_by_name('net-vs-routes-info') + return { + 'destination': route_info.get_child_content('destination'), + 'gateway': route_info.get_child_content('gateway'), + 'metric': int(route_info.get_child_content('metric')) + } + return None + + def apply(self): + """ + Run Module based on play book + """ + modify, rename = False, False + current = self.get_net_route() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and any(self.parameters.get(attr) is not None for attr in ('from_gateway', 'from_destination')): + # create by renaming existing route if it exists + # destination and gateway combination is unique, and is considered like an id. + # So modify destination or gateway is considered a rename action. + # If one of 'destination', 'gateway' is not in the from field, use the desired value. 
+ from_params = {'gateway': self.parameters.get('from_gateway', self.parameters['gateway']), + 'destination': self.parameters.get('from_destination', self.parameters['destination'])} + if self.parameters.get('vserver'): + from_params['vserver'] = self.parameters['vserver'] + current = self.get_net_route(from_params) + if current is None: + self.module.fail_json(msg="Error modifying: route %s does not exist" % self.parameters['from_destination']) + rename = True + cd_action = None + + if cd_action is None and self.parameters.get('metric') is not None and current: + modify = self.parameters['metric'] != current['metric'] + if modify: + self.na_helper.changed = True + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_net_route() + elif cd_action == 'delete': + self.delete_net_route(current) + elif rename or modify: + self.recreate_net_route(current) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, extra_responses={'rename': rename}) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap Net Route object and runs the correct play task + """ + obj = NetAppOntapNetRoutes() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py new file mode 100644 index 000000000..8a4a26a28 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_subnet.py @@ -0,0 +1,426 @@ +#!/usr/bin/python +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_net_subnet +short_description: NetApp ONTAP Create, delete, modify network subnets. 
+extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.8.0 +author: Storage Engineering (@Albinpopote) +description: + - Create, modify, destroy the network subnet +options: + state: + description: + - Whether the specified network interface group should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + broadcast_domain: + description: + - Specify the required broadcast_domain name for the subnet. + - A broadcast domain can not be modified after the subnet has been created + type: str + + name: + description: + - Specify the subnet name. + required: true + type: str + + from_name: + description: + - Name of the subnet to be renamed + type: str + + gateway: + description: + - Specify the gateway for the default route of the subnet. + type: str + + ipspace: + description: + - Specify the ipspace for the subnet. + - The default value for this parameter is the default IPspace, named 'Default'. + type: str + + ip_ranges: + description: + - Specify the list of IP address ranges associated with the subnet. + type: list + elements: str + + subnet: + description: + - Specify the subnet (ip and mask). + type: str + +notes: + - supports ZAPI and REST. REST requires ONTAP 9.11.1 or later. + - supports check mode. 
+""" + +EXAMPLES = """ + - name: create subnet + netapp.ontap.na_ontap_net_subnet: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + subnet: 10.10.10.0/24 + name: subnet-adm + ip_ranges: [ '10.10.10.30-10.10.10.40', '10.10.10.51' ] + gateway: 10.10.10.254 + ipspace: Default + broadcast_domain: Default + - name: delete subnet + netapp.ontap.na_ontap_net_subnet: + state: absent + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + name: subnet-adm + ipspace: Default + - name: rename subnet + netapp.ontap.na_ontap_net_subnet: + state: present + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + name: subnet-adm-new + from_name: subnet-adm + ipspace: Default +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapSubnet: + """ + Create, Modifies and Destroys a subnet + """ + def __init__(self): + """ + Initialize the ONTAP Subnet class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + broadcast_domain=dict(required=False, type='str'), + gateway=dict(required=False, type='str'), + ip_ranges=dict(required=False, type='list', elements='str'), + ipspace=dict(required=False, type='str'), + subnet=dict(required=False, type='str') + )) + + self.module = AnsibleModule( + 
argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # Set up Rest API + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.uuid = None + + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 1): + msg = 'REST requires ONTAP 9.11.1 or later for network/ip/subnets APIs.' + self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def get_subnet(self, name=None): + """ + Return details about the subnet + :param: + name : Name of the subnet + :return: Details about the subnet. None if not found. + :rtype: dict + """ + if name is None: + name = self.parameters.get('name') + if self.use_rest: + return self.get_subnet_rest(name) + subnet_iter = netapp_utils.zapi.NaElement('net-subnet-get-iter') + subnet_info = netapp_utils.zapi.NaElement('net-subnet-info') + subnet_info.add_new_child('subnet-name', name) + if self.parameters.get('ipspace'): + subnet_info.add_new_child('ipspace', self.parameters['ipspace']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(subnet_info) + + subnet_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(subnet_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching subnet %s: %s' % (name, to_native(error))) + return_value = None + # check if query returns the expected subnet + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + + subnet_attributes = result.get_child_by_name('attributes-list').get_child_by_name('net-subnet-info') + broadcast_domain = 
subnet_attributes.get_child_content('broadcast-domain') + gateway = subnet_attributes.get_child_content('gateway') + ipspace = subnet_attributes.get_child_content('ipspace') + subnet = subnet_attributes.get_child_content('subnet') + name = subnet_attributes.get_child_content('subnet-name') + + ip_ranges = [] + if subnet_attributes.get_child_by_name('ip-ranges'): + range_obj = subnet_attributes.get_child_by_name('ip-ranges').get_children() + ip_ranges = [elem.get_content() for elem in range_obj] + + return_value = { + 'name': name, + 'broadcast_domain': broadcast_domain, + 'gateway': gateway, + 'ip_ranges': ip_ranges, + 'ipspace': ipspace, + 'subnet': subnet + } + + return return_value + + def create_subnet(self): + """ + Creates a new subnet + """ + if self.use_rest: + return self.create_subnet_rest() + subnet_create = self.build_zapi_request_for_create_or_modify('net-subnet-create') + try: + self.server.invoke_successfully(subnet_create, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating subnet %s: %s' % (self.parameters.get('name'), to_native(error)), + exception=traceback.format_exc()) + + def delete_subnet(self): + """ + Deletes a subnet + """ + if self.use_rest: + return self.delete_subnet_rest() + subnet_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'net-subnet-destroy', **{'subnet-name': self.parameters.get('name')}) + if self.parameters.get('ipspace'): + subnet_delete.add_new_child('ipspace', self.parameters.get('ipspace')) + + try: + self.server.invoke_successfully(subnet_delete, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting subnet %s: %s' % (self.parameters.get('name'), to_native(error)), + exception=traceback.format_exc()) + + def modify_subnet(self, modify): + """ + Modifies a subnet + """ + if self.use_rest: + return self.modify_subnet_rest(modify) + subnet_modify = self.build_zapi_request_for_create_or_modify('net-subnet-modify') + 
try: + self.server.invoke_successfully(subnet_modify, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying subnet %s: %s' % (self.parameters.get('name'), to_native(error)), + exception=traceback.format_exc()) + + def build_zapi_request_for_create_or_modify(self, zapi): + simple_keys = ['gateway', 'ipspace', 'subnet'] + + # required parameters + options = {'subnet-name': self.parameters.get('name')} + if zapi == 'net-subnet-create': + options['broadcast-domain'] = self.parameters.get('broadcast_domain') + options['subnet'] = self.parameters.get('subnet') + simple_keys.remove('subnet') + + # optional parameters + for key in simple_keys: + value = self.parameters.get(key) + if value is not None: + options[key] = value + + result = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options) + if self.parameters.get('ip_ranges'): + subnet_ips = netapp_utils.zapi.NaElement('ip-ranges') + for ip_range in self.parameters.get('ip_ranges'): + subnet_ips.add_new_child('ip-range', ip_range) + result.add_child_elem(subnet_ips) + + return result + + def rename_subnet(self): + """ + TODO + """ + options = {'subnet-name': self.parameters.get('from_name'), + 'new-name': self.parameters.get('name')} + + subnet_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'net-subnet-rename', **options) + + if self.parameters.get('ipspace'): + subnet_rename.add_new_child('ipspace', self.parameters.get('ipspace')) + + try: + self.server.invoke_successfully(subnet_rename, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error renaming subnet %s: %s' % (self.parameters.get('name'), to_native(error)), + exception=traceback.format_exc()) + + def get_subnet_rest(self, name): + api = 'network/ip/subnets' + params = { + 'name': name, + 'fields': 'available_ip_ranges,name,broadcast_domain,ipspace,gateway,subnet,uuid' + } + if self.parameters.get('ipspace'): + params['ipspace.name'] = 
self.parameters['ipspace'] + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg="Error fetching subnet %s: %s" % (name, error)) + current = None + if record: + self.uuid = record['uuid'] + current = { + 'name': record['name'], + 'broadcast_domain': self.na_helper.safe_get(record, ['broadcast_domain', 'name']), + 'gateway': self.na_helper.safe_get(record, ['gateway']), + 'ipspace': self.na_helper.safe_get(record, ['ipspace', 'name']), + 'subnet': record['subnet']['address'] + '/' + record['subnet']['netmask'], + 'ip_ranges': [] + } + for each_range in record.get('available_ip_ranges', []): + if each_range['start'] == each_range['end']: + current['ip_ranges'].append(each_range['start']) + else: + current['ip_ranges'].append(each_range['start'] + '-' + each_range['end']) + return current + + def create_subnet_rest(self): + api = 'network/ip/subnets' + dummy, error = rest_generic.post_async(self.rest_api, api, self.form_create_modify_body_rest()) + if error: + self.module.fail_json(msg='Error creating subnet %s: %s' % (self.parameters['name'], to_native(error))) + + def modify_subnet_rest(self, modify): + api = 'network/ip/subnets' + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, self.form_create_modify_body_rest(modify)) + if error: + self.module.fail_json(msg='Error modifying subnet %s: %s' % (self.parameters.get('name'), to_native(error))) + + def delete_subnet_rest(self): + api = 'network/ip/subnets' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid) + if error: + self.module.fail_json(msg='Error deleting subnet %s: %s' % (self.parameters.get('name'), to_native(error))) + + def form_create_modify_body_rest(self, params=None): + if params is None: + params = self.parameters + body = {'name': self.parameters['name']} + if params.get('broadcast_domain'): + body['broadcast_domain.name'] = params['broadcast_domain'] + if params.get('subnet'): + if '/' not in 
params['subnet']: + self.module.fail_json(msg="Error: Invalid value specified for subnet %s" % params['subnet']) + body['subnet.address'] = params['subnet'].split('/')[0] + body['subnet.netmask'] = params['subnet'].split('/')[1] + if params.get('gateway'): + body['gateway'] = params['gateway'] + if params.get('ipspace'): + body['ipspace.name'] = params['ipspace'] + ip_ranges = [] + for each_range in params.get('ip_ranges', []): + if '-' in each_range: + ip_ranges.append({ + 'start': each_range.split('-')[0], + 'end': each_range.split('-')[1] + }) + else: + ip_ranges.append({ + 'start': each_range, + 'end': each_range + }) + if ip_ranges or params.get('ip_ranges') == []: + body['ip_ranges'] = ip_ranges + return body + + def apply(self): + '''Apply action to subnet''' + current = self.get_subnet() + rename, modify = None, None + + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + # creating new subnet by renaming + current = self.get_subnet(self.parameters.get('from_name')) + if current is None: + self.module.fail_json(msg="Error renaming: subnet %s does not exist" % + self.parameters.get('from_name')) + rename = True + cd_action = None + if self.use_rest: + # patch takes care of renaming subnet too. + rename = False + + if self.parameters['state'] == 'present' and current: + if not self.use_rest: + current.pop('name', None) # handled in rename + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if 'broadcast_domain' in modify: + self.module.fail_json(msg='Error modifying subnet %s: cannot modify broadcast_domain parameter, desired "%s", currrent "%s"' + % (self.parameters.get('name'), self.parameters.get('broadcast_domain'), current.get('broadcast_domain'))) + + if cd_action == 'create': + for attribute in ['subnet', 'broadcast_domain']: + if not self.parameters.get(attribute): + self.module.fail_json(msg='Error - missing required arguments: %s.' 
% attribute) + + if self.na_helper.changed and not self.module.check_mode: + if rename: + self.rename_subnet() + # If rename is True, cd_action is None but modify could be true + if cd_action == 'create': + self.create_subnet() + elif cd_action == 'delete': + self.delete_subnet() + elif modify: + self.modify_subnet(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp ONTAP Net Route object and runs the correct play task + """ + subnet_obj = NetAppOntapSubnet() + subnet_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py new file mode 100644 index 000000000..ed94b4728 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_net_vlan.py @@ -0,0 +1,367 @@ +#!/usr/bin/python + +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_net_vlan +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_net_vlan +short_description: NetApp ONTAP network VLAN +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create/Modify/Delete network VLAN +- Modify VLAN are supported only with REST +- broadcast_domain, ipspace and enabled keys are supported with REST and is ignored with ZAPI +options: + state: + description: + - Whether the specified network VLAN should exist or not + choices: ['present', 'absent'] + type: str + default: present + parent_interface: + description: + - The interface that hosts the VLAN interface. + required: true + type: str + vlanid: + description: + - The VLAN id. Ranges from 1 to 4094. 
+ required: true + type: int + node: + description: + - Node name of VLAN interface. + required: true + type: str + broadcast_domain: + description: + - Specify the broadcast_domain name. + - Only supported with REST and is ignored with ZAPI. + - Required with 9.6 and 9.7, but optional with 9.8 or later. + type: str + version_added: 21.13.0 + ipspace: + description: + - Specify the ipspace for the broadcast domain. + - Only supported with REST and is ignored with ZAPI. + - Required with 9.6 and 9.7, but optional with 9.8 or later. + type: str + version_added: 21.13.0 + enabled: + description: + - Enable/Disable Net vlan. + - Only supported with REST and is ignored with ZAPI. + type: bool + version_added: 21.13.0 +notes: + - The C(interface_name) option has been removed and should be deleted from playbooks +''' + +EXAMPLES = """ + - name: create VLAN + netapp.ontap.na_ontap_net_vlan: + state: present + vlanid: 13 + node: "{{ vlan_node }}" + ipspace: "{{ ipspace_name }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + + - name: Create and add vlan to broadcast domain - REST + netapp.ontap.na_ontap_net_vlan: + state: present + vlanid: 14 + node: "{{ vlan_node }}" + parent_interface: "{{ vlan_parent_interface_name }}" + broadcast_domain: "{{ broadcast_domain_name }}" + ipspace: "{{ ipspace_name }}" + enabled: true + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + + - name: Disable VLAN - REST + netapp.ontap.na_ontap_net_vlan: + state: present + vlanid: 14 + node: "{{ vlan_node }}" + parent_interface: "{{ vlan_parent_interface_name }}" + enabled: false + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + + - name: Delete VLAN + netapp.ontap.na_ontap_net_vlan: + state: absent + vlanid: 14 + node: "{{ vlan_node }}" + parent_interface: "{{ vlan_parent_interface_name }}" + username: "{{ 
netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" +""" + +RETURN = """ + +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapVlan: + """ + Created, and destorys Net Vlans's + """ + def __init__(self): + """ + Initializes the NetAppOntapVlan function + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + parent_interface=dict(required=True, type='str'), + vlanid=dict(required=True, type='int'), + node=dict(required=True, type='str'), + broadcast_domain=dict(required=False, type='str'), + ipspace=dict(required=False, type='str'), + enabled=dict(required=False, type='bool') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_together=[['broadcast_domain', 'ipspace']], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + self.parameters['interface_name'] = "%s-%s" % (self.parameters['parent_interface'], self.parameters['vlanid']) + + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0): + if 'broadcast_domain' not in self.parameters and 'ipspace' not in self.parameters and self.parameters['state'] == 'present': + 
error_msg = 'broadcast_domain and ipspace are required fields with ONTAP 9.6 and 9.7' + self.module.fail_json(msg=error_msg) + + if not self.use_rest and ('broadcast_domain' in self.parameters or 'enabled' in self.parameters): + msg = 'Using ZAPI and ignoring keys - enabled, broadcast_domain and ipspace' + self.module.warn(msg) + self.parameters.pop('broadcast_domain', None) + self.parameters.pop('ipspace', None) + self.parameters.pop('enabled', None) + + if not self.use_rest: + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def invoke_vlan(self, zapi): + """ + Invoke zapi - add/delete take the same NaElement structure + """ + vlan_obj = netapp_utils.zapi.NaElement(zapi) + vlan_info = self.create_vlan_info() + vlan_obj.add_child_elem(vlan_info) + try: + self.server.invoke_successfully(vlan_obj, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if zapi == 'net-vlan-create': + action = 'adding' + elif zapi == 'net-vlan-delete': + action = 'deleting' + else: + action = 'unexpected' + self.module.fail_json(msg='Error %s Net Vlan %s: %s' % (action, self.parameters['interface_name'], to_native(error)), + exception=traceback.format_exc()) + + def create_vlan(self): + """ + Creates a new vlan + """ + if self.use_rest: + api = 'network/ethernet/ports' + body = { + 'type': 'vlan', + 'node': {'name': self.parameters['node']}, + 'vlan': { + 'base_port': { + 'name': self.parameters['parent_interface'], + 'node': {'name': self.parameters['node']} + }, + 'tag': self.parameters['vlanid'] + } + } + if 'broadcast_domain' in self.parameters: + body['broadcast_domain'] = {'name': self.parameters['broadcast_domain']} + body['broadcast_domain']['ipspace'] = {'name': self.parameters['ipspace']} + if 'enabled' in self.parameters: + body['enabled'] = self.parameters['enabled'] + dummy, error = rest_generic.post_async(self.rest_api, 
api, body) + if error: + self.module.fail_json(msg=error) + else: + self.invoke_vlan('net-vlan-create') + + def delete_vlan(self, current=None): + """ + Deletes a vland + """ + if self.use_rest: + uuid = current['uuid'] + api = 'network/ethernet/ports' + dummy, error = rest_generic.delete_async(self.rest_api, api, uuid) + if error: + self.module.fail_json(msg=error) + else: + self.invoke_vlan('net-vlan-delete') + + def get_vlan(self): + """ + Checks to see if a vlan already exists or not + :return: Returns dictionary of attributes if the vlan exists, None if it dosn't + """ + if self.use_rest: + return self.get_vlan_rest() + vlan_obj = netapp_utils.zapi.NaElement("net-vlan-get-iter") + query = { + 'query': { + 'vlan-info': { + 'interface-name': self.parameters['interface_name'], + 'node': self.parameters['node'] + } + } + } + vlan_obj.translate_struct(query) + try: + result = self.server.invoke_successfully(vlan_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + # This checks desired vlan already exists and returns interface_name and node + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1: + vlan_info = result.get_child_by_name('attributes-list').get_child_by_name('vlan-info') + current = { + 'interface_name': vlan_info.get_child_content('interface-name'), + 'node': vlan_info.get_child_content('node') + } + return current + return None + + def get_vlan_rest(self): + api = 'network/ethernet/ports' + query = { + 'name': self.parameters['interface_name'], + 'node.name': self.parameters['node'], + } + fields = 'name,node,uuid,broadcast_domain,enabled' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg=error) + if record: + current = { + 'interface_name': record['name'], + 'node': record['node']['name'], + 'uuid': record['uuid'], + 'enabled': record['enabled'] + } + 
if 'broadcast_domain' in record: + current['broadcast_domain'] = record['broadcast_domain']['name'] + current['ipspace'] = record['broadcast_domain']['ipspace']['name'] + return current + return None + + def modify_vlan(self, current, modify): + """ + Modify broadcast domain, ipspace and enable/disable vlan + """ + uuid = current['uuid'] + api = 'network/ethernet/ports' + body = {} + # Requires both broadcast_domain and ipspace in body + # of PATCH call if any one of it present in modify + if 'broadcast_domain' in modify or 'ipspace' in modify: + broadcast_domain = modify['broadcast_domain'] if 'broadcast_domain' in modify else current['broadcast_domain'] + ipspace = modify['ipspace'] if 'ipspace' in modify else current['ipspace'] + body['broadcast_domain'] = {'name': broadcast_domain} + body['broadcast_domain']['ipspace'] = {'name': ipspace} + if 'enabled' in modify: + body['enabled'] = modify['enabled'] + dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body) + if error: + self.module.fail_json(msg=error) + + def create_vlan_info(self): + """ + Create a vlan_info object to be used in a create/delete + :return: + """ + vlan_info = netapp_utils.zapi.NaElement("vlan-info") + + # set up the vlan_info object: + vlan_info.add_new_child("parent-interface", self.parameters['parent_interface']) + vlan_info.add_new_child("vlanid", str(self.parameters['vlanid'])) + vlan_info.add_new_child("node", self.parameters['node']) + return vlan_info + + def apply(self): + """ + check the option in the playbook to see what needs to be done + :return: + """ + modify = None + current = self.get_vlan() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.use_rest and cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_vlan() + # enabled key in POST call has no effect + # applying PATCH if there is 
change in default value + if self.use_rest: + current = self.get_vlan_rest() + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if cd_action == 'delete': + self.delete_vlan(current) + if modify: + self.modify_vlan(current, modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap vlan object, and runs the correct play task. + """ + vlan_obj = NetAppOntapVlan() + vlan_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py new file mode 100644 index 000000000..a1315df1b --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nfs.py @@ -0,0 +1,700 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +module: na_ontap_nfs +short_description: NetApp ONTAP NFS status +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Enable or disable NFS on ONTAP +options: + state: + description: + - Whether NFS should exist or not. + choices: ['present', 'absent'] + type: str + default: present + service_state: + description: + - Whether the specified NFS should be enabled or disabled. Creates NFS service if it doesn't exist. + choices: ['started', 'stopped'] + type: str + vserver: + description: + - Name of the vserver to use. + required: true + type: str + nfsv3: + description: + - status of NFSv3. + choices: ['enabled', 'disabled'] + type: str + nfsv3_fsid_change: + description: + - status of whether NFSv3 clients see change in FSID as they traverse filesystems.
+ choices: ['enabled', 'disabled'] + type: str + version_added: 2.7.0 + nfsv4_fsid_change: + description: + - status of whether NFSv4 clients see change in FSID as they traverse filesystems. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.9.0 + nfsv4: + description: + - status of NFSv4. + choices: ['enabled', 'disabled'] + type: str + nfsv41: + description: + - status of NFSv41. + - usage of C(nfsv4.1) is deprecated as it does not match Ansible naming convention. The alias will be removed. + - please use C(nfsv41) exclusively for this option. + aliases: ['nfsv4.1'] + choices: ['enabled', 'disabled'] + type: str + nfsv41_pnfs: + description: + - status of NFSv41 pNFS. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.9.0 + nfsv4_numeric_ids: + description: + - status of NFSv4 numeric IDs. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.9.0 + vstorage_state: + description: + - status of vstorage_state. + choices: ['enabled', 'disabled'] + type: str + nfsv4_id_domain: + description: + - Name of the nfsv4_id_domain to use. + type: str + nfsv40_acl: + description: + - status of NFS v4.0 ACL feature + choices: ['enabled', 'disabled'] + type: str + version_added: 2.7.0 + nfsv40_read_delegation: + description: + - status for NFS v4.0 read delegation feature. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.7.0 + nfsv40_write_delegation: + description: + - status for NFS v4.0 write delegation feature. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.7.0 + nfsv41_acl: + description: + - status of NFS v4.1 ACL feature + choices: ['enabled', 'disabled'] + type: str + version_added: 2.7.0 + nfsv41_read_delegation: + description: + - status for NFS v4.1 read delegation feature. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.7.0 + nfsv41_write_delegation: + description: + - status for NFS v4.1 write delegation feature.
+ choices: ['enabled', 'disabled'] + type: str + version_added: 2.7.0 + nfsv40_referrals: + description: + - status for NFS v4.0 referrals. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.9.0 + nfsv41_referrals: + description: + - status for NFS v4.1 referrals. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.9.0 + tcp: + description: + - Enable TCP (support from ONTAP 9.3 onward). + choices: ['enabled', 'disabled'] + type: str + udp: + description: + - Enable UDP (support from ONTAP 9.3 onward). + choices: ['enabled', 'disabled'] + type: str + showmount: + description: + - Whether SVM allows showmount. + - With REST, supported from ONTAP 9.8 version. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.7.0 + tcp_max_xfer_size: + description: + - TCP Maximum Transfer Size (bytes). The default value is 65536. + - This option requires ONTAP 9.11.0 or later in REST. + version_added: 2.8.0 + type: int + windows: + description: + - This option can be set or modified when using REST. + - It requires ONTAP 9.11.0 or later. + version_added: 22.3.0 + type: dict + suboptions: + default_user: + description: + - Specifies the default Windows user for the NFS server. + type: str + map_unknown_uid_to_default_user: + description: + - Specifies whether or not the mapping of an unknown UID to the default Windows user is enabled. + type: bool + v3_ms_dos_client_enabled: + description: + - Specifies whether NFSv3 MS-DOS client support is enabled. + type: bool + root: + description: + - This option can be set or modified when using REST. + - It requires ONTAP 9.11.0 or later. + type: dict + version_added: 22.3.0 + suboptions: + ignore_nt_acl: + description: + - Specifies whether Windows ACLs affect root access from NFS. + - If this option is enabled, root access from NFS ignores the NT ACL set on the file or directory. 
+ type: bool + skip_write_permission_check: + description: + - Specifies if permission checks are to be skipped for NFS WRITE calls from root/owner. + - For copying read-only files to a destination folder which has inheritable ACLs, this option must be enabled. + type: bool + security: + description: + - This option can be set or modified when using REST. + - It requires ONTAP 9.11.0 or later. + type: dict + version_added: 22.3.0 + suboptions: + chown_mode: + description: + - Specifies whether file ownership can be changed only by the superuser, or if a non-root user can also change file ownership. + - If this option is set to restricted, file ownership can be changed only by the superuser, + even though the on-disk permissions allow a non-root user to change file ownership. + - If this option is set to unrestricted, file ownership can be changed by the superuser and by the non-root user, + depending upon the access granted by on-disk permissions. + - If this option is set to use-export-policy, file ownership can be changed in accordance with the relevant export rules. + choices: ['restricted', 'unrestricted', 'use_export_policy'] + type: str + nt_acl_display_permission: + description: + - Controls the permissions that are displayed to NFSv3 and NFSv4 clients on a file or directory that has an NT ACL set. + - When true, the displayed permissions are based on the maximum access granted by the NT ACL to any user. + - When false, the displayed permissions are based on the minimum access granted by the NT ACL to any user. + type: bool + ntfs_unix_security: + description: + - Specifies how NFSv3 security changes affect NTFS volumes. + - If this option is set to ignore, ONTAP ignores NFSv3 security changes. + - If this option is set to fail, this overrides the UNIX security options set in the relevant export rules. + - If this option is set to use_export_policy, ONTAP processes NFSv3 security changes in accordance with the relevant export rules. 
+ choices: ['ignore', 'fail', 'use_export_policy'] + type: str + permitted_encryption_types: + description: + - Specifies the permitted encryption types for Kerberos over NFS. + type: list + elements: str + rpcsec_context_idle: + description: + - Specifies, in seconds, the amount of time a RPCSEC_GSS context is permitted to remain unused before it is deleted. + type: int +""" + +EXAMPLES = """ + - name: change nfs status + netapp.ontap.na_ontap_nfs: + state: present + service_state: stopped + vserver: vs_hack + nfsv3: disabled + nfsv4: disabled + nfsv41: enabled + tcp: disabled + udp: disabled + vstorage_state: disabled + nfsv4_id_domain: example.com + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: create nfs configuration - REST + netapp.ontap.na_ontap_nfs: + state: present + service_state: stopped + vserver: vs_hack + nfsv3: disabled + nfsv4: disabled + nfsv41: enabled + tcp: disabled + udp: disabled + vstorage_state: disabled + nfsv4_id_domain: example.com + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify nfs configuration - REST + netapp.ontap.na_ontap_nfs: + state: present + vserver: vs_hack + root: + ignore_nt_acl: true + skip_write_permission_check: true + security: + chown_mode: restricted + nt_acl_display_permission: true + ntfs_unix_security: fail + rpcsec_context_idle: 5 + windows: + v3_ms_dos_client_enabled: true + map_unknown_uid_to_default_user: false + default_user: test_user + tcp_max_xfer_size: 16384 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete nfs configuration + netapp.ontap.na_ontap_nfs: + state: absent + vserver: vs_hack + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" +import traceback +from ansible.module_utils.basic import 
AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPNFS: + """ object initialize and class methods """ + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + service_state=dict(required=False, type='str', choices=['started', 'stopped']), + vserver=dict(required=True, type='str'), + nfsv3=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv3_fsid_change=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv4_fsid_change=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv4=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv41=dict(required=False, type='str', default=None, choices=['enabled', 'disabled'], aliases=['nfsv4.1']), + nfsv41_pnfs=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv4_numeric_ids=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + vstorage_state=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + tcp=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']), + udp=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']), + nfsv4_id_domain=dict(required=False, type='str', default=None), + nfsv40_acl=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv40_read_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + 
nfsv40_referrals=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv40_write_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv41_acl=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv41_read_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv41_referrals=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + nfsv41_write_delegation=dict(required=False, type='str', default=None, choices=['enabled', 'disabled']), + showmount=dict(required=False, default=None, type='str', choices=['enabled', 'disabled']), + tcp_max_xfer_size=dict(required=False, default=None, type='int'), + + # security + security=dict(type='dict', options=dict( + rpcsec_context_idle=dict(required=False, type='int'), + ntfs_unix_security=dict(required=False, type='str', choices=['ignore', 'fail', 'use_export_policy']), + chown_mode=dict(required=False, type='str', choices=['restricted', 'unrestricted', 'use_export_policy']), + nt_acl_display_permission=dict(required=False, type='bool'), + permitted_encryption_types=dict(type='list', elements='str', required=False), + )), + # root + root=dict(type='dict', options=dict( + ignore_nt_acl=dict(required=False, type='bool'), + skip_write_permission_check=dict(required=False, type='bool'), + )), + # windows + windows=dict(type='dict', options=dict( + map_unknown_uid_to_default_user=dict(required=False, type='bool'), + v3_ms_dos_client_enabled=dict(required=False, type='bool'), + default_user=dict(required=False, type='str'), + )), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.zapi_names = { + 'nfsv3': 'is-nfsv3-enabled', # REST: protocol.v3_enabled + 'nfsv3_fsid_change': 
'is-nfsv3-fsid-change-enabled', + 'nfsv4_fsid_change': 'is-nfsv4-fsid-change-enabled', + 'nfsv4': 'is-nfsv40-enabled', # REST: protocol.v40_enabled + 'nfsv41': 'is-nfsv41-enabled', # REST: protocol.v41_enabled + 'nfsv41_pnfs': 'is-nfsv41-pnfs-enabled', # protocol.v41_features.pnfs_enabled + 'nfsv4_numeric_ids': 'is-nfsv4-numeric-ids-enabled', + 'vstorage_state': 'is-vstorage-enabled', # REST: vstorage_enabled + 'nfsv4_id_domain': 'nfsv4-id-domain', # REST: protocol.v4_id_domain + 'tcp': 'is-tcp-enabled', # REST: transport.tcp_enabled + 'udp': 'is-udp-enabled', # REST: transport.udp_enabled + 'nfsv40_acl': 'is-nfsv40-acl-enabled', # REST: protocol.v40_features.acl_enabled + 'nfsv40_read_delegation': 'is-nfsv40-read-delegation-enabled', # REST: protocol.v40_features.read_delegation_enabled + 'nfsv40_referrals': 'is-nfsv40-referrals-enabled', + 'nfsv40_write_delegation': 'is-nfsv40-write-delegation-enabled', # REST: protocol.v40_features.write_delegation_enabled + 'nfsv41_acl': 'is-nfsv41-acl-enabled', # REST: protocol.v41_features.acl_enabled + 'nfsv41_read_delegation': 'is-nfsv41-read-delegation-enabled', # REST: protocol.v41_features.read_delegation_enabled + 'nfsv41_referrals': 'is-nfsv41-referrals-enabled', + 'nfsv41_write_delegation': 'is-nfsv41-write-delegation-enabled', # REST: protocol.v41_features.write_delegation_enabled + 'showmount': 'showmount', # REST: showmount_enabled + 'tcp_max_xfer_size': 'tcp-max-xfer-size' + } + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + unsupported_rest_properties = ['nfsv3_fsid_change', + 'nfsv4_fsid_change', + 'nfsv4_numeric_ids', + 'nfsv40_referrals', + 'nfsv41_referrals'] + partially_supported_rest_properties = [['showmount', (9, 8)], ['root', (9, 11, 0)], ['windows', (9, 11, 0)], ['security', (9, 11, 0)], + ['tcp_max_xfer_size', (9, 11, 0)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties) + if 'nfsv4.1' in 
self.parameters: + self.module.warn('Error: "nfsv4.1" option conflicts with Ansible naming conventions - please use "nfsv41".') + self.svm_uuid = None + self.unsupported_zapi_properties = ['root', 'windows', 'security'] + self.parameters = self.na_helper.filter_out_none_entries(self.parameters) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + for unsupported_zapi_property in self.unsupported_zapi_properties: + if self.parameters.get(unsupported_zapi_property) is not None: + msg = "Error: %s option is not supported with ZAPI. It can only be used with REST." % unsupported_zapi_property + self.module.fail_json(msg=msg) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_nfs_service(self): + if self.use_rest: + return self.get_nfs_service_rest() + nfs_get_iter = netapp_utils.zapi.NaElement('nfs-service-get-iter') + nfs_info = netapp_utils.zapi.NaElement('nfs-info') + nfs_info.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(nfs_info) + nfs_get_iter.add_child_elem(query) + result = self.server.invoke_successfully(nfs_get_iter, True) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + return self.format_return(result) + return None + + def format_return(self, result): + attributes_list = result.get_child_by_name('attributes-list').get_child_by_name('nfs-info') + return { + 'nfsv3': self.convert_from_bool(attributes_list.get_child_content('is-nfsv3-enabled')), + 'nfsv3_fsid_change': self.convert_from_bool(attributes_list.get_child_content('is-nfsv3-fsid-change-enabled')), + 'nfsv4_fsid_change': self.convert_from_bool(attributes_list.get_child_content('is-nfsv4-fsid-change-enabled')), + 'nfsv4': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-enabled')), + 'nfsv41': 
self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-enabled')), + 'nfsv41_pnfs': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-pnfs-enabled')), + 'nfsv4_numeric_ids': self.convert_from_bool(attributes_list.get_child_content('is-nfsv4-numeric-ids-enabled')), + 'vstorage_state': self.convert_from_bool(attributes_list.get_child_content('is-vstorage-enabled')), + 'nfsv4_id_domain': attributes_list.get_child_content('nfsv4-id-domain'), + 'tcp': self.convert_from_bool(attributes_list.get_child_content('is-tcp-enabled')), + 'udp': self.convert_from_bool(attributes_list.get_child_content('is-udp-enabled')), + 'nfsv40_acl': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-acl-enabled')), + 'nfsv40_read_delegation': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-read-delegation-enabled')), + 'nfsv40_referrals': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-referrals-enabled')), + 'nfsv40_write_delegation': self.convert_from_bool(attributes_list.get_child_content('is-nfsv40-write-delegation-enabled')), + 'nfsv41_acl': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-acl-enabled')), + 'nfsv41_read_delegation': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-read-delegation-enabled')), + 'nfsv41_referrals': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-referrals-enabled')), + 'nfsv41_write_delegation': self.convert_from_bool(attributes_list.get_child_content('is-nfsv41-write-delegation-enabled')), + 'showmount': self.convert_from_bool(attributes_list.get_child_content('showmount')), + 'tcp_max_xfer_size': self.na_helper.get_value_for_int(True, attributes_list.get_child_content('tcp-max-xfer-size')) + } + + def get_nfs_status(self): + nfs_status = netapp_utils.zapi.NaElement('nfs-status') + result = self.server.invoke_successfully(nfs_status, True) + return result.get_child_content('is-enabled') + + def 
create_nfs_service(self): + if self.use_rest: + return self.create_nfs_service_rest() + # This is what the old module did, not sure what happens if nfs doesn't exist. + self.enable_nfs() + + def enable_nfs(self): + """ + enable nfs (online). If the NFS service was not explicitly created, + this API will create one with default options. + """ + nfs_enable = netapp_utils.zapi.NaElement.create_node_with_children('nfs-enable') + try: + self.server.invoke_successfully(nfs_enable, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error changing the service_state of nfs %s to %s: %s' % + (self.parameters['vserver'], self.parameters['service_state'], to_native(error)), + exception=traceback.format_exc()) + + def disable_nfs(self): + """ + disable nfs (offline). + """ + nfs_disable = netapp_utils.zapi.NaElement.create_node_with_children('nfs-disable') + try: + self.server.invoke_successfully(nfs_disable, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error changing the service_state of nfs %s to %s: %s' % + (self.parameters['vserver'], self.parameters['service_state'], to_native(error)), + exception=traceback.format_exc()) + + def modify_nfs_service(self, modify): + if self.use_rest: + return self.modify_nfs_service_rest(modify) + # This is what the old module did, not sure what happens if nfs doesn't exist.
+ nfs_modify = netapp_utils.zapi.NaElement('nfs-service-modify') + service_state = modify.pop('service_state', None) + self.modify_service_state(service_state) + for each in modify: + if each in ['nfsv4_id_domain', 'tcp_max_xfer_size']: + nfs_modify.add_new_child(self.zapi_names[each], str(modify[each])) + else: + nfs_modify.add_new_child(self.zapi_names[each], self.convert_to_bool(modify[each])) + try: + self.server.invoke_successfully(nfs_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying nfs: %s' % (to_native(error)), + exception=traceback.format_exc()) + + def modify_service_state(self, service_state): + nfs_enabled = self.get_nfs_status() + if service_state == 'started' and nfs_enabled == 'false': + self.enable_nfs() + elif service_state == 'stopped' and nfs_enabled == 'true': + self.disable_nfs() + + def delete_nfs_service(self): + """ + delete nfs service. + """ + if self.use_rest: + return self.delete_nfs_service_rest() + nfs_delete = netapp_utils.zapi.NaElement.create_node_with_children('nfs-service-destroy') + try: + self.server.invoke_successfully(nfs_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting nfs: %s' % (to_native(error)), + exception=traceback.format_exc()) + + def get_nfs_service_rest(self): + api = 'protocols/nfs/services' + params = {'svm.name': self.parameters['vserver'], + 'fields': 'protocol.v3_enabled,' + 'protocol.v40_enabled,' + 'protocol.v41_enabled,' + 'protocol.v41_features.pnfs_enabled,' + 'vstorage_enabled,' + 'protocol.v4_id_domain,' + 'transport.tcp_enabled,' + 'transport.udp_enabled,' + 'protocol.v40_features.acl_enabled,' + 'protocol.v40_features.read_delegation_enabled,' + 'protocol.v40_features.write_delegation_enabled,' + 'protocol.v41_features.acl_enabled,' + 'protocol.v41_features.read_delegation_enabled,' + 'protocol.v41_features.write_delegation_enabled,' + 'enabled,' + 
'svm.uuid,'} + if self.parameters.get('showmount'): + params['fields'] += 'showmount_enabled,' + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 0): + params['fields'] += 'root.*,security.*,windows.*,transport.tcp_max_transfer_size' + # TODO: might return more than 1 record, find out + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error getting nfs services for SVM %s: %s' % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + return self.format_get_nfs_service_rest(record) if record else record + + def format_get_nfs_service_rest(self, record): + return { + 'nfsv3': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v3_enabled'])), + 'nfsv4': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v40_enabled'])), + 'nfsv41': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_enabled'])), + 'nfsv41_pnfs': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_features', 'pnfs_enabled'])), + 'vstorage_state': self.convert_from_bool(self.na_helper.safe_get(record, ['vstorage_enabled'])), + 'nfsv4_id_domain': self.na_helper.safe_get(record, ['protocol', 'v4_id_domain']), + 'tcp': self.convert_from_bool(self.na_helper.safe_get(record, ['transport', 'tcp_enabled'])), + 'udp': self.convert_from_bool(self.na_helper.safe_get(record, ['transport', 'udp_enabled'])), + 'tcp_max_xfer_size': self.na_helper.safe_get(record, ['transport', 'tcp_max_transfer_size']), + 'nfsv40_acl': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v40_features', 'acl_enabled'])), + 'nfsv40_read_delegation': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v40_features', 'read_delegation_enabled'])), + 'nfsv40_write_delegation': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v40_features', 'write_delegation_enabled'])), + 'nfsv41_acl': 
self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_features', 'acl_enabled'])), + 'nfsv41_read_delegation': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_features', 'read_delegation_enabled'])), + 'nfsv41_write_delegation': self.convert_from_bool(self.na_helper.safe_get(record, ['protocol', 'v41_features', 'write_delegation_enabled'])), + 'showmount': self.convert_from_bool(self.na_helper.safe_get(record, ['showmount_enabled'])), + 'svm_uuid': self.na_helper.safe_get(record, ['svm', 'uuid']), + 'service_state': self.convert_from_bool_to_started(self.na_helper.safe_get(record, ['enabled'])), + 'root': self.na_helper.safe_get(record, ['root']), + 'windows': self.na_helper.safe_get(record, ['windows']), + 'security': self.na_helper.safe_get(record, ['security']), + } + + def create_nfs_service_rest(self): + api = 'protocols/nfs/services' + body = {'svm.name': self.parameters['vserver']} + body.update(self.create_modify_body(body)) + dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120) + if error: + self.module.fail_json(msg='Error creating nfs service for SVM %s: %s' % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def delete_nfs_service_rest(self): + if self.svm_uuid is None: + self.module.fail_json(msg='Error deleting nfs service for SVM %s: svm.uuid is None' % self.parameters['vserver']) + dummy, error = rest_generic.delete_async(self.rest_api, 'protocols/nfs/services', self.svm_uuid, job_timeout=120) + if error: + self.module.fail_json(msg='Error deleting nfs service for SVM %s' % self.parameters['vserver']) + + def modify_nfs_service_rest(self, modify): + if self.svm_uuid is None: + self.module.fail_json(msg='Error modifying nfs service for SVM %s: svm.uuid is None' % self.parameters['vserver']) + api = 'protocols/nfs/services' + body = {} + body.update(self.create_modify_body(body, modify)) + dummy, error = 
rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body, job_timeout=120) + if error: + self.module.fail_json(msg='Error modifying nfs service for SVM %s: %s' % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def create_modify_body(self, body, modify=None): + params = modify or self.parameters + if params.get('nfsv3') is not None: + body['protocol.v3_enabled'] = self.convert_to_bool(params['nfsv3']) + if params.get('nfsv4') is not None: + body['protocol.v40_enabled'] = self.convert_to_bool(params['nfsv4']) + if params.get('nfsv41') is not None: + body['protocol.v41_enabled'] = self.convert_to_bool(params['nfsv41']) + if params.get('nfsv41_pnfs') is not None: + body['protocol.v41_features.pnfs_enabled'] = self.convert_to_bool(params['nfsv41_pnfs']) + if params.get('vstorage_state') is not None: + body['vstorage_enabled'] = self.convert_to_bool(params['vstorage_state']) + if params.get('nfsv4_id_domain') is not None: + body['protocol.v4_id_domain'] = params['nfsv4_id_domain'] + if params.get('tcp') is not None: + body['transport.tcp_enabled'] = self.convert_to_bool(params['tcp']) + if params.get('udp') is not None: + body['transport.udp_enabled'] = self.convert_to_bool(params['udp']) + if params.get('nfsv40_acl') is not None: + body['protocol.v40_features.acl_enabled'] = self.convert_to_bool(params['nfsv40_acl']) + if params.get('nfsv40_read_delegation') is not None: + body['protocol.v40_features.read_delegation_enabled'] = self.convert_to_bool(params['nfsv40_read_delegation']) + if params.get('nfsv40_write_delegation') is not None: + body['protocol.v40_features.write_delegation_enabled'] = self.convert_to_bool(params['nfsv40_write_delegation']) + if params.get('nfsv41_acl') is not None: + body['protocol.v41_features.acl_enabled'] = self.convert_to_bool(params['nfsv41_acl']) + if params.get('nfsv41_read_delegation') is not None: + body['protocol.v41_features.read_delegation_enabled'] = 
self.convert_to_bool(params['nfsv41_read_delegation']) + if params.get('nfsv41_write_delegation') is not None: + body['protocol.v41_features.write_delegation_enabled'] = self.convert_to_bool(params['nfsv41_write_delegation']) + if params.get('showmount') is not None: + body['showmount_enabled'] = self.convert_to_bool(params['showmount']) + # Tested this out, in both create and modify, changing the service_state will enable and disabled the service + # during both a create and modify. + if params.get('service_state') is not None: + body['enabled'] = self.convert_to_bool(params['service_state']) + if params.get('root') is not None: + body['root'] = params['root'] + if params.get('windows') is not None: + body['windows'] = params['windows'] + if params.get('security') is not None: + body['security'] = params['security'] + if params.get('tcp_max_xfer_size') is not None: + body['transport.tcp_max_transfer_size'] = params['tcp_max_xfer_size'] + return body + + def convert_to_bool(self, value): + return 'true' if value in ['enabled', 'started'] else 'false' + + def convert_from_bool(self, value): + return 'enabled' if value in ['true', True] else 'disabled' + + def convert_from_bool_to_started(self, value): + return 'started' if value in ['true', True] else 'stopped' + + def validate_modify(self, current, modify): + '''Earlier ONTAP versions do not support tcp_max_xfer_size''' + if 'tcp_max_xfer_size' in modify and current['tcp_max_xfer_size'] is None: + self.module.fail_json(msg='Error: tcp_max_xfer_size is not supported on ONTAP 9.3 or earlier.') + + def apply(self): + current = self.get_nfs_service() + if self.use_rest and current is not None: + self.svm_uuid = current.get('svm_uuid') + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = None + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if not self.use_rest: + self.validate_modify(current, 
modify) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_nfs_service() + elif cd_action == 'delete': + self.delete_nfs_service() + elif modify: + self.modify_nfs_service(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ Create object and call apply """ + obj = NetAppONTAPNFS() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py new file mode 100644 index 000000000..ced6f44be --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_node.py @@ -0,0 +1,265 @@ +#!/usr/bin/python + +# (c) 2018-2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +module: na_ontap_node +short_description: NetApp ONTAP Modify or Rename a node. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.7.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Modify or Rename an ONTAP node. +options: + name: + description: + - The name for the node + required: true + type: str + + from_name: + description: + - The name of the node to be renamed. If I(name) already exists, no action will be performed. 
+ type: str + + location: + description: + - The location for the node + type: str + + asset_tag: + description: + - The asset tag for the node, not supported by REST + type: str + +''' + +EXAMPLES = """ +- name: modify node + na_ontap_node: + name: laurentncluster-2 + location: SF1 + asset_tag: mytag + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: rename node + na_ontap_node: + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + from_name: laurentn-vsim1 + name: laurentncluster-2 + +- name: modify and rename node + na_ontap_node: + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + location: SF2 + from_name: laurentn-vsim1 + name: laurentncluster-2 +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapNode(object): + """ + Rename and modify node + """ + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + location=dict(required=False, type='str'), + asset_tag=dict(required=False, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + 
self.rest_api = OntapRestAPI(self.module) + + # some attributes are not supported in REST implementation + unsupported_rest_properties = ['asset_tag'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties) + if not self.use_rest: + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module) + return + + def update_node_details(self, uuid, modify): + api = 'cluster/nodes/%s' % uuid + data = {} + if 'from_name' in self.parameters: + data['name'] = self.parameters['name'] + if 'location' in self.parameters: + data['location'] = self.parameters['location'] + if not data: + self.module.fail_json(msg='Nothing to update in the modified attributes: %s' % modify) + response, error = self.rest_api.patch(api, body=data) + response, error = rrh.check_for_error_and_job_results(api, response, error, self.rest_api) + if error: + self.module.fail_json(msg='Error while modifying node details: %s' % error) + + def modify_node(self, modify=None, uuid=None): + """ + Modify an existing node + :return: none + """ + if self.use_rest: + self.update_node_details(uuid, modify) + else: + node_obj = netapp_utils.zapi.NaElement('system-node-modify') + node_obj.add_new_child('node', self.parameters['name']) + if 'location' in self.parameters: + node_obj.add_new_child('node-location', self.parameters['location']) + if 'asset_tag' in self.parameters: + node_obj.add_new_child('node-asset-tag', self.parameters['asset_tag']) + try: + self.cluster.invoke_successfully(node_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying node: %s' % + (to_native(error)), + exception=traceback.format_exc()) + + def rename_node(self): + """ + Rename an existing node + :return: none + """ + node_obj = netapp_utils.zapi.NaElement('system-node-rename') + node_obj.add_new_child('node', 
self.parameters['from_name']) + node_obj.add_new_child('new-name', self.parameters['name']) + try: + self.cluster.invoke_successfully(node_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error renaming node: %s' % + (to_native(error)), + exception=traceback.format_exc()) + + def get_node(self, name): + if self.use_rest: + api = 'cluster/nodes' + query = { + 'fields': 'name,uuid,location', + 'name': name + } + message, error = self.rest_api.get(api, query) + node, error = rrh.check_for_0_or_1_records(api, message, error) + if error: + self.module.fail_json(msg='Error while fetching node details: %s' % error) + if node: + if 'location' not in message['records'][0]: + node_location = '' + else: + node_location = message['records'][0]['location'] + return dict( + name=message['records'][0]['name'], + uuid=message['records'][0]['uuid'], + location=node_location) + return None + else: + node_obj = netapp_utils.zapi.NaElement('system-node-get') + node_obj.add_new_child('node', name) + try: + result = self.cluster.invoke_successfully(node_obj, True) + except netapp_utils.zapi.NaApiError as error: + if to_native(error.code) == "13115": + # 13115 (EINVALIDINPUTERROR) if the node does not exist + return None + else: + self.module.fail_json(msg=to_native( + error), exception=traceback.format_exc()) + attributes = result.get_child_by_name('attributes') + if attributes is not None: + node_info = attributes.get_child_by_name('node-details-info') + node_location = node_info.get_child_content('node-location') + node_location = node_location if node_location is not None else '' + node_tag = node_info.get_child_content('node-tag') + node_tag = node_tag if node_tag is not None else '' + return dict( + name=node_info['node'], + location=node_location, + asset_tag=node_tag) + return None + + def apply(self): + from_exists = None + modify = None + uuid = None + current = self.get_node(self.parameters['name']) + if current is None and 'from_name' in 
self.parameters: + from_exists = self.get_node(self.parameters['from_name']) + if from_exists is None: + self.module.fail_json(msg='Node not found: %s' % self.parameters['from_name']) + uuid = from_exists['uuid'] if 'uuid' in from_exists else None + # since from_exists contains the node name, modify will at least contain the node name if a rename is required. + modify = self.na_helper.get_modified_attributes(from_exists, self.parameters) + elif current is not None: + uuid = current['uuid'] if 'uuid' in current else None + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + allowed_options = ['name', 'location'] + if not self.use_rest: + allowed_options.append('asset_tag') + if modify and any(x not in allowed_options for x in modify): + self.module.fail_json(msg='Too many modified attributes found: %s, allowed: %s' % (modify, allowed_options)) + if current is None and from_exists is None: + msg = 'from_name: %s' % self.parameters.get('from_name') if 'from_name' in self.parameters \ + else 'name: %s' % self.parameters['name'] + self.module.fail_json(msg='Node not found: %s' % msg) + if self.na_helper.changed: + if not self.module.check_mode: + if not self.use_rest: + if 'name' in modify: + self.rename_node() + modify.pop('name') + if modify: + self.modify_node(modify, uuid) + + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + """ + Start, Stop and Enable node services. 
+ """ + obj = NetAppOntapNode() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py new file mode 100644 index 000000000..90c6cf655 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_dacl.py @@ -0,0 +1,355 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified' +} + +DOCUMENTATION = """ +module: na_ontap_ntfs_dacl +author: NetApp Ansible Team (@carchi8py) +short_description: NetApp Ontap create, delate or modify NTFS DACL (discretionary access control list) +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '20.4.0' +description: +- Create, modify, or destroy a NTFS DACL + +options: + state: + description: + - Whether the specified NTFS DACL should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - Specifies the vserver for the NTFS DACL. + required: true + type: str + + security_descriptor: + description: + - Specifies the NTFS security descriptor. + required: true + type: str + + access_type: + description: + - Specifies DACL ACE's access type. Possible values. + choices: ['allow', 'deny'] + required: true + type: str + + account: + description: + - Specifies DACL ACE's SID or domain account name of NTFS security descriptor. + required: true + type: str + + rights: + description: + - Specifies DACL ACE's access rights. Mutually exclusive with advanced_access_rights. 
+ choices: ['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'] + type: str + + apply_to: + description: + - Specifies apply DACL entry. + choices: ['this_folder', 'sub_folders', 'files'] + type: list + elements: str + + advanced_access_rights: + description: + - Specifies DACL ACE's Advanced access rights. Mutually exclusive with rights. + choices: ['read_data', 'write_data', 'append_data', 'read_ea', 'write_ea', 'execute_file', 'delete_child', + 'read_attr', 'write_attr', 'delete', 'read_perm', 'write_perm', 'write_owner', 'full_control'] + type: list + elements: str + +""" + +EXAMPLES = """ + - name: Add NTFS DACL + na_ontap_ntfs_dacl: + state: present + vserver: SVM1 + security_descriptor: ansible_sd + access_type: allow + account: DOMAIN\\Account + rights: modify + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + + - name: Modify NTFS DACL + na_ontap_ntfs_dacl: + state: present + vserver: SVM1 + security_descriptor: ansible_sd + access_type: full_control + account: DOMAIN\\Account + rights: modify + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Remove NTFS DACL + na_ontap_ntfs_dacl: + state: absent + vserver: SVM1 + security_descriptor: ansible_sd + account: DOMAIN\\Account + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ + +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapNtfsDacl(object): + """ + Creates, Modifies and Destroys an NTFS DACL + """ + + def __init__(self): + """ + Initialize the Ontap NTFS DACL class + """ + + self.argument_spec = 
netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + security_descriptor=dict(required=True, type='str'), + access_type=dict(required=True, choices=['allow', 'deny'], type='str'), + account=dict(required=True, type='str'), + rights=dict(required=False, + choices=['no_access', 'full_control', 'modify', 'read_and_execute', 'read', 'write'], + type='str'), + apply_to=dict(required=False, choices=['this_folder', 'sub_folders', 'files'], type='list', elements='str'), + advanced_access_rights=dict(required=False, + choices=['read_data', 'write_data', 'append_data', 'read_ea', 'write_ea', + 'execute_file', 'delete_child', 'read_attr', 'write_attr', 'delete', + 'read_perm', 'write_perm', 'write_owner', 'full_control'], + type='list', elements='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[('rights', 'advanced_access_rights')], + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg='The python NetApp-Lib module is required') + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_dacl(self): + + dacl_entry = None + advanced_access_list = None + + dacl_get_iter = netapp_utils.zapi.NaElement('file-directory-security-ntfs-dacl-get-iter') + dacl_info = netapp_utils.zapi.NaElement('file-directory-security-ntfs-dacl') + dacl_info.add_new_child('vserver', self.parameters['vserver']) + dacl_info.add_new_child('ntfs-sd', self.parameters['security_descriptor']) + dacl_info.add_new_child('access-type', self.parameters['access_type']) + dacl_info.add_new_child('account', self.parameters['account']) + query = netapp_utils.zapi.NaElement('query') + 
query.add_child_elem(dacl_info) + dacl_get_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(dacl_get_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching %s DACL for account %s for security descriptor %s: %s' % ( + self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], + to_native(error)), exception=traceback.format_exc()) + + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + attributes_list = result.get_child_by_name('attributes-list') + + if attributes_list is None: + return None + + dacl = attributes_list.get_child_by_name('file-directory-security-ntfs-dacl') + + apply_to_list = [] + apply_to = dacl.get_child_by_name('apply-to') + for apply_child in apply_to.get_children(): + inheritance_level = apply_child.get_content() + + apply_to_list.append(inheritance_level) + + if dacl.get_child_by_name('advanced-rights'): + + advanced_access_list = [] + advanced_access = dacl.get_child_by_name('advanced-rights') + for right in advanced_access.get_children(): + advanced_access_right = right.get_content() + advanced_right = { + 'advanced_access_rights': advanced_access_right + } + advanced_access_list.append(advanced_right) + + dacl_entry = { + 'access_type': dacl.get_child_content('access-type'), + 'account': dacl.get_child_content('account'), + 'apply_to': apply_to_list, + 'security_descriptor': dacl.get_child_content('ntfs-sd'), + 'readable_access_rights': dacl.get_child_content('readable-access-rights'), + 'vserver': dacl.get_child_content('vserver'), + } + + if advanced_access_list is not None: + dacl_entry['advanced_rights'] = advanced_access_list + else: + dacl_entry['rights'] = dacl.get_child_content('rights') + return dacl_entry + + def add_dacl(self): + """ + Adds a new NTFS DACL to an existing NTFS security descriptor + """ + + dacl_obj = 
netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-add") + dacl_obj.add_new_child("access-type", self.parameters['access_type']) + dacl_obj.add_new_child("account", self.parameters['account']) + dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor']) + + if 'rights' not in self.parameters.keys() and 'advanced_access_rights' not in self.parameters.keys(): + self.module.fail_json(msg='Either rights or advanced_access_rights must be specified.') + + if self.parameters.get('apply_to'): + apply_to_obj = netapp_utils.zapi.NaElement("apply-to") + + for apply_entry in self.parameters['apply_to']: + apply_to_obj.add_new_child('inheritance-level', apply_entry) + dacl_obj.add_child_elem(apply_to_obj) + + if self.parameters.get('advanced_access_rights'): + access_rights_obj = netapp_utils.zapi.NaElement("advanced-rights") + + for right in self.parameters['advanced_access_rights']: + access_rights_obj.add_new_child('advanced-access-rights', right) + + dacl_obj.add_child_elem(access_rights_obj) + + if self.parameters.get('rights'): + dacl_obj.add_new_child("rights", self.parameters['rights']) + + try: + self.server.invoke_successfully(dacl_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error adding %s DACL for account %s for security descriptor %s: %s' % ( + self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], to_native(error)), + exception=traceback.format_exc()) + + def remove_dacl(self): + """ + Deletes a NTFS DACL from an existing NTFS security descriptor + """ + dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-remove") + dacl_obj.add_new_child("access-type", self.parameters['access_type']) + dacl_obj.add_new_child("account", self.parameters['account']) + dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor']) + + try: + self.server.invoke_successfully(dacl_obj, True) + except netapp_utils.zapi.NaApiError as error: + 
self.module.fail_json(msg='Error deleting %s DACL for account %s for security descriptor %s: %s' % ( + self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], to_native(error)), + exception=traceback.format_exc()) + + def modify_dacl(self): + """ + Modifies a NTFS DACL on an existing NTFS security descriptor + """ + + dacl_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-dacl-modify") + dacl_obj.add_new_child("access-type", self.parameters['access_type']) + dacl_obj.add_new_child("account", self.parameters['account']) + dacl_obj.add_new_child("ntfs-sd", self.parameters['security_descriptor']) + + if self.parameters.get('apply_to'): + apply_to_obj = netapp_utils.zapi.NaElement("apply-to") + + for apply_entry in self.parameters['apply_to']: + apply_to_obj.add_new_child('inheritance-level', apply_entry) + dacl_obj.add_child_elem(apply_to_obj) + + if self.parameters.get('advanced_access_rights'): + access_rights_obj = netapp_utils.zapi.NaElement("advanced-rights") + + for right in self.parameters['advanced_access_rights']: + access_rights_obj.add_new_child('advanced-access-rights', right) + + dacl_obj.add_child_elem(access_rights_obj) + + if self.parameters.get('rights'): + dacl_obj.add_new_child("rights", self.parameters['rights']) + + try: + self.server.invoke_successfully(dacl_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying %s DACL for account %s for security descriptor %s: %s' % ( + self.parameters['access_type'], self.parameters['account'], self.parameters['security_descriptor'], + to_native(error)), exception=traceback.format_exc()) + + def apply(self): + current, modify = self.get_dacl(), None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed: + if 
self.module.check_mode: + pass + else: + if cd_action == 'create': + self.add_dacl() + elif cd_action == 'delete': + self.remove_dacl() + elif modify: + self.modify_dacl() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap NTFS DACL object and runs the correct play task + """ + obj = NetAppOntapNtfsDacl() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py new file mode 100644 index 000000000..d0abf1e57 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntfs_sd.py @@ -0,0 +1,288 @@ +#!/usr/bin/python + +# (c) 2020-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified' +} + +DOCUMENTATION = """ + +module: na_ontap_ntfs_sd +author: NetApp Ansible Team (@carchi8py) +short_description: NetApp ONTAP create, delete or modify NTFS security descriptor +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '20.4.0' +description: + - Create, modify or destroy NTFS security descriptor + +options: + state: + description: + - Whether the specified NTFS security descriptor should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - Specifies the vserver for the NTFS security descriptor. + required: true + type: str + + name: + description: + - Specifies the NTFS security descriptor name. Not modifiable. + required: true + type: str + + owner: + description: + - Specifies the owner's SID or domain account of the NTFS security descriptor. 
+ - Need to provide the full path of the owner. + type: str + + group: + description: + - Specifies the group's SID or domain account of the NTFS security descriptor. + - Need to provide the full path of the group. + required: false + type: str + + control_flags_raw: + description: + - Specifies the security descriptor control flags. + - 1... .... .... .... = Self Relative + - .0.. .... .... .... = RM Control Valid + - ..0. .... .... .... = SACL Protected + - ...0 .... .... .... = DACL Protected + - .... 0... .... .... = SACL Inherited + - .... .0.. .... .... = DACL Inherited + - .... ..0. .... .... = SACL Inherit Required + - .... ...0 .... .... = DACL Inherit Required + - .... .... ..0. .... = SACL Defaulted + - .... .... ...0 .... = SACL Present + - .... .... .... 0... = DACL Defaulted + - .... .... .... .1.. = DACL Present + - .... .... .... ..0. = Group Defaulted + - .... .... .... ...0 = Owner Defaulted + - At present only the following flags are honored. Others are ignored. + - ..0. .... .... .... = SACL Protected + - ...0 .... .... .... = DACL Protected + - .... .... ..0. .... = SACL Defaulted + - .... .... .... 0... = DACL Defaulted + - .... .... .... ..0. = Group Defaulted + - .... .... .... ...0 = Owner Defaulted + - Convert the 16 bit binary flags and convert to decimal for the input. 
+ type: int + +""" + +EXAMPLES = """ + - name: Create NTFS Security Descriptor + na_ontap_ntfs_sd: + state: present + vserver: SVM1 + name: ansible_sd + owner: DOMAIN\\Account + group: DOMAIN\\Group + control_flags_raw: 0 + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Modify NTFS Security Descriptor + na_ontap_ntfs_sd: + state: present + vserver: SVM1 + name: ansible_sd + owner: DOMAIN\\Account + group: DOMAIN\\Group + control_flags_raw: 0 + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Delete NTFS Security Descriptor + na_ontap_ntfs_sd: + state: absent + vserver: SVM1 + name: ansible_sd + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ + +""" + + +import traceback +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapNtfsSd(object): + """ + Creates, Modifies and Destroys a NTFS security descriptor + """ + + def __init__(self): + """ + Initialize the Ontap NTFS Security Descriptor class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + owner=dict(required=False, type='str'), + group=dict(required=False, type='str'), + control_flags_raw=dict(required=False, type='int'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = 
self.na_helper.set_parameters(self.module.params) + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg='The python NetApp-Lib module is required') + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_ntfs_sd(self): + + ntfs_sd_entry, result = None, None + + ntfs_sd_get_iter = netapp_utils.zapi.NaElement('file-directory-security-ntfs-get-iter') + ntfs_sd_info = netapp_utils.zapi.NaElement('file-directory-security-ntfs') + ntfs_sd_info.add_new_child('vserver', self.parameters['vserver']) + ntfs_sd_info.add_new_child('ntfs-sd', self.parameters['name']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(ntfs_sd_info) + ntfs_sd_get_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(ntfs_sd_get_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching NTFS security descriptor %s : %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + attributes_list = result.get_child_by_name('attributes-list') + ntfs_sd = attributes_list.get_child_by_name('file-directory-security-ntfs') + ntfs_sd_entry = { + 'vserver': ntfs_sd.get_child_content('vserver'), + 'name': ntfs_sd.get_child_content('ntfs-sd'), + 'owner': ntfs_sd.get_child_content('owner'), + 'group': ntfs_sd.get_child_content('group'), + 'control_flags_raw': ntfs_sd.get_child_content('control-flags-raw'), + } + if ntfs_sd_entry.get('control_flags_raw'): + ntfs_sd_entry['control_flags_raw'] = int(ntfs_sd_entry['control_flags_raw']) + return ntfs_sd_entry + return None + + def add_ntfs_sd(self): + """ + Adds a new NTFS security descriptor + """ + + ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-create") + ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name']) + + if 
self.parameters.get('control_flags_raw') is not None: + ntfs_sd_obj.add_new_child("control-flags-raw", str(self.parameters['control_flags_raw'])) + + if self.parameters.get('owner'): + ntfs_sd_obj.add_new_child("owner", self.parameters['owner']) + + if self.parameters.get('group'): + ntfs_sd_obj.add_new_child("group", self.parameters['group']) + + try: + self.server.invoke_successfully(ntfs_sd_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error creating NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def remove_ntfs_sd(self): + """ + Deletes a NTFS security descriptor + """ + ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-delete") + ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name']) + try: + self.server.invoke_successfully(ntfs_sd_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_ntfs_sd(self): + """ + Modifies a NTFS security descriptor + """ + + ntfs_sd_obj = netapp_utils.zapi.NaElement("file-directory-security-ntfs-modify") + ntfs_sd_obj.add_new_child("ntfs-sd", self.parameters['name']) + + if self.parameters.get('control_flags_raw') is not None: + ntfs_sd_obj.add_new_child('control-flags-raw', str(self.parameters['control_flags_raw'])) + + if self.parameters.get('owner'): + ntfs_sd_obj.add_new_child('owner', self.parameters['owner']) + + if self.parameters.get('group'): + ntfs_sd_obj.add_new_child('group', self.parameters['group']) + + try: + self.server.invoke_successfully(ntfs_sd_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json( + msg='Error modifying NTFS security descriptor %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + 
current, modify = self.get_ntfs_sd(), None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.add_ntfs_sd() + elif cd_action == 'delete': + self.remove_ntfs_sd() + elif modify: + self.modify_ntfs_sd() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Creates, deletes and modifies NTFS secudity descriptor + """ + obj = NetAppOntapNtfsSd() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py new file mode 100644 index 000000000..dc8a000e8 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ntp.py @@ -0,0 +1,271 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +module: na_ontap_ntp +short_description: NetApp ONTAP NTP server +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create or delete or modify NTP server in ONTAP +options: + state: + description: + - Whether the specified NTP server should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + server_name: + description: + - The name of the NTP server to manage. 
+ required: True + type: str + version: + description: + - give version for NTP server + choices: ['auto', '3', '4'] + default: 'auto' + type: str + key_id: + description: + - The symmetric authentication key ID being used for this time server. + type: int + version_added: 21.21.0 +""" + +EXAMPLES = """ + - name: Create NTP server + na_ontap_ntp: + state: present + version: auto + key_id: 1 + server_name: "{{ server_name }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Delete NTP server + na_ontap_ntp: + state: absent + server_name: "{{ server_name }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapNTPServer: + """ object initialize and class methods """ + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + server_name=dict(required=True, type='str'), + version=dict(required=False, type='str', default='auto', + choices=['auto', '3', '4']), + key_id=dict(required=False, type='int'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = 
self.rest_api.is_rest() + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 7): + msg = 'REST requires ONTAP 9.7 or later for na_ontap_ntp' + self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg="the python NetApp-Lib module is required") + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def get_ntp_server(self): + """ + Return details about the ntp server + :param: + name : Name of the server_name + :return: Details about the ntp server. None if not found. + :rtype: dict + """ + if self.use_rest: + return self.get_ntp_server_rest() + ntp_iter = netapp_utils.zapi.NaElement('ntp-server-get-iter') + ntp_info = netapp_utils.zapi.NaElement('ntp-server-info') + ntp_info.add_new_child('server-name', self.parameters['server_name']) + + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(ntp_info) + + ntp_iter.add_child_elem(query) + result = self.server.invoke_successfully(ntp_iter, True) + return_value = None + + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + + ntp_server_name = result.get_child_by_name('attributes-list').\ + get_child_by_name('ntp-server-info').\ + get_child_content('server-name') + server_version = result.get_child_by_name('attributes-list').\ + get_child_by_name('ntp-server-info').\ + get_child_content('version') + server_key_id = result.get_child_by_name('attributes-list').\ + get_child_by_name('ntp-server-info').\ + get_child_content('key-id') + return_value = { + 'server-name': ntp_server_name, + 'version': server_version, + 'key_id': int(server_key_id) if server_key_id is not None else 0, + } + + return return_value + + def get_ntp_server_rest(self): + api = 'cluster/ntp/servers' + options = {'server': self.parameters['server_name'], + 'fields': 'server,version,key.id'} + record, error = 
rest_generic.get_one_record(self.rest_api, api, options) + if error: + self.module.fail_json(msg=error) + if record: + return { + 'server': self.na_helper.safe_get(record, ['server']), + 'version': self.na_helper.safe_get(record, ['version']), + 'key_id': self.na_helper.safe_get(record, ['key', 'id']), + } + return None + + def create_ntp_server(self): + """ + create ntp server. + """ + if self.use_rest: + return self.create_ntp_server_rest() + ntp_server_create = netapp_utils.zapi.NaElement.create_node_with_children( + 'ntp-server-create', **{'server-name': self.parameters['server_name'], + 'version': self.parameters['version'] + }) + if self.parameters.get('key_id'): + ntp_server_create.add_new_child("key-id", str(self.parameters['key_id'])) + + try: + self.server.invoke_successfully(ntp_server_create, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating ntp server %s: %s' + % (self.parameters['server_name'], to_native(error)), + exception=traceback.format_exc()) + + def create_ntp_server_rest(self): + api = 'cluster/ntp/servers' + params = { + 'server': self.parameters['server_name'], + 'version': self.parameters['version'] + } + if self.parameters.get('key_id'): + params['key'] = {'id': self.parameters['key_id']} + dummy, error = rest_generic.post_async(self.rest_api, api, params) + if error: + self.module.fail_json(msg=error) + + def delete_ntp_server(self): + """ + delete ntp server. 
+ """ + if self.use_rest: + return self.delete_ntp_server_rest() + ntp_server_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'ntp-server-delete', **{'server-name': self.parameters['server_name']}) + + try: + self.server.invoke_successfully(ntp_server_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting ntp server %s: %s' + % (self.parameters['server_name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_ntp_server_rest(self): + dummy, error = rest_generic.delete_async(self.rest_api, 'cluster/ntp/servers', self.parameters['server_name']) + if error: + self.module.fail_json(msg=error) + + def modify_ntp_server(self, modify): + """ + modify the ntp server + """ + if self.use_rest: + return self.modify_ntp_server_rest(modify) + ntp_modify = netapp_utils.zapi.NaElement.create_node_with_children( + 'ntp-server-modify', + **{'server-name': self.parameters['server_name'], 'version': self.parameters['version']}) + if modify.get('key_id'): + ntp_modify.add_new_child("key-id", str(self.parameters['key_id'])) + try: + self.server.invoke_successfully(ntp_modify, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying version for ntp server %s: %s' + % (self.parameters['server_name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_ntp_server_rest(self, modify): + body = {} + if modify.get('version'): + body['version'] = modify['version'] + if modify.get('key_id'): + body['key'] = {'id': modify['key_id']} + if body: + dummy, error = rest_generic.patch_async(self.rest_api, 'cluster/ntp/servers', self.parameters['server_name'], body) + if error: + self.module.fail_json(msg=error) + + def apply(self): + """Apply action to ntp-server""" + + modify = None + current = self.get_ntp_server() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and 
def main():
    """Instantiate the NTP server module object and run it."""
    NetAppOntapNTPServer().apply()


if __name__ == '__main__':
    main()
+ type: str + required: True +""" + +EXAMPLES = """ + - name: Create NTP key + na_ontap_ntp_key: + state: present + digest_type: sha1 + value: "{{ key_value }}" + id: 1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Delete NTP key + na_ontap_ntp_key: + state: absent + id: 1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapNTPKey: + """ object initialize and class methods """ + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + id=dict(required=True, type='int'), + digest_type=dict(required=True, type='str', choices=['sha1']), + value=dict(required=True, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_ntp_key', 9, 7) + + def get_ntp_key(self): + api = 'cluster/ntp/keys' + options = {'id': self.parameters['id'], + 'fields': 'id,digest_type,value'} + record, error = rest_generic.get_one_record(self.rest_api, api, options) + if error: + 
self.module.fail_json(msg='Error fetching key with id %s: %s' % (self.parameters['id'], to_native(error)), + exception=traceback.format_exc()) + return record + + def create_ntp_key(self): + api = 'cluster/ntp/keys' + params = { + 'id': self.parameters['id'], + 'digest_type': self.parameters['digest_type'], + 'value': self.parameters['value'] + } + dummy, error = rest_generic.post_async(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error creating key with id %s: %s' % (self.parameters['id'], to_native(error)), + exception=traceback.format_exc()) + + def delete_ntp_key(self): + dummy, error = rest_generic.delete_async(self.rest_api, 'cluster/ntp/keys', str(self.parameters['id'])) + if error: + self.module.fail_json(msg='Error deleting key with id %s: %s' % (self.parameters['id'], to_native(error)), + exception=traceback.format_exc()) + + def modify_ntp_key(self, modify): + body = {} + if 'digest_type' in modify: + body['digest_type'] = self.parameters['digest_type'] + if 'value' in modify: + body['value'] = self.parameters['value'] + if body: + dummy, error = rest_generic.patch_async(self.rest_api, 'cluster/ntp/keys', str(self.parameters['id']), body) + if error: + self.module.fail_json(msg='Error modifying key with id %s: %s' % (self.parameters['id'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + cd_action = None + current = self.get_ntp_key() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_ntp_key() + elif cd_action == 'delete': + self.delete_ntp_key() + elif modify: + self.modify_ntp_key(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ Create object and call apply """ + ntp_obj = 
NetAppOntapNTPKey() + ntp_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py new file mode 100644 index 000000000..3026a1781 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme.py @@ -0,0 +1,250 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete NVMe Service +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_nvme +options: + state: + choices: ['present', 'absent'] + description: + - Whether the specified NVMe should exist or not. + default: present + type: str + vserver: + description: + - Name of the vserver to use. 
+ required: true + type: str + status_admin: + description: + - Whether the status of NVMe should be up or down + type: bool +short_description: "NetApp ONTAP Manage NVMe Service" +version_added: 2.8.0 +''' + +EXAMPLES = """ + + - name: Create NVMe + netapp.ontap.na_ontap_nvme: + state: present + status_admin: False + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Modify NVMe + netapp.ontap.na_ontap_nvme: + state: present + status_admin: True + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Delete NVMe + netapp.ontap.na_ontap_nvme: + state: absent + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPNVMe: + """ + Class with NVMe service methods + """ + + def __init__(self): + self.svm_uuid = None + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + status_admin=dict(required=False, type='bool') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = OntapRestAPI(self.module) + self.use_rest = 
self.rest_api.is_rest() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_nvme(self): + """ + Get current nvme details + :return: dict if nvme exists, None otherwise + """ + if self.use_rest: + return self.get_nvme_rest() + nvme_get = netapp_utils.zapi.NaElement('nvme-get-iter') + query = { + 'query': { + 'nvme-target-service-info': { + 'vserver': self.parameters['vserver'] + } + } + } + nvme_get.translate_struct(query) + try: + result = self.server.invoke_successfully(nvme_get, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching nvme info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + attributes_list = result.get_child_by_name('attributes-list') + nvme_info = attributes_list.get_child_by_name('nvme-target-service-info') + return {'status_admin': self.na_helper.get_value_for_bool(True, nvme_info.get_child_content('is-available'))} + return None + + def create_nvme(self): + """ + Create NVMe service + """ + if self.use_rest: + return self.create_nvme_rest() + nvme_create = netapp_utils.zapi.NaElement('nvme-create') + if self.parameters.get('status_admin') is not None: + options = {'is-available': self.na_helper.get_value_for_bool(False, self.parameters['status_admin'])} + nvme_create.translate_struct(options) + try: + self.server.invoke_successfully(nvme_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating nvme for vserver %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def delete_nvme(self): + """ + Delete NVMe service + """ + if self.use_rest: + return 
self.delete_nvme_rest() + nvme_delete = netapp_utils.zapi.NaElement('nvme-delete') + try: + self.server.invoke_successfully(nvme_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting nvme for vserver %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def modify_nvme(self, status=None): + """ + Modify NVMe service + """ + if status is None: + status = self.parameters['status_admin'] + if self.use_rest: + return self.modify_nvme_rest(status) + options = {'is-available': status} + nvme_modify = netapp_utils.zapi.NaElement('nvme-modify') + nvme_modify.translate_struct(options) + try: + self.server.invoke_successfully(nvme_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying nvme for vserver %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def get_nvme_rest(self): + api = 'protocols/nvme/services' + params = {'svm.name': self.parameters['vserver'], 'fields': 'enabled'} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching nvme info for vserver: %s' % self.parameters['vserver']) + if record: + self.svm_uuid = record['svm']['uuid'] + record['status_admin'] = record.pop('enabled') + return record + return None + + def create_nvme_rest(self): + api = 'protocols/nvme/services' + body = {'svm.name': self.parameters['vserver']} + if self.parameters.get('status_admin') is not None: + body['enabled'] = self.parameters['status_admin'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating nvme for vserver %s: %s' % (self.parameters['vserver'], + to_native(error)), + exception=traceback.format_exc()) + + def delete_nvme_rest(self): + api = 'protocols/nvme/services' + dummy, error = 
rest_generic.delete_async(self.rest_api, api, self.svm_uuid) + if error: + self.module.fail_json(msg='Error deleting nvme for vserver %s: %s' % (self.parameters['vserver'], + to_native(error)), + exception=traceback.format_exc()) + + def modify_nvme_rest(self, status): + if status == 'false': + status = False + api = 'protocols/nvme/services' + dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, {'enabled': status}) + if error: + self.module.fail_json(msg='Error modifying nvme for vserver: %s' % self.parameters['vserver']) + + def apply(self): + """ + Apply action to NVMe service + """ + modify = None + current = self.get_nvme() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.parameters.get('status_admin') is not None: + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_nvme() + elif cd_action == 'delete': + # NVMe status_admin needs to be down before deleting it + self.modify_nvme('false') + self.delete_nvme() + elif modify: + self.modify_nvme() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """Execute action""" + community_obj = NetAppONTAPNVMe() + community_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py new file mode 100644 index 000000000..d328c4a0c --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_namespace.py @@ -0,0 +1,256 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, 
print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete NVME namespace +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_nvme_namespace +options: + state: + choices: ['present', 'absent'] + description: + - Whether the specified namespace should exist or not. + default: present + type: str + vserver: + description: + - Name of the vserver to use. + required: true + type: str + ostype: + description: + - Specifies the ostype for initiators + choices: ['windows', 'linux', 'vmware', 'xen', 'hyper_v'] + type: str + size: + description: + - Size in bytes. + Range is [0..2^63-1]. + type: int + size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + type: str + default: 'b' + path: + description: + - Namespace path. + required: true + type: str + block_size: + description: + - Size in bytes of a logical block. Possible values are 512 (Data ONTAP 9.6 and later), 4096. The default value is 4096. 
+ choices: [512, 4096] + type: int + version_added: '20.5.0' +short_description: "NetApp ONTAP Manage NVME Namespace" +version_added: 2.8.0 +''' + +EXAMPLES = """ + + - name: Create NVME Namespace + netapp.ontap.na_ontap_nvme_namespace: + state: present + ostype: linux + path: /vol/ansible/test + size: 20 + size_unit: mb + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Create NVME Namespace (Idempotency) + netapp.ontap.na_ontap_nvme_namespace: + state: present + ostype: linux + path: /vol/ansible/test + size: 20 + size_unit: mb + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPNVMENamespace: + """ + Class with NVME namespace methods + """ + + def __init__(self): + + self.namespace_uuid = None + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + ostype=dict(required=False, type='str', choices=['windows', 'linux', 'vmware', 'xen', 'hyper_v']), + path=dict(required=True, type='str'), + size=dict(required=False, type='int'), + size_unit=dict(default='b', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'), + block_size=dict(required=False, choices=[512, 4096], type='int') + )) + + self.module = AnsibleModule( + 
argument_spec=self.argument_spec, + required_if=[('state', 'present', ['ostype', 'size'])], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if self.parameters.get('size'): + self.parameters['size'] = self.parameters['size'] * \ + netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']] + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_namespace(self): + """ + Get current namespace details + :return: dict if namespace exists, None otherwise + """ + if self.use_rest: + return self.get_namespace_rest() + namespace_get = netapp_utils.zapi.NaElement('nvme-namespace-get-iter') + query = { + 'query': { + 'nvme-namespace-info': { + 'path': self.parameters['path'], + 'vserver': self.parameters['vserver'] + } + } + } + namespace_get.translate_struct(query) + try: + result = self.server.invoke_successfully(namespace_get, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching namespace info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + return result + return None + + def create_namespace(self): + """ + Create a NVME Namespace + """ + if self.use_rest: + return self.create_namespace_rest() + options = {'path': self.parameters['path'], + 'ostype': self.parameters['ostype'], + 'size': self.parameters['size'] + } + if self.parameters.get('block_size'): + options['block-size'] = self.parameters['block_size'] + namespace_create = netapp_utils.zapi.NaElement('nvme-namespace-create') + namespace_create.translate_struct(options) + try: + 
self.server.invoke_successfully(namespace_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating namespace for path %s: %s' + % (self.parameters.get('path'), to_native(error)), + exception=traceback.format_exc()) + + def delete_namespace(self): + """ + Delete a NVME Namespace + """ + if self.use_rest: + return self.delete_namespace_rest() + options = {'path': self.parameters['path'] + } + namespace_delete = netapp_utils.zapi.NaElement.create_node_with_children('nvme-namespace-delete', **options) + try: + self.server.invoke_successfully(namespace_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting namespace for path %s: %s' + % (self.parameters.get('path'), to_native(error)), + exception=traceback.format_exc()) + + def get_namespace_rest(self): + api = 'storage/namespaces' + params = { + 'svm.name': self.parameters['vserver'], + 'name': self.parameters['path'], + 'fields': 'enabled' + } + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching namespace info for vserver: %s' % self.parameters['vserver']) + if record: + self.namespace_uuid = record['uuid'] + return record + return None + + def create_namespace_rest(self): + api = 'storage/namespaces' + body = {'svm.name': self.parameters['vserver'], + 'os_type': self.parameters['ostype'], + 'name': self.parameters['path'], + 'space.size': self.parameters['size']} + if self.parameters.get('block_size') is not None: + body['space.block_size'] = self.parameters['block_size'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating namespace for vserver %s: %s' % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def delete_namespace_rest(self): + api = 'storage/namespaces' + dummy, error = 
rest_generic.delete_async(self.rest_api, api, self.namespace_uuid) + if error: + self.module.fail_json(msg='Error deleting namespace for vserver %s: %s' % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + """ + Apply action to NVME Namespace + """ + current = self.get_namespace() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_namespace() + elif cd_action == 'delete': + self.delete_namespace() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """Execute action""" + community_obj = NetAppONTAPNVMENamespace() + community_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py new file mode 100644 index 000000000..7d76a81a7 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_nvme_subsystem.py @@ -0,0 +1,463 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete NVME subsystem + - Associate(modify) host/map to NVME subsystem + - NVMe service should be existing in the data vserver with NVMe protocol as a pre-requisite +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_nvme_subsystem +options: + state: + choices: ['present', 'absent'] + description: + - Whether the specified subsystem should exist or not. + default: present + type: str + vserver: + description: + - Name of the vserver to use. 
+ required: true + type: str + subsystem: + description: + - Specifies the subsystem + required: true + type: str + ostype: + description: + - Specifies the ostype for initiators + choices: ['windows', 'linux', 'vmware', 'xen', 'hyper_v'] + type: str + skip_host_check: + description: + - Skip host check + - Required to delete an NVMe Subsystem with attached NVMe namespaces + default: false + type: bool + skip_mapped_check: + description: + - Skip mapped namespace check + - Required to delete an NVMe Subsystem with attached NVMe namespaces + default: false + type: bool + hosts: + description: + - List of host NQNs (NVMe Qualification Name) associated to the controller. + type: list + elements: str + paths: + description: + - List of Namespace paths to be associated with the subsystem. + type: list + elements: str +short_description: "NetApp ONTAP Manage NVME Subsystem" +version_added: 2.8.0 +''' + +EXAMPLES = """ + + - name: Create NVME Subsystem + netapp.ontap.na_ontap_nvme_subsystem: + state: present + subsystem: test_sub + vserver: test_dest + ostype: linux + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete NVME Subsystem + netapp.ontap.na_ontap_nvme_subsystem: + state: absent + subsystem: test_sub + vserver: test_dest + skip_host_check: True + skip_mapped_check: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Associate NVME Subsystem host/map + netapp.ontap.na_ontap_nvme_subsystem: + state: present + subsystem: "{{ subsystem }}" + ostype: linux + hosts: nqn.1992-08.com.netapp:sn.3017cfc1e2ba11e89c55005056b36338:subsystem.ansible + paths: /vol/ansible/test,/vol/ansible/test1 + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Modify NVME subsystem map + netapp.ontap.na_ontap_nvme_subsystem: + state: present + subsystem: test_sub + vserver: 
test_dest + skip_host_check: True + skip_mapped_check: True + paths: /vol/ansible/test + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPNVMESubsystem: + """ + Class with NVME subsytem methods + """ + + def __init__(self): + + self.subsystem_uuid = None + self.namespace_list = [] + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + subsystem=dict(required=True, type='str'), + ostype=dict(required=False, type='str', choices=['windows', 'linux', 'vmware', 'xen', 'hyper_v']), + skip_host_check=dict(required=False, type='bool', default=False), + skip_mapped_check=dict(required=False, type='bool', default=False), + hosts=dict(required=False, type='list', elements='str'), + paths=dict(required=False, type='list', elements='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_subsystem(self): + 
""" + Get current subsystem details + :return: dict if subsystem exists, None otherwise + """ + if self.use_rest: + return self.get_subsystem_rest() + result = self.get_zapi_info('nvme-subsystem-get-iter', 'nvme-subsystem-info') + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + return True + return None + + def create_subsystem(self): + """ + Create a NVME Subsystem + """ + if self.use_rest: + return self.create_subsystem_rest() + options = {'subsystem': self.parameters['subsystem'], + 'ostype': self.parameters['ostype'] + } + subsystem_create = netapp_utils.zapi.NaElement('nvme-subsystem-create') + subsystem_create.translate_struct(options) + try: + self.server.invoke_successfully(subsystem_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating subsystem for %s: %s' + % (self.parameters.get('subsystem'), to_native(error)), + exception=traceback.format_exc()) + + def delete_subsystem(self): + """ + Delete a NVME subsystem + """ + if self.use_rest: + return self.delete_subsystem_rest() + options = {'subsystem': self.parameters['subsystem'], + 'skip-host-check': 'true' if self.parameters.get('skip_host_check') else 'false', + 'skip-mapped-check': 'true' if self.parameters.get('skip_mapped_check') else 'false', + } + subsystem_delete = netapp_utils.zapi.NaElement.create_node_with_children('nvme-subsystem-delete', **options) + try: + self.server.invoke_successfully(subsystem_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting subsystem for %s: %s' + % (self.parameters.get('subsystem'), to_native(error)), + exception=traceback.format_exc()) + + def get_subsystem_host_map(self, type): + """ + Get current subsystem host details + :return: list if host exists, None otherwise + """ + if type == 'hosts': + zapi_get, zapi_info, zapi_type = 'nvme-subsystem-host-get-iter', 
'nvme-target-subsystem-host-info', 'host-nqn' + elif type == 'paths': + zapi_get, zapi_info, zapi_type = 'nvme-subsystem-map-get-iter', 'nvme-target-subsystem-map-info', 'path' + result = self.get_zapi_info(zapi_get, zapi_info, zapi_type) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + attrs_list = result.get_child_by_name('attributes-list') + return_list = [item[zapi_type] for item in attrs_list.get_children()] + return {type: return_list} + return None + + def get_zapi_info(self, zapi_get_method, zapi_info, zapi_type=None): + subsystem_get = netapp_utils.zapi.NaElement(zapi_get_method) + query = { + 'query': { + zapi_info: { + 'subsystem': self.parameters.get('subsystem'), + 'vserver': self.parameters.get('vserver') + } + } + } + subsystem_get.translate_struct(query) + qualifier = " %s" % zapi_type if zapi_type else "" + try: + result = self.server.invoke_successfully(subsystem_get, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching subsystem%s info: %s' % (qualifier, to_native(error)), + exception=traceback.format_exc()) + return result + + def add_subsystem_host_map(self, data, type): + """ + Add a NVME Subsystem host/map + :param: data: list of hosts/paths to be added + :param: type: hosts/paths + """ + if type == 'hosts': + zapi_add, zapi_type = 'nvme-subsystem-host-add', 'host-nqn' + elif type == 'paths': + zapi_add, zapi_type = 'nvme-subsystem-map-add', 'path' + + for item in data: + options = {'subsystem': self.parameters['subsystem'], + zapi_type: item + } + subsystem_add = netapp_utils.zapi.NaElement.create_node_with_children(zapi_add, **options) + try: + self.server.invoke_successfully(subsystem_add, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error adding %s for subsystem %s: %s' + % (item, self.parameters.get('subsystem'), to_native(error)), + exception=traceback.format_exc()) + 
+ def remove_subsystem_host_map(self, data, type): + """ + Remove a NVME Subsystem host/map + :param: data: list of hosts/paths to be added + :param: type: hosts/paths + """ + if type == 'hosts': + zapi_remove, zapi_type = 'nvme-subsystem-host-remove', 'host-nqn' + elif type == 'paths': + zapi_remove, zapi_type = 'nvme-subsystem-map-remove', 'path' + + for item in data: + options = {'subsystem': self.parameters['subsystem'], + zapi_type: item + } + subsystem_remove = netapp_utils.zapi.NaElement.create_node_with_children(zapi_remove, **options) + try: + self.server.invoke_successfully(subsystem_remove, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error removing %s for subsystem %s: %s' + % (item, self.parameters.get('subsystem'), to_native(error)), + exception=traceback.format_exc()) + + def associate_host_map(self, types): + """ + Check if there are hosts or paths to be associated with the subsystem + """ + action_add_dict = {} + action_remove_dict = {} + for type in types: + current = None + if self.parameters.get(type): + if self.use_rest: + if self.subsystem_uuid: + current = self.get_subsystem_host_map_rest(type) + else: + current = self.get_subsystem_host_map(type) + if current: + add_items = self.na_helper.\ + get_modified_attributes(current, self.parameters, get_list_diff=True).get(type) + remove_items = [item for item in current[type] if item not in self.parameters.get(type)] + else: + add_items = self.parameters[type] + remove_items = {} + if add_items: + action_add_dict[type] = add_items + self.na_helper.changed = True + if remove_items: + action_remove_dict[type] = remove_items + self.na_helper.changed = True + return action_add_dict, action_remove_dict + + def modify_host_map(self, add_host_map, remove_host_map): + for type, data in sorted(add_host_map.items()): + if self.use_rest: + self.add_subsystem_host_map_rest(data, type) + else: + self.add_subsystem_host_map(data, type) + for type, data in 
sorted(remove_host_map.items()): + if self.use_rest: + self.remove_subsystem_host_map_rest(data, type) + else: + self.remove_subsystem_host_map(data, type) + + def get_subsystem_rest(self): + api = 'protocols/nvme/subsystems' + params = {'svm.name': self.parameters['vserver'], 'name': self.parameters['subsystem']} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + if self.na_helper.ignore_missing_vserver_on_delete(error): + return None + self.module.fail_json(msg='Error fetching subsystem info for vserver: %s, %s' % (self.parameters['vserver'], to_native(error))) + if record: + self.subsystem_uuid = record['uuid'] + return record + return None + + def get_subsystem_host_map_rest(self, type): + if type == 'hosts': + api = 'protocols/nvme/subsystems/%s/hosts' % self.subsystem_uuid + records, error = rest_generic.get_0_or_more_records(self.rest_api, api) + if error: + self.module.fail_json(msg='Error fetching subsystem host info for vserver: %s: %s' % (self.parameters['vserver'], to_native(error))) + if records is not None: + return {type: [record['nqn'] for record in records]} + return None + if type == 'paths': + api = 'protocols/nvme/subsystem-maps' + query = {'svm.name': self.parameters['vserver'], 'subsystem.name': self.parameters['subsystem']} + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query) + if error: + self.module.fail_json(msg='Error fetching subsystem map info for vserver: %s: %s' % (self.parameters['vserver'], to_native(error))) + if records is not None: + return_list = [] + for each in records: + return_list.append(each['namespace']['name']) + self.namespace_list.append(each['namespace']) + return {type: return_list} + return None + + def add_subsystem_host_map_rest(self, data, type): + if type == 'hosts': + records = [{'nqn': item} for item in data] + api = 'protocols/nvme/subsystems/%s/hosts' % self.subsystem_uuid + body = {'records': records} + dummy, error = 
rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json( + msg='Error adding %s for subsystem %s: %s' % (records, self.parameters['subsystem'], to_native(error)), exception=traceback.format_exc()) + elif type == 'paths': + api = 'protocols/nvme/subsystem-maps' + for item in data: + body = {'subsystem.name': self.parameters['subsystem'], + 'svm.name': self.parameters['vserver'], + 'namespace.name': item + } + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json( + msg='Error adding %s for subsystem %s: %s' % (item, self.parameters['subsystem'], to_native(error)), exception=traceback.format_exc()) + + def remove_subsystem_host_map_rest(self, data, type): + if type == 'hosts': + for item in data: + api = 'protocols/nvme/subsystems/%s/hosts/%s' % (self.subsystem_uuid, item) + dummy, error = rest_generic.delete_async(self.rest_api, api, None) + if error: + self.module.fail_json(msg='Error removing %s for subsystem %s: %s' + % (item, self.parameters['subsystem'], to_native(error)), exception=traceback.format_exc()) + elif type == 'paths': + for item in data: + namespace_uuid = None + for each in self.namespace_list: + if each['name'] == item: + namespace_uuid = each['uuid'] + api = 'protocols/nvme/subsystem-maps/%s/%s' % (self.subsystem_uuid, namespace_uuid) + body = {'subsystem.name': self.parameters['subsystem'], + 'svm.name': self.parameters['vserver'], + 'namespace.name': item + } + dummy, error = rest_generic.delete_async(self.rest_api, api, None, body=body) + if error: + self.module.fail_json(msg='Error removing %s for subsystem %s: %s' + % (item, self.parameters['subsystem'], to_native(error)), exception=traceback.format_exc()) + + def create_subsystem_rest(self): + api = 'protocols/nvme/subsystems' + body = {'svm.name': self.parameters['vserver'], + 'os_type': self.parameters['ostype'], + 'name': self.parameters['subsystem']} + dummy, error = rest_generic.post_async(self.rest_api, api, 
body) + if error: + self.module.fail_json(msg='Error creating subsystem for vserver %s: %s' % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def delete_subsystem_rest(self): + api = 'protocols/nvme/subsystems' + body = {'allow_delete_while_mapped': 'true' if self.parameters.get('skip_mapped_check') else 'false', + 'allow_delete_with_hosts': 'true' if self.parameters.get('skip_host_check') else 'false'} + dummy, error = rest_generic.delete_async(self.rest_api, api, self.subsystem_uuid, body=body) + if error: + self.module.fail_json(msg='Error deleting subsystem for vserver %s: %s' % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + """ + Apply action to NVME subsystem + """ + types = ['hosts', 'paths'] + current = self.get_subsystem() + add_host_map, remove_host_map = dict(), dict() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('ostype') is None: + self.module.fail_json(msg="Error: Missing required parameter 'ostype' for creating subsystem") + if cd_action != 'delete' and self.parameters['state'] == 'present': + add_host_map, remove_host_map = self.associate_host_map(types) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_subsystem() + self.get_subsystem() + self.modify_host_map(add_host_map, remove_host_map) + elif cd_action == 'delete': + self.delete_subsystem() + elif cd_action is None: + self.modify_host_map(add_host_map, remove_host_map) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """Execute action""" + community_obj = NetAppONTAPNVMESubsystem() + community_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py 
b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py new file mode 100644 index 000000000..32b3e7631 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_object_store.py @@ -0,0 +1,360 @@ +#!/usr/bin/python + +# (c) 2019-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_object_store +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: na_ontap_object_store +short_description: NetApp ONTAP manage object store config. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) + +description: +- Create or delete object store config on ONTAP. + +options: + + state: + description: + - Whether the specified object store config should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + name: + required: true + description: + - The name of the object store config to manage. + type: str + + provider_type: + description: + - The name of the object store config provider. + type: str + + server: + description: + - Fully qualified domain name of the object store config. + type: str + + port: + description: + - Port number of the object store that ONTAP uses when establishing a connection. + type: int + version_added: 21.9.0 + + container: + description: + - Data bucket/container name used in S3 requests. + type: str + + access_key: + description: + - Access key ID for AWS_S3 and SGWS provider types. + type: str + + secret_password: + description: + - Secret access key for AWS_S3 and SGWS provider types. + type: str + + certificate_validation_enabled: + description: + - Is SSL/TLS certificate validation enabled? + - If not specified, ONTAP will default to true. + type: bool + version_added: 21.9.0 + + ssl_enabled: + description: + - Is SSL enabled? 
    - If not specified, ONTAP will default to true.
    type: bool
    version_added: 21.9.0

  change_password:
    description:
    - By default, the secret_password is used on create but ignored if the resource already exists.
    - If set to true, the module always attempts to change the password as it cannot read the current value.
    - When this is set to true, the module is not idempotent.
    type: bool
    default: false
    version_added: 21.13.0

  owner:
    description:
    - Owner of the target. Cannot be modified.
    - With REST, allowed values are fabricpool or snapmirror. A target can be used by only one feature.
    - With ZAPI, the only allowed value is fabricpool.
    - If absent, fabricpool is assumed on creation.
    type: str
    version_added: 21.13.0
'''

EXAMPLES = """
- name: object store Create
  netapp.ontap.na_ontap_object_store:
    state: present
    name: ansible
    provider_type: SGWS
    server: abc
    container: abc
    access_key: s3.amazonaws.com
    secret_password: abc
    hostname: "{{ hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"

- name: object store Delete
  netapp.ontap.na_ontap_object_store:
    state: absent
    name: ansible
    hostname: "{{ hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"
"""

RETURN = """

"""
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic


class NetAppOntapObjectStoreConfig():
    ''' object initialize and class methods '''

    def __init__(self):
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'), + change_password=dict(required=False, type='bool', default=False, no_log=False), + )) + # only fields that are readable through a GET request + rest_options = dict( + name=dict(required=True, type='str'), + provider_type=dict(required=False, type='str'), + server=dict(required=False, type='str'), + container=dict(required=False, type='str'), + access_key=dict(required=False, type='str', no_log=True), + port=dict(required=False, type='int'), + certificate_validation_enabled=dict(required=False, type='bool'), + ssl_enabled=dict(required=False, type='bool'), + owner=dict(required=False, type='str') + ) + self.rest_get_fields = list(rest_options.keys()) + # additional fields for POST/PATCH + rest_options.update(dict( + secret_password=dict(required=False, type='str', no_log=True), + )) + self.rest_all_fields = rest_options.keys() + self.argument_spec.update(rest_options) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + # API should be used for ONTAP 9.6 or higher, Zapi for lower version + self.rest_api = OntapRestAPI(self.module) + if self.rest_api.is_rest(): + self.use_rest = True + elif not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + if self.parameters.get('owner', 'fabricpool') != 'fabricpool': + self.module.fail_json(msg='Error: unsupported value for owner: %s when using ZAPI.' % self.parameters.get('owner')) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def get_aggr_object_store(self): + """ + Fetch details if object store config exists. 
+ :return: + Dictionary of current details if object store config found + None if object store config is not found + """ + if self.use_rest: + api = "cloud/targets" + query = {'name': self.parameters['name']} + fields = ','.join(self.rest_get_fields) + fields += ',uuid' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg='Error %s' % error) + return record + else: + aggr_object_store_get_iter = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-object-store-config-get', **{'object-store-name': self.parameters['name']}) + try: + result = self.server.invoke_successfully(aggr_object_store_get_iter, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + # Error 15661 denotes an object store not being found. + if to_native(error.code) == "15661": + return None + else: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + info = self.na_helper.safe_get(result, ['attributes', 'aggr-object-store-config-info']) + if info: + zapi_to_rest = { + 'access_key': dict(key_list=['access-key'], convert_to=str), + 'certificate_validation_enabled': dict(key_list=['is-certificate-validation-enabled'], convert_to=bool), + 'container': dict(key_list=['s3-name'], convert_to=str), + 'name': dict(key_list=['object-store-name'], convert_to=str), + 'port': dict(key_list=['port'], convert_to=int), + 'provider_type': dict(key_list=['provider-type'], convert_to=str), + 'ssl_enabled': dict(key_list=['ssl-enabled'], convert_to=bool), + 'server': dict(key_list=['server'], convert_to=str) + } + results = {} + self.na_helper.zapi_get_attrs(info, zapi_to_rest, results) + return results + return None + + def validate_and_build_body(self, modify=None): + if modify is None: + required_keys = set(['provider_type', 'server', 'container', 'access_key']) + if not required_keys.issubset(set(self.parameters.keys())): + self.module.fail_json(msg='Error provisioning object store %s: one 
of the following parameters are missing %s' + % (self.parameters['name'], ', '.join(required_keys))) + if not self.use_rest: + return None + params = self.parameters if modify is None else modify + body = {} + for key in (self.rest_all_fields): + if params.get(key) is not None: + body[key] = params[key] + if not modify and 'owner' not in body: + body['owner'] = 'fabricpool' + if modify and 'owner' in body: + self.module.fail_json(msg='Error modifying object store, owner cannot be changed. Found: %s.' % body['owner']) + return body + + def create_aggr_object_store(self, body): + """ + Create aggregate object store config + :return: None + """ + if self.use_rest: + api = "cloud/targets" + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error %s' % error) + else: + options = {'object-store-name': self.parameters['name'], + 'provider-type': self.parameters['provider_type'], + 'server': self.parameters['server'], + 's3-name': self.parameters['container'], + 'access-key': self.parameters['access_key']} + if self.parameters.get('secret_password'): + options['secret-password'] = self.parameters['secret_password'] + if self.parameters.get('port') is not None: + options['port'] = str(self.parameters['port']) + if self.parameters.get('certificate_validation_enabled') is not None: + options['is-certificate-validation-enabled'] = str(self.parameters['certificate_validation_enabled']).lower() + if self.parameters.get('ssl_enabled') is not None: + options['ssl-enabled'] = str(self.parameters['ssl_enabled']).lower() + object_store_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-object-store-config-create', **options) + + try: + self.server.invoke_successfully(object_store_create, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error provisioning object store config %s: %s" + % (self.parameters['name'], to_native(error)), + 
exception=traceback.format_exc()) + + def modify_aggr_object_store(self, body, uuid=None): + """ + modify aggregate object store config + :return: None + """ + api = "cloud/targets" + dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body) + if error: + self.module.fail_json(msg='Error %s' % error) + + def delete_aggr_object_store(self, uuid=None): + """ + Delete aggregate object store config + :return: None + """ + if self.use_rest: + api = "cloud/targets" + dummy, error = rest_generic.delete_async(self.rest_api, api, uuid) + if error: + self.module.fail_json(msg='Error %s' % error) + else: + object_store_destroy = netapp_utils.zapi.NaElement.create_node_with_children( + 'aggr-object-store-config-delete', **{'object-store-name': self.parameters['name']}) + + try: + self.server.invoke_successfully(object_store_destroy, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error removing object store config %s: %s" % + (self.parameters['name'], to_native(error)), exception=traceback.format_exc()) + + def apply(self): + """ + Apply action to the object store config + :return: None + """ + modify = None + current = self.get_aggr_object_store() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.parameters['change_password'] and 'secret_password' in self.parameters: + if not modify: + modify = {} + modify['secret_password'] = self.parameters['secret_password'] + self.na_helper.changed = True + self.module.warn('na_ontap_object_store is not idempotent when change_password is set to true') + if not self.use_rest and modify: + self.module.fail_json(msg="Error - modify is not supported with ZAPI: %s" % modify) + if cd_action == 'create' or modify: + body = self.validate_and_build_body(modify) + + if self.na_helper.changed and not self.module.check_mode: + uuid = current['uuid'] if 
current and self.use_rest else None + if cd_action == 'create': + self.create_aggr_object_store(body) + elif cd_action == 'delete': + self.delete_aggr_object_store(uuid) + elif modify: + self.modify_aggr_object_store(body, uuid) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Create Object Store Config class instance and invoke apply + :return: None + """ + obj_store = NetAppOntapObjectStoreConfig() + obj_store.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_partitions.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_partitions.py new file mode 100644 index 000000000..33dd22e39 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_partitions.py @@ -0,0 +1,415 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_partitions + +short_description: NetApp ONTAP Assign partitions and disks to nodes. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 21.8.0 +author: NetApp Ansible Team (@carchi8py) + +description: +- Assign the specified number of partitions or disks eligible for partitioning to a node. +- There is some overlap between this module and the na_ontap_disks module. +- If you don't have ADP v1 or v2 then you should be using the na_ontap_disks module to assign whole disks. +- Partitions/disks are added in the following order +- 1. Any unassigned partitions are added. +- 2. Any unassigned disks of the correct type are added and will be partitioned when added to an aggregate if required. +- 3. Any spare partner partitions will be re-assigned. +- 4. 
Any partner spare disks will be re-assigned and be partitioned when added to an aggregate.
- If you specify a partition_count less than the current number of partitions, then spare partitions will be unassigned.
- If a previously partitioned disk has the partitions removed, and even if it is "slow zeroed" the system \
  will consider it a shared partitioned disk rather than a spare.
- In a root-data-data configuration (ADPv2) if you specify data1 as the partition_type then only P1 partitions will be counted.
- Disk autoassign must be turned off before using this module to prevent the disks being reassigned automatically by the cluster.
- This can be done through na_ontap_disk_options or via the cli "disk option modify -node -autoassign off".

options:
  node:
    required: true
    type: str
    description:
    - Specifies the node that the partitions and disks should be assigned to.

  partition_count:
    required: true
    type: int
    description:
    - Total number of partitions that should be assigned to the owner.

  disk_type:
    required: true
    choices: ["ATA", "BSAS", "FCAL", "FSAS", "LUN", "MSATA", "SAS", "SSD", "SSD_NVM", "VMDISK", "unknown"]
    type: str
    description:
    - The type of disk that the partitions should use.

  partition_type:
    required: true
    choices: ["data", "root", "data1", "data2"]
    type: str
    description:
    - The type of partition being assigned, either root, data, data1 or data2.

  partitioning_method:
    required: true
    choices: ["root_data", "root_data1_data2"]
    type: str
    description:
    - The type of partition method being used, either root_data or root_data1_data2.

  min_spares:
    description:
    - Minimum spare disks or partitions required per type for the node.
    type: int

'''

EXAMPLES = """
- name: Assign specified total partitions to node cluster-01
  netapp.ontap.na_ontap_partitions:
    node: cluster-01
    partition_count: 56
    disk_type: FSAS
    partition_type: data
    partitioning_method: root_data
    hostname: "{{ hostname }}"
    username: "{{ admin username }}"
    password: "{{ admin password }}"
"""

RETURN = """

"""

from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh


class NetAppOntapPartitions():
    ''' object initialize and class methods '''

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            node=dict(required=True, type='str'),
            partition_count=dict(required=True, type='int'),
            disk_type=dict(required=True, type='str', choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'SSD_NVM', 'VMDISK', 'unknown']),
            partition_type=dict(required=True, type='str', choices=['data1', 'data2', 'data', 'root']),
            partitioning_method=dict(required=True, type='str', choices=['root_data1_data2', 'root_data']),
            min_spares=dict(required=False, type='int')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # If min_spares is not specified, min_spares is 1 if SSD, min_spares is 2 for any other disk type.
+ if 'min_spares' not in self.parameters: + if self.parameters['disk_type'] in ('SSD', 'SSD_NVM'): + self.parameters['min_spares'] = 1 + else: + self.parameters['min_spares'] = 2 + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + self.module.fail_json(msg=self.rest_api.requires_ontap_version('na_ontap_partitions', '9.6')) + + def get_disks(self, container_type, node=None): + """ + Check for owned disks, unassigned disks or spare disks. + Return: list of disks or an empty list + """ + api = 'storage/disks' + + if container_type == 'unassigned': + query = { + 'container_type': 'unassigned', + 'type': self.parameters['disk_type'], + 'fields': 'name' + } + if container_type == 'spare': + query = { + 'home_node.name': node, + 'container_type': 'spare', + 'type': self.parameters['disk_type'], + 'fields': 'name' + } + message, error = self.rest_api.get(api, query) + records, error = rrh.check_for_0_or_more_records(api, message, error) + + if error: + self.module.fail_json(msg=error) + + return records if records else list() + + def get_partitions(self, container_type, node=None): + """ + Get partitions info + Return: list of partitions of a specified container type or None. 
+ """ + api = 'private/cli/storage/disk/partition' + + query = {} + + if container_type == 'spare': + query = { + 'fields': 'partition,container-type,disk-type,partitioning-method,home-node-name,is-root,owner-node-name', + 'home-node-name': node, + 'disk-type': self.parameters['disk_type'], + 'container-type': 'spare', + 'partitioning-method': self.parameters['partitioning_method'] + } + if container_type == 'unassigned': + query = { + 'fields': 'partition,container-type,disk-type,partitioning-method,home-node-name,is-root,owner-node-name', + 'nodelist': node, + 'disk-type': self.parameters['disk_type'], + 'container-type': 'unassigned' + } + if container_type == 'owner': + query = { + 'fields': 'partition,container-type,disk-type,partitioning-method,home-node-name,is-root,owner-node-name', + 'home-node-name': node, + 'disk-type': self.parameters['disk_type'], + 'partitioning-method': self.parameters['partitioning_method'] + } + + if self.parameters['partition_type'] == 'root': + query['is-root'] = True + else: + query['is-root'] = False + + message, error = self.rest_api.get(api, query) + records, error = rrh.check_for_0_or_more_records(api, message, error) + if error: + self.module.fail_json(msg=error) + + if records: + if self.parameters['partitioning_method'] == 'root_data1_data2': + # get just the P1 or P2 partitions + data_partitions = [] + for record in records: + if self.parameters['partition_type'] == 'data1' and record['partition'].endswith('P1'): + data_partitions.append(record) + elif self.parameters['partition_type'] == 'data2' and record['partition'].endswith('P2'): + data_partitions.append(record) + return data_partitions + + return records + else: + return list() + + def get_partner_node_name(self): + """ + return: partner_node_name, str + """ + api = 'cluster/nodes' + query = { + 'ha.partners.name': self.parameters['node'] + } + message, error = self.rest_api.get(api, query) + records, error = rrh.check_for_0_or_more_records(api, message, error) + 
+ if error: + self.module.fail_json(msg=error) + + return records[0]['name'] if records else None + + def assign_disks(self, disks): + """ + Assign disks to node + """ + api = 'private/cli/storage/disk/assign' + for disk in disks: + body = { + 'owner': self.parameters['node'], + 'disk': disk['name'] + } + + dummy, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg=error) + + def unassign_disks(self, disks): + """ + Unassign disks. + Disk autoassign must be turned off when removing ownership of a disk + """ + api = 'private/cli/storage/disk/removeowner' + for disk in disks: # api requires 1 disk to be removed at a time. + body = { + 'disk': disk['name'] + } + dummy, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg=error) + + def assign_partitions(self, required_partitions): + """ + Assign partitions to node + """ + api = 'private/cli/storage/disk/partition/assign' + for required_partition in required_partitions: + body = { + 'owner': self.parameters['node'], + 'partition': required_partition['partition'] + } + + dummy, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg=error) + + def unassign_partitions(self, required_partitions): + """ + Unassign partitions from node + """ + api = 'private/cli/storage/disk/partition/removeowner' + for required_partition in required_partitions: + body = { + 'partition': required_partition['partition'] + } + + dummy, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg=error) + + def determine_assignment(self, owned_partitions, own_spare_disks): + """ + Determine which action to take + return: dict containing lists of the disks/partitions to be assigned/reassigned + """ + # build dict of partitions and disks to be unassigned/assigned + assignment = { + 'required_unassigned_partitions': [], + 'required_partner_spare_disks': [], + 'required_partner_spare_partitions': [], + 'required_unassigned_disks': [] + } + + 
unassigned_partitions = self.get_partitions(container_type='owned', node=self.parameters['node']) + required_partitions = self.parameters['partition_count'] - (len(owned_partitions) + len(own_spare_disks)) + + # are there enough unassigned partitions to meet the requirement? + if required_partitions > len(unassigned_partitions): + assignment['required_unassigned_partitions'] = unassigned_partitions + # check for unassigned disks + unassigned_disks = self.get_disks(container_type='spare') + required_unassigned_disks = required_partitions - len(unassigned_partitions) + if required_unassigned_disks > len(unassigned_disks): + assignment['required_unassigned_disks'] = unassigned_disks + # not enough unassigned disks + required_partner_spare_partitions = required_unassigned_disks - len(unassigned_disks) + partner_node_name = self.get_partner_node_name() + if partner_node_name: + partner_spare_partitions = self.get_partitions(container_type='spare', node=partner_node_name) + partner_spare_disks = self.get_disks(container_type='spare', node=partner_node_name) + else: + partner_spare_partitions = [] + partner_spare_disks = [] + # check for partner spare partitions + if required_partner_spare_partitions <= (len(partner_spare_partitions) - self.parameters['min_spares']): + # we have enough spare partitions and dont need any disks + assignment['required_partner_spare_partitions'] = partner_spare_partitions[0:required_partner_spare_partitions] + else: + # we don't know if we can take all of the spare partitions as we don't know how many spare disks there are + # spare partions != spare disks + if len(partner_spare_disks) >= self.parameters['min_spares']: + # we have enough spare disks so can use all spare partitions if required + if required_partner_spare_partitions <= len(partner_spare_partitions): + # now we know we have spare disks we can take as may spare partitions as we need + assignment['required_partner_spare_partitions'] = 
partner_spare_partitions[0:required_partner_spare_partitions] + else: + # we need to take some spare disks as well as using any spare partitions + required_partner_spare_disks = required_partner_spare_partitions - len(partner_spare_partitions) + required_partner_spare_partitions_count = required_partner_spare_partitions - required_partner_spare_disks + assignment['required_partner_spare_partitions'] = partner_spare_partitions[0:required_partner_spare_partitions_count] + if required_partner_spare_disks > len(partner_spare_disks) - self.parameters['min_spares']: + self.module.fail_json(msg='Not enough partner spare disks or partner spare partitions remain to fulfill the request') + else: + assignment['required_partner_spare_disks'] = partner_spare_disks[0:required_partner_spare_disks] + else: + self.module.fail_json(msg='Not enough partner spare disks or partner spare partitions remain to fulfill the request') + else: + assignment['required_unassigned_disks'] = unassigned_disks[0:required_unassigned_disks] + else: + assignment['required_unassigned_partitions'] = unassigned_partitions[0:required_partitions] + + return assignment + + def apply(self): + '''Apply action to partitions''' + changed = False + + owned_partitions = self.get_partitions(container_type='owned', node=self.parameters['node']) + own_spare_disks = self.get_disks(container_type='spare', node=self.parameters['node']) + + # are the required partitions more than the currently owned partitions and spare disks, if so we need to assign + if self.parameters['partition_count'] > (len(own_spare_disks) + len(owned_partitions)): + # which action needs to be taken + assignment = self.determine_assignment(owned_partitions=owned_partitions, own_spare_disks=own_spare_disks) + + # now that we have calculated where the partitions and disks come from we can take action + if len(assignment['required_unassigned_partitions']) > 0: + changed = True + if not self.module.check_mode: + 
self.assign_partitions(assignment['required_unassigned_partitions']) + + if len(assignment['required_unassigned_disks']) > 0: + changed = True + if not self.module.check_mode: + self.assign_disks(assignment['required_unassigned_disks']) + + if len(assignment['required_partner_spare_partitions']) > 0: + changed = True + if not self.module.check_mode: + self.unassign_partitions(assignment['required_partner_spare_partitions']) + self.assign_partitions(assignment['required_partner_spare_partitions']) + + if len(assignment['required_partner_spare_disks']) > 0: + changed = True + if not self.module.check_mode: + self.unassign_disks(assignment['required_partner_spare_disks']) + self.assign_disks(assignment['required_partner_spare_disks']) + + # unassign + elif self.parameters['partition_count'] < len(owned_partitions): + spare_partitions = self.get_partitions(container_type='spare', node=self.parameters['node']) + unassign_partitions = len(owned_partitions) - self.parameters['partition_count'] + + if unassign_partitions > len(spare_partitions): + self.module.fail_json(msg='Not enough spare partitions exist fulfill the partition unassignment request') + elif (len(spare_partitions) - unassign_partitions + len(own_spare_disks)) < self.parameters['min_spares']: + self.module.fail_json(msg='Unassignment of specified partitions would leave node with less than the minimum number of spares') + else: + changed = True + if not self.module.check_mode: + self.unassign_partitions(spare_partitions[0:unassign_partitions]) + + self.module.exit_json(changed=changed) + + +def main(): + ''' Create object and call apply ''' + obj_aggr = NetAppOntapPartitions() + obj_aggr.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py new file mode 100644 index 000000000..885bee277 --- /dev/null +++ 
b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ports.py @@ -0,0 +1,583 @@ +#!/usr/bin/python + +# (c) 2019-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_ports +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_ports +short_description: NetApp ONTAP add/remove ports +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Add or remove ports for broadcast domain and portset. + +options: + state: + description: + - Whether the specified port should be added or removed. + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - Name of the SVM. + - Specify this option when operating on portset. + type: str + + names: + description: + - List of ports. + type: list + elements: str + required: true + + resource_name: + description: + - name of the portset or broadcast domain. + type: str + required: true + + resource_type: + description: + - type of the resource to add a port to or remove a port from. + - adding or removing ports in portset requires ONTAP version 9.9 or later in REST + choices: ['broadcast_domain', 'portset'] + required: true + type: str + + ipspace: + description: + - Specify the required ipspace for the broadcast domain. + - A domain ipspace can not be modified after the domain has been created. + type: str + + portset_type: + description: + - Protocols accepted for portset. 
+ choices: ['fcp', 'iscsi', 'mixed'] + type: str + +''' + +EXAMPLES = ''' + + - name: broadcast domain remove port + tags: + - remove + netapp.ontap.na_ontap_ports: + state: absent + names: test-vsim1:e0d-1,test-vsim1:e0d-2 + resource_type: broadcast_domain + resource_name: ansible_domain + hostname: "{{ hostname }}" + username: user + password: password + https: False + + - name: broadcast domain add port + tags: + - add + netapp.ontap.na_ontap_ports: + state: present + names: test-vsim1:e0d-1,test-vsim1:e0d-2 + resource_type: broadcast_domain + resource_name: ansible_domain + ipspace: Default + hostname: "{{ hostname }}" + username: user + password: password + https: False + + - name: portset remove port + tags: + - remove + netapp.ontap.na_ontap_ports: + state: absent + names: lif_2 + resource_type: portset + resource_name: portset_1 + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: user + password: password + https: False + + - name: portset add port + tags: + - add + netapp.ontap.na_ontap_ports: + state: present + names: lif_2 + resource_type: portset + resource_name: portset_1 + portset_type: iscsi + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: user + password: password + https: False + +''' + +RETURN = ''' +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapPorts: + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + 
vserver=dict(required=False, type='str'), + names=dict(required=True, type='list', elements='str'), + resource_name=dict(required=True, type='str'), + resource_type=dict(required=True, type='str', choices=['broadcast_domain', 'portset']), + ipspace=dict(required=False, type='str'), + portset_type=dict(required=False, type='str', choices=['fcp', 'iscsi', 'mixed']), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('resource_type', 'portset', ['vserver']), + ], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.desired_ports = None + self.desired_lifs = None + + if self.use_rest and 'ipspace' not in self.parameters and self.parameters['resource_type'] == 'broadcast_domain': + error_msg = "Error: ipspace space is a required option with REST" + self.module.fail_json(msg=error_msg) + + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9) and self.parameters['resource_type'] == 'portset': + self.module.fail_json(msg='Error: adding or removing ports in portset requires ONTAP version 9.9 or later in REST') + + if 'names' in self.parameters: + self.parameters['names'] = list(set([port.strip() for port in self.parameters['names']])) + if self.use_rest and self.parameters['resource_type'] == 'broadcast_domain': + self.desired_ports = self.get_ports_rest(self.parameters['names']) + if self.use_rest and self.parameters['resource_type'] == 'portset': + self.desired_lifs = self.get_san_lifs_rest(self.parameters['names']) + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + if self.parameters['resource_type'] == 'broadcast_domain': + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + elif 
self.parameters['resource_type'] == 'portset': + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def add_broadcast_domain_ports(self, ports): + """ + Add broadcast domain ports + :param: ports to be added. + """ + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports') + domain_obj.add_new_child("broadcast-domain", self.parameters['resource_name']) + if self.parameters.get('ipspace'): + domain_obj.add_new_child("ipspace", self.parameters['ipspace']) + ports_obj = netapp_utils.zapi.NaElement('ports') + domain_obj.add_child_elem(ports_obj) + for port in ports: + ports_obj.add_new_child('net-qualified-port-name', port) + try: + self.server.invoke_successfully(domain_obj, True) + return True + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error adding port for broadcast domain %s: %s' % + (self.parameters['resource_name'], to_native(error)), + exception=traceback.format_exc()) + + def add_broadcast_domain_ports_rest(self, ports): + """ + Add broadcast domain ports in rest. + :param: ports to be added or moved. + """ + api = 'network/ethernet/ports' + body = { + 'broadcast_domain': { + 'name': self.parameters['resource_name'], + 'ipspace': {'name': self.parameters['ipspace']} + } + } + for port in ports: + dummy, error = rest_generic.patch_async(self.rest_api, api, port['uuid'], body) + if error: + self.module.fail_json(msg=error) + + def remove_broadcast_domain_ports(self, ports): + """ + Deletes broadcast domain ports + :param: ports to be removed. 
+ """ + domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports') + domain_obj.add_new_child("broadcast-domain", self.parameters['resource_name']) + if self.parameters.get('ipspace'): + domain_obj.add_new_child("ipspace", self.parameters['ipspace']) + ports_obj = netapp_utils.zapi.NaElement('ports') + domain_obj.add_child_elem(ports_obj) + for port in ports: + ports_obj.add_new_child('net-qualified-port-name', port) + try: + self.server.invoke_successfully(domain_obj, True) + return True + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error removing port for broadcast domain %s: %s' % + (self.parameters['resource_name'], to_native(error)), + exception=traceback.format_exc()) + + def remove_broadcast_domain_ports_rest(self, ports, ipspace): + body = {'ports': ports} + api = "private/cli/network/port/broadcast-domain/remove-ports" + query = {'broadcast-domain': self.parameters['resource_name'], 'ipspace': ipspace} + response, error = rest_generic.patch_async(self.rest_api, api, None, body, query) + if error: + self.module.fail_json(msg='Error removing ports: %s' % error) + + def get_broadcast_domain_ports(self): + """ + Return details about the broadcast domain ports. + :return: Details about the broadcast domain ports. [] if not found. 
+ :rtype: list + """ + if self.use_rest: + return self.get_broadcast_domain_ports_rest() + domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter') + broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info') + broadcast_domain_info.add_new_child('broadcast-domain', self.parameters['resource_name']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(broadcast_domain_info) + domain_get_iter.add_child_elem(query) + result = self.server.invoke_successfully(domain_get_iter, True) + ports = [] + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info') + domain_ports = domain_info.get_child_by_name('ports') + if domain_ports is not None: + ports = [port.get_child_content('port') for port in domain_ports.get_children()] + return ports + + def get_broadcast_domain_ports_rest(self): + """ + Return details about the broadcast domain ports. + :return: Details about the broadcast domain ports. [] if not found. 
+ :rtype: list + """ + api = 'network/ethernet/broadcast-domains' + query = {'name': self.parameters['resource_name'], 'ipspace.name': self.parameters['ipspace']} + fields = 'ports' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg=error) + ports = [] + if record and 'ports' in record: + ports = ['%s:%s' % (port['node']['name'], port['name']) for port in record['ports']] + return ports + + def remove_portset_ports(self, port, portset_uuid=None): + """ + Removes all existing ports from portset + :return: None + """ + if self.use_rest: + return self.remove_portset_ports_rest(port, portset_uuid) + options = {'portset-name': self.parameters['resource_name'], + 'portset-port-name': port.strip()} + + portset_modify = netapp_utils.zapi.NaElement.create_node_with_children('portset-remove', **options) + + try: + self.server.invoke_successfully(portset_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error removing port in portset %s: %s' % + (self.parameters['resource_name'], to_native(error)), exception=traceback.format_exc()) + + def remove_portset_ports_rest(self, port, portset_uuid): + """ + Removes all existing ports from portset + :return: None + """ + api = 'protocols/san/portsets/%s/interfaces' % portset_uuid + dummy, error = rest_generic.delete_async(self.rest_api, api, self.desired_lifs[port]['uuid']) + if error: + self.module.fail_json(msg=error) + + def add_portset_ports(self, port): + """ + Add the list of ports to portset + :return: None + """ + options = {'portset-name': self.parameters['resource_name'], + 'portset-port-name': port.strip()} + + portset_modify = netapp_utils.zapi.NaElement.create_node_with_children('portset-add', **options) + + try: + self.server.invoke_successfully(portset_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error adding port in portset %s: %s' 
% + (self.parameters['resource_name'], to_native(error)), exception=traceback.format_exc()) + + def add_portset_ports_rest(self, portset_uuid, ports_to_add): + """ + Add the list of ports to portset + :return: None + """ + api = 'protocols/san/portsets/%s/interfaces' % portset_uuid + body = {'records': []} + for port in ports_to_add: + body['records'].append({self.desired_lifs[port]['lif_type']: {'name': port}}) + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg=error) + + def portset_get_iter(self): + """ + Compose NaElement object to query current portset using vserver, portset-name and portset-type parameters + :return: NaElement object for portset-get-iter with query + """ + portset_get = netapp_utils.zapi.NaElement('portset-get-iter') + query = netapp_utils.zapi.NaElement('query') + portset_info = netapp_utils.zapi.NaElement('portset-info') + portset_info.add_new_child('vserver', self.parameters['vserver']) + portset_info.add_new_child('portset-name', self.parameters['resource_name']) + if self.parameters.get('portset_type'): + portset_info.add_new_child('portset-type', self.parameters['portset_type']) + query.add_child_elem(portset_info) + portset_get.add_child_elem(query) + return portset_get + + def portset_get(self): + """ + Get current portset info + :return: List of current ports if query successful, else return [] + """ + portset_get_iter = self.portset_get_iter() + result, ports = None, [] + try: + result = self.server.invoke_successfully(portset_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching portset %s: %s' + % (self.parameters['resource_name'], to_native(error)), + exception=traceback.format_exc()) + # return portset details + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + portset_get_info = result.get_child_by_name('attributes-list').get_child_by_name('portset-info') 
+ if int(portset_get_info.get_child_content('portset-port-total')) > 0: + port_info = portset_get_info.get_child_by_name('portset-port-info') + ports = [port.get_content() for port in port_info.get_children()] + return ports + + def portset_get_rest(self): + """ + Get current portset info + :return: List of current ports if query successful, else return {} + """ + api = 'protocols/san/portsets' + query = { + 'svm.name': self.parameters['vserver'], + 'name': self.parameters['resource_name'] + } + if self.parameters.get('portset_type'): + query['protocol'] = self.parameters['portset_type'] + fields = 'interfaces' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg=error) + current = {} + if record: + current['uuid'] = record['uuid'] + if 'interfaces' in record: + # This will form ports list for fcp, iscsi and mixed protocols. + ports = [lif.get('ip', lif.get('fc'))['name'] for lif in record['interfaces']] + current['ports'] = ports + if not current and self.parameters['state'] == 'present': + error_msg = "Error: Portset '%s' does not exist" % self.parameters['resource_name'] + self.module.fail_json(msg=error_msg) + return current + + def modify_broadcast_domain_ports(self): + """ + compare current and desire ports. Call add or remove ports methods if needed. + :return: None. 
        """
        # NOTE(review): this chunk begins inside modify_broadcast_domain_ports();
        # the `def` line and docstring opening precede the visible range.
        current_ports = self.get_broadcast_domain_ports()
        cd_ports = self.parameters['names']
        if self.parameters['state'] == 'present':
            # ports requested but not yet in the domain
            ports_to_add = [port for port in cd_ports if port not in current_ports]
            if len(ports_to_add) > 0:
                if not self.module.check_mode:
                    if self.use_rest:
                        # REST patches ports by uuid, resolved earlier in __init__
                        self.add_broadcast_domain_ports_rest(self.ports_to_add_from_desired(ports_to_add))
                    else:
                        self.add_broadcast_domain_ports(ports_to_add)
                self.na_helper.changed = True

        if self.parameters['state'] == 'absent':
            # ports requested for removal that are actually in the domain
            ports_to_remove = [port for port in cd_ports if port in current_ports]
            if len(ports_to_remove) > 0:
                if not self.module.check_mode:
                    if self.use_rest:
                        self.remove_broadcast_domain_ports_rest(ports_to_remove, self.parameters['ipspace'])
                    else:
                        self.remove_broadcast_domain_ports(ports_to_remove)
                self.na_helper.changed = True

    def modify_portset_ports(self):
        """Add or remove portset ports so they match self.parameters['names'] for the given state."""
        uuid = None
        if self.use_rest:
            current = self.portset_get_rest()
            if 'uuid' in current:
                uuid = current['uuid']
            current_ports = current['ports'] if 'ports' in current else []
        else:
            current_ports = self.portset_get()
        cd_ports = self.parameters['names']
        if self.parameters['state'] == 'present':
            ports_to_add = [port for port in cd_ports if port not in current_ports]
            if len(ports_to_add) > 0:
                if not self.module.check_mode:
                    if self.use_rest:
                        # REST adds all interfaces in a single POST
                        self.add_portset_ports_rest(uuid, ports_to_add)
                    else:
                        # ZAPI adds one port per call
                        for port in ports_to_add:
                            self.add_portset_ports(port)
                self.na_helper.changed = True

        if self.parameters['state'] == 'absent':
            ports_to_remove = [port for port in cd_ports if port in current_ports]
            if len(ports_to_remove) > 0:
                if not self.module.check_mode:
                    for port in ports_to_remove:
                        self.remove_portset_ports(port, uuid)
                self.na_helper.changed = True

    def get_ports_rest(self, ports):
        """
        Resolve each desired port name to its REST record.
        Fails the module if state is 'present' and any port cannot be found.
        :param ports: list of 'node_name:port_name' strings.
        :return: list of dicts with 'uuid' and 'name' for each port found.
        """
        # list of desired ports not present in the node.
        missing_ports = []
        # list of uuid information of each desired port should present in broadcast domain.
        desired_ports = []
        for port in ports:
            current = self.get_net_port_rest(port)
            if current is None:
                missing_ports.append(port)
            else:
                desired_ports.append(current)
        # Error if any of provided ports are not found.
        if missing_ports and self.parameters['state'] == 'present':
            self.module.fail_json(msg='Error: ports: %s not found' % ', '.join(missing_ports))
        return desired_ports

    def get_net_port_rest(self, port):
        """
        Look up a single ethernet port by 'node_name:port_name'.
        :return: dict with 'uuid' and normalized 'name', or None if not found.
        """
        if ':' not in port:
            error_msg = "Error: Invalid value specified for port: %s, provide port name as node_name:port_name" % port
            self.module.fail_json(msg=error_msg)
        # NOTE(review): split() raises ValueError if the name contains more than
        # one ':' — assumes exactly one separator; confirm input validation upstream.
        node_name, port_name = port.split(':')
        api = 'network/ethernet/ports'
        query = {
            'name': port_name,
            'node.name': node_name,
        }
        fields = 'name,uuid'
        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
        if error:
            self.module.fail_json(msg=error)
        if record:
            current = {'uuid': record['uuid'], 'name': '%s:%s' % (record['node']['name'], record['name'])}
            return current
        return None

    def ports_to_add_from_desired(self, ports):
        """
        Map port names to the uuids resolved in self.desired_ports.
        :param ports: list of 'node:port' names to add.
        :return: list of {'uuid': ...} dicts for the matching desired ports.
        """
        ports_to_add = []
        for port in ports:
            for port_to_add in self.desired_ports:
                if port == port_to_add['name']:
                    ports_to_add.append({'uuid': port_to_add['uuid']})
        return ports_to_add

    # NOTE(review): get_san_lifs_rest() starts here; its body continues in the
    # next chunk and is left unchanged there.
    def get_san_lifs_rest(self, san_lifs):
        # list of lifs not present in the vserver
        missing_lifs = []
        # dict with each key is lif name, value contains lif type - fc or ip and uuid.
+ desired_lifs = {} + record, record2, error, error2 = None, None, None, None + for lif in san_lifs: + if self.parameters.get('portset_type') in [None, 'mixed', 'iscsi']: + record, error = self.get_san_lif_type(lif, 'ip') + if self.parameters.get('portset_type') in [None, 'mixed', 'fcp']: + record2, error2 = self.get_san_lif_type(lif, 'fc') + if error is None and error2 is not None and record: + # ignore error on fc if ip interface is found + error2 = None + if error2 is None and error is not None and record2: + # ignore error on ip if fc interface is found + error = None + if error or error2: + errors = [to_native(err) for err in (error, error2) if err] + self.module.fail_json(msg='Error fetching lifs details for %s: %s' % (lif, ' - '.join(errors)), + exception=traceback.format_exc()) + if record: + desired_lifs[lif] = {'lif_type': 'ip', 'uuid': record['uuid']} + if record2: + desired_lifs[lif] = {'lif_type': 'fc', 'uuid': record2['uuid']} + if record is None and record2 is None: + missing_lifs.append(lif) + if missing_lifs and self.parameters['state'] == 'present': + error_msg = 'Error: lifs: %s of type %s not found in vserver %s' % \ + (', '.join(missing_lifs), self.parameters.get('portset_type', 'fcp or iscsi'), self.parameters['vserver']) + self.module.fail_json(msg=error_msg) + return desired_lifs + + def get_san_lif_type(self, lif, portset_type): + api = 'network/%s/interfaces' % portset_type + query = {'name': lif, 'svm.name': self.parameters['vserver']} + record, error = rest_generic.get_one_record(self.rest_api, api, query) + return record, error + + def apply(self): + if self.parameters['resource_type'] == 'broadcast_domain': + self.modify_broadcast_domain_ports() + elif self.parameters['resource_type'] == 'portset': + self.modify_portset_ports() + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + portset_obj = NetAppOntapPorts() + portset_obj.apply() + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py new file mode 100644 index 000000000..2132a1b0b --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_portset.py @@ -0,0 +1,423 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +short_description: NetApp ONTAP Create/Delete portset +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete ONTAP portset, modify ports in a portset. + - Modify type(protocol) is not supported in ONTAP. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_portset +options: + state: + description: + - If you want to create a portset. + default: present + type: str + vserver: + required: true + description: + - Name of the SVM. + type: str + name: + required: true + description: + - Name of the port set to create. + type: str + type: + description: + - Required for create in ZAPI. + - Default value is mixed if not specified at the time of creation in REST. + - Protocols accepted for this portset. + choices: ['fcp', 'iscsi', 'mixed'] + type: str + force: + description: + - If 'false' or not specified, the request will fail if there are any igroups bound to this portset. + - If 'true', forcibly destroy the portset, even if there are existing igroup bindings. + type: bool + default: False + ports: + description: + - Specify the ports associated with this portset. Should be comma separated. + - It represents the expected state of a list of ports at any time, and replaces the current value of ports. + - Adds a port if it is specified in expected state but not in current state. + - Deletes a port if it is in current state but not in expected state. 
+ type: list + elements: str +version_added: 2.8.0 + +''' + +EXAMPLES = """ + - name: Create Portset + netapp.ontap.na_ontap_portset: + state: present + vserver: vserver_name + name: portset_name + ports: a1 + type: "{{ protocol type }}" + username: "{{ netapp username }}" + password: "{{ netapp password }}" + hostname: "{{ netapp hostname }}" + + - name: Modify ports in portset + netapp.ontap.na_ontap_portset: + state: present + vserver: vserver_name + name: portset_name + ports: a1,a2 + username: "{{ netapp username }}" + password: "{{ netapp password }}" + hostname: "{{ netapp hostname }}" + + - name: Delete Portset + netapp.ontap.na_ontap_portset: + state: absent + vserver: vserver_name + name: portset_name + force: True + type: "{{ protocol type }}" + username: "{{ netapp username }}" + password: "{{ netapp password }}" + hostname: "{{ netapp hostname }}" +""" + +RETURN = """ +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPPortset: + """ + Methods to create or delete portset + """ + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + type=dict(required=False, type='str', choices=[ + 'fcp', 'iscsi', 'mixed']), + force=dict(required=False, type='bool', default=False), + ports=dict(required=False, type='list', elements='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + 
supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + if 'ports' in self.parameters: + self.parameters['ports'] = list(set([port.strip() for port in self.parameters['ports']])) + if '' in self.parameters['ports'] and self.parameters['state'] == 'present': + self.module.fail_json(msg="Error: invalid value specified for ports") + + # Setup REST API. + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.uuid, self.lifs_info = None, {} + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + msg = 'REST requires ONTAP 9.9.1 or later for portset APIs.' + if self.parameters['use_rest'].lower() == 'always': + self.module.fail_json(msg='Error: %s' % msg) + if self.parameters['use_rest'].lower() == 'auto': + self.module.warn('Falling back to ZAPI: %s' % msg) + self.use_rest = False + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def portset_get_iter(self): + """ + Compose NaElement object to query current portset using vserver, portset-name and portset-type parameters + :return: NaElement object for portset-get-iter with query + """ + portset_get = netapp_utils.zapi.NaElement('portset-get-iter') + query = netapp_utils.zapi.NaElement('query') + portset_info = netapp_utils.zapi.NaElement('portset-info') + portset_info.add_new_child('vserver', self.parameters['vserver']) + portset_info.add_new_child('portset-name', self.parameters['name']) + query.add_child_elem(portset_info) + portset_get.add_child_elem(query) + return portset_get + + def portset_get(self): + """ + Get current portset info + :return: Dictionary of current portset details if query successful, else return None + """ + if self.use_rest: + return 
self.portset_get_rest() + portset_get_iter = self.portset_get_iter() + result, portset_info = None, dict() + try: + result = self.server.invoke_successfully(portset_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching portset %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + # return portset details + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + portset_get_info = result.get_child_by_name('attributes-list').get_child_by_name('portset-info') + portset_info['type'] = portset_get_info.get_child_content('portset-type') + if int(portset_get_info.get_child_content('portset-port-total')) > 0: + ports = portset_get_info.get_child_by_name('portset-port-info') + portset_info['ports'] = [port.get_content() for port in ports.get_children()] + else: + portset_info['ports'] = [] + return portset_info + return None + + def create_portset(self): + """ + Create a portset + """ + if self.use_rest: + return self.create_portset_rest() + if self.parameters.get('type') is None: + self.module.fail_json(msg='Error: Missing required parameter for create (type)') + portset_info = netapp_utils.zapi.NaElement("portset-create") + portset_info.add_new_child("portset-name", self.parameters['name']) + portset_info.add_new_child("portset-type", self.parameters['type']) + try: + self.server.invoke_successfully( + portset_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error creating portset %s: %s" % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_portset(self): + """ + Delete a portset + """ + if self.use_rest: + return self.delete_portset_rest() + portset_info = netapp_utils.zapi.NaElement("portset-destroy") + portset_info.add_new_child("portset-name", self.parameters['name']) + if self.parameters.get('force'): + 
portset_info.add_new_child("force", str(self.parameters['force'])) + try: + self.server.invoke_successfully( + portset_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error deleting portset %s: %s" % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def remove_ports(self, ports): + """ + Removes all existing ports from portset + :return: None + """ + for port in ports: + self.modify_port(port, 'portset-remove', 'removing') + + def add_ports(self, ports=None): + """ + Add the list of ports to portset + :return: None + """ + if ports is None: + ports = self.parameters.get('ports') + # don't add if ports is None + if ports is None: + return + for port in ports: + self.modify_port(port, 'portset-add', 'adding') + + def modify_port(self, port, zapi, action): + """ + Add or remove an port to/from a portset + """ + options = {'portset-name': self.parameters['name'], + 'portset-port-name': port} + + portset_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options) + + try: + self.server.invoke_successfully(portset_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error %s port in portset %s: %s' % (action, self.parameters['name'], + to_native(error)), + exception=traceback.format_exc()) + + def portset_get_rest(self): + api = "protocols/san/portsets" + query = {'name': self.parameters['name'], 'svm.name': self.parameters['vserver']} + fields = 'uuid,protocol,interfaces' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg='Error fetching portset %s: %s' + % (self.parameters['name'], to_native(error))) + portset_info = None + if record: + portset_info = self.form_portset_info(record) + return portset_info + + def form_portset_info(self, record): + self.uuid = record['uuid'] + # if type is not set, assign current type + # for avoiding 
incompatible network interface error in modify portset. + if self.parameters.get('type') is None: + self.parameters['type'] = record['protocol'] + portset_info = { + 'type': record['protocol'], + 'ports': [] + } + if 'interfaces' in record: + for lif in record['interfaces']: + for key, value in lif.items(): + if key in ['fc', 'ip']: + # add current lifs type and uuid to self.lifs for modify and delete purpose. + self.lifs_info[value['name']] = {'lif_type': key, 'uuid': value['uuid']} + # This will form ports list for fcp, iscsi and mixed protocols. + portset_info['ports'].append(value['name']) + return portset_info + + def create_portset_rest(self): + api = "protocols/san/portsets" + body = {'name': self.parameters['name'], 'svm.name': self.parameters['vserver']} + if 'type' in self.parameters: + body['protocol'] = self.parameters['type'] + if self.lifs_info: + body['interfaces'] = [{self.lifs_info[lif]['lif_type']: {'name': lif}} for lif in self.lifs_info] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error creating portset %s: %s" % + (self.parameters['name'], to_native(error))) + + def delete_portset_rest(self): + api = "protocols/san/portsets" + # Default value is False if 'force' not in parameters. 
+ query = {'allow_delete_while_bound': self.parameters.get('force', False)} + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid, query) + if error: + self.module.fail_json(msg="Error deleting portset %s: %s" % + (self.parameters['name'], to_native(error))) + + def modify_portset_rest(self, ports_to_add, ports_to_remove): + if ports_to_add: + self.add_ports_to_portset(ports_to_add) + for port in ports_to_remove: + self.remove_port_from_portset(port) + + def add_ports_to_portset(self, ports_to_add): + api = 'protocols/san/portsets/%s/interfaces' % self.uuid + body = {'records': [{self.lifs_info[port]['lif_type']: {'name': port}} for port in ports_to_add]} + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error adding port in portset %s: %s' % (self.parameters['name'], + to_native(error))) + + def remove_port_from_portset(self, port_to_remove): + api = 'protocols/san/portsets/%s/interfaces' % self.uuid + dummy, error = rest_generic.delete_async(self.rest_api, api, self.lifs_info[port_to_remove]['uuid']) + if error: + self.module.fail_json(msg='Error removing port in portset %s: %s' % (self.parameters['name'], + to_native(error))) + + def get_san_lifs_rest(self, san_lifs): + # list of lifs not present in the vserver + missing_lifs = [] + record, record2, error, error2 = None, None, None, None + for lif in san_lifs: + if self.parameters.get('type') in [None, 'mixed', 'iscsi']: + record, error = self.get_san_lif_type_uuid(lif, 'ip') + if self.parameters.get('type') in [None, 'mixed', 'fcp']: + record2, error2 = self.get_san_lif_type_uuid(lif, 'fc') + if error is None and error2 is not None and record: + # ignore error on fc if ip interface is found + error2 = None + if error2 is None and error is not None and record2: + # ignore error on ip if fc interface is found + error = None + if error or error2: + errors = [to_native(err) for err in (error, error2) if err] + 
self.module.fail_json(msg='Error fetching lifs details for %s: %s' % (lif, ' - '.join(errors)), + exception=traceback.format_exc()) + if record: + self.lifs_info[lif] = {'lif_type': 'ip', 'uuid': record['uuid']} + if record2: + self.lifs_info[lif] = {'lif_type': 'fc', 'uuid': record2['uuid']} + if record is None and record2 is None: + missing_lifs.append(lif) + if missing_lifs and self.parameters['state'] == 'present': + error_msg = 'Error: lifs: %s of type %s not found in vserver %s' % \ + (', '.join(missing_lifs), self.parameters.get('type', 'fcp or iscsi'), self.parameters['vserver']) + self.module.fail_json(msg=error_msg) + + def get_san_lif_type_uuid(self, lif, portset_type): + api = 'network/%s/interfaces' % portset_type + query = {'name': lif, 'svm.name': self.parameters['vserver']} + record, error = rest_generic.get_one_record(self.rest_api, api, query) + return record, error + + def apply(self): + """ + Applies action from playbook + """ + current, modify = self.portset_get(), None + # get lifs type and uuid which is not present in current. + if self.use_rest and self.parameters['state'] == 'present': + self.get_san_lifs_rest([port for port in self.parameters['ports'] if port not in self.lifs_info]) + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + if self.parameters.get('type') and self.parameters['type'] != current['type']: + self.module.fail_json(msg="modify protocol(type) not supported and %s already exists in vserver %s under different type" % + (self.parameters['name'], self.parameters['vserver'])) + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_portset() + # REST handles create and add ports in create api call itself. 
+ if not self.use_rest: + self.add_ports() + elif cd_action == 'delete': + self.delete_portset() + elif modify: + add_ports = set(self.parameters['ports']) - set(current['ports']) + remove_ports = set(current['ports']) - set(self.parameters['ports']) + if self.use_rest: + self.modify_portset_rest(add_ports, remove_ports) + else: + self.add_ports(add_ports) + self.remove_ports(remove_ports) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + portset_obj = NetAppONTAPPortset() + portset_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_publickey.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_publickey.py new file mode 100644 index 000000000..420238389 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_publickey.py @@ -0,0 +1,302 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_publickey +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_publickey + +short_description: NetApp ONTAP publickey configuration +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 21.7.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Add, modify, or remove publickeys. + - Requires ONTAP 9.7 or later, and only supports REST. + +options: + state: + description: + - Whether the specified publickey should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + account: + description: + - The name of the user account. + required: true + type: str + comment: + description: + - Optional comment for the public key. 
+ type: str + delete_all: + description: + - If index is not present, with state=absent, delete all public key for this user account. + type: bool + default: false + index: + description: + - Index number for the public key. + - If index is not present, with state=present, the public key is always added, using the next available index. + - If index is not present, with state=present, the module is not idempotent. + - If index is not present, with state=absent, if only one key is found, it is deleted. Otherwise an error is reported. + - See also C(delete_all) option. + type: int + public_key: + description: + - The public key. + type: str + vserver: + description: + - The name of the vserver to use. + - Omit this option for cluster scoped user accounts. + type: str + +notes: + - This module supports check_mode. + - This module is not idempotent if index is omitted. +''' + +EXAMPLES = """ + + - name: Create publickey + netapp.ontap.na_ontap_publickey: + state: present + account: SampleUser + index: 0 + public_key: "{{ netapp_publickey }}" + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete single publickey + netapp.ontap.na_ontap_publickey: + state: absent + account: SampleUser + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify single publickey + netapp.ontap.na_ontap_publickey: + state: present + account: SampleUser + comment: ssh key for XXXX + index: 0 + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +cd_action: + description: whether a public key is created or deleted. + returned: success + type: str + +modify: + description: attributes that were modified if the key already exists. 
+ returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapPublicKey: + """ + Common operations to manage public keys. + """ + + def __init__(self): + self.use_rest = False + argument_spec = netapp_utils.na_ontap_host_argument_spec() + argument_spec.update(dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + account=dict(required=True, type='str'), + comment=dict(type='str'), + delete_all=dict(type='bool', default=False), + index=dict(type='int'), + public_key=dict(type='str'), + vserver=dict(type='str'), + )) + + self.module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ('delete_all', 'index') + ], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + # REST API is required + self.rest_api = OntapRestAPI(self.module) + # check version + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_publickey', 9, 7) + + def get_public_keys(self): + api = 'security/authentication/publickeys' + query = { + 'account.name': self.parameters['account'], + 'fields': 'account,owner,index,public_key,comment' + } + if self.parameters.get('vserver') is None: + # vserser is empty for cluster + query['scope'] = 'cluster' + else: + query['owner.name'] = self.parameters['vserver'] + + if self.parameters.get('index') is not None: + query['index'] = self.parameters['index'] + + response, error = self.rest_api.get(api, query) + if self.parameters.get('index') is not None: + record, error = 
rrh.check_for_0_or_1_records(api, response, error) + records = [record] + else: + records, error = rrh.check_for_0_or_more_records(api, response, error) + if error: + msg = "Error in get_public_key: %s" % error + self.module.fail_json(msg=msg) + if records is None or records == [None]: + records = [] + # flatten {'account': {'name': 'some_name'}} into {'account': 'some_name'} to match input parameters + return [dict([(k, v if k != 'account' else v['name']) for k, v in record.items()]) for record in records] + + def create_public_key(self): + api = 'security/authentication/publickeys' + body = { + 'account.name': self.parameters['account'], + 'public_key': self.parameters['public_key'] + } + if self.parameters.get('vserver') is not None: + # vserser is empty for cluster + body['owner.name'] = self.parameters['vserver'] + for attr in ('comment', 'index'): + value = self.parameters.get(attr) + if value is not None: + body[attr] = value + + dummy, error = self.rest_api.post(api, body) + if error: + msg = "Error in create_public_key: %s" % error + self.module.fail_json(msg=msg) + + def modify_public_key(self, current, modify): + # not supported in 2.6 + # sourcery skip: dict-comprehension + api = 'security/authentication/publickeys/%s/%s/%d' % (current['owner']['uuid'], current['account'], current['index']) + body = {} + modify_copy = dict(modify) + for key in modify: + if key in ('comment', 'public_key'): + body[key] = modify_copy.pop(key) + if modify_copy: + msg = 'Error: attributes not supported in modify: %s' % modify_copy + self.module.fail_json(msg=msg) + if not body: + msg = 'Error: nothing to change - modify called with: %s' % modify + self.module.fail_json(msg=msg) + if 'public_key' not in body: + # if not present, REST API reports 502 Server Error: Proxy Error for url + body['public_key'] = current['public_key'] + + dummy, error = self.rest_api.patch(api, body) + if error: + msg = "Error in modify_public_key: %s" % error + self.module.fail_json(msg=msg) + + 
    def delete_public_key(self, current):
        """Delete one public key record, addressed by owner uuid, account name and index."""
        api = 'security/authentication/publickeys/%s/%s/%d' % (current['owner']['uuid'], current['account'], current['index'])
        dummy, error = self.rest_api.delete(api)
        if error:
            msg = "Error in delete_public_key: %s" % error
            self.module.fail_json(msg=msg)

    def get_actions(self):
        """Determines whether a create, delete, modify action is required
        If index is provided, we expect to find 0 or 1 record.
        If index is not provided:
        1. As documented in ONTAP, a create without index should add a new public key.
           This is not idempotent, and this rules out a modify operation.
        2. When state is absent, if a single record is found, we assume a delete.
        3. When state is absent, if more than one record is found, a delete action is rejected with 1 exception:
           we added a delete_all option, so that all existing keys can be deleted.

        :return: (cd_action, modify, records) — records is needed by apply() for delete/delete_all.
        """
        cd_action, current, modify = None, None, None
        if self.parameters['state'] == 'present' and self.parameters.get('index') is None:
            # always create, by keeping current as None
            self.module.warn('Module is not idempotent if index is not provided with state=present.')
            records = []
        else:
            records = self.get_public_keys()
        if len(records) > 1:
            # multiple matches: only an explicit delete_all may proceed
            if self.parameters['state'] == 'absent' and self.parameters.get('delete_all'):
                cd_action = 'delete_all'
                self.na_helper.changed = True
            else:
                msg = 'index is required as more than one public_key exists for user account %s: ' % self.parameters['account']
                msg += str(records)
                self.module.fail_json(msg='Error: %s' % msg)
        elif len(records) == 1:
            current = records[0]

        if cd_action is None:
            cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None:
            if current and 'comment' not in current:
                # force an entry as REST does not return anything if no comment was set
                current['comment'] = ''
            modify = self.na_helper.get_modified_attributes(current, self.parameters)

        return cd_action, modify, records
def apply(self): + cd_action, modify, records = self.get_actions() + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_public_key() + elif cd_action in ('delete', 'delete_all'): + # there is exactly 1 record for delete + # and 2 or more records for delete_all + for record in records: + self.delete_public_key(record) + elif modify: + # there is exactly 1 record for modify + self.modify_public_key(records[0], modify) + + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + obj = NetAppOntapPublicKey() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py new file mode 100644 index 000000000..62499fc5e --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_adaptive_policy_group.py @@ -0,0 +1,323 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: na_ontap_qos_adaptive_policy_group +short_description: NetApp ONTAP Adaptive Quality of Service policy group. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +version_added: 2.9.0 +author: NetApp Ansible Team (@joshedmonds) + +description: + - Create, destroy, modify, or rename an Adaptive QoS policy group on NetApp ONTAP. Module is based on the standard QoS policy group module. + +options: + state: + choices: ['present', 'absent'] + description: + - Whether the specified policy group should exist or not. + default: 'present' + type: str + + name: + description: + - The name of the policy group to manage. 
+ type: str + required: true + + vserver: + description: + - Name of the vserver to use. + type: str + required: true + + from_name: + description: + - Name of the existing policy group to be renamed to name. + type: str + + absolute_min_iops: + description: + - Absolute minimum IOPS defined by this policy. + type: str + + expected_iops: + description: + - Minimum expected IOPS defined by this policy. + type: str + + peak_iops: + description: + - Maximum possible IOPS per allocated or used TB|GB. + type: str + + peak_iops_allocation: + choices: ['allocated_space', 'used_space'] + description: + - Whether peak_iops is specified by allocated or used space. + default: 'used_space' + type: str + + force: + type: bool + default: False + description: + - Setting to 'true' forces the deletion of the workloads associated with the policy group along with the policy group. +''' + +EXAMPLES = """ + - name: create adaptive qos policy group + netapp.ontap.na_ontap_qos_adaptive_policy_group: + state: present + name: aq_policy_1 + vserver: policy_vserver + absolute_min_iops: 70IOPS + expected_iops: 100IOPS/TB + peak_iops: 250IOPS/TB + peak_iops_allocation: allocated_space + hostname: 10.193.78.30 + username: admin + password: netapp1! + + - name: modify adaptive qos policy group expected iops + netapp.ontap.na_ontap_qos_adaptive_policy_group: + state: present + name: aq_policy_1 + vserver: policy_vserver + absolute_min_iops: 70IOPS + expected_iops: 125IOPS/TB + peak_iops: 250IOPS/TB + peak_iops_allocation: allocated_space + hostname: 10.193.78.30 + username: admin + password: netapp1! + + - name: modify adaptive qos policy group peak iops allocation + netapp.ontap.na_ontap_qos_adaptive_policy_group: + state: present + name: aq_policy_1 + vserver: policy_vserver + absolute_min_iops: 70IOPS + expected_iops: 125IOPS/TB + peak_iops: 250IOPS/TB + peak_iops_allocation: used_space + hostname: 10.193.78.30 + username: admin + password: netapp1! 
+ + - name: delete qos policy group + netapp.ontap.na_ontap_qos_adaptive_policy_group: + state: absent + name: aq_policy_1 + vserver: policy_vserver + hostname: 10.193.78.30 + username: admin + password: netapp1! + +""" + +RETURN = """ +""" + +import traceback + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +class NetAppOntapAdaptiveQosPolicyGroup: + """ + Create, delete, modify and rename a policy group. + """ + def __init__(self): + """ + Initialize the Ontap qos policy group class. + """ + self.argument_spec = netapp_utils.na_ontap_zapi_only_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + vserver=dict(required=True, type='str'), + absolute_min_iops=dict(required=False, type='str'), + expected_iops=dict(required=False, type='str'), + peak_iops=dict(required=False, type='str'), + peak_iops_allocation=dict(choices=['allocated_space', 'used_space'], default='used_space'), + force=dict(required=False, type='bool', default=False) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.na_helper.module_replaces('na_ontap_qos_policy_group', self.module) + msg = 'The module only supports ZAPI and is deprecated; netapp.ontap.na_ontap_qos_policy_group should be used instead.' 
    def get_policy_group(self, policy_group_name=None):
        """
        Return details of a policy group.

        :param policy_group_name: policy group name; defaults to self.parameters['name'].
        :return: policy group details, or None when no unique match exists.
        :rtype: dict.
        """
        if policy_group_name is None:
            policy_group_name = self.parameters['name']
        # build a qos-adaptive-policy-group-get-iter request filtered on name and vserver
        policy_group_get_iter = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-get-iter')
        policy_group_info = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-info')
        policy_group_info.add_new_child('policy-group', policy_group_name)
        policy_group_info.add_new_child('vserver', self.parameters['vserver'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(policy_group_info)
        policy_group_get_iter.add_child_elem(query)
        result = self.server.invoke_successfully(policy_group_get_iter, True)
        policy_group_detail = None

        # only a single exact match counts; 0 records means the group does not exist
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
            policy_info = result.get_child_by_name('attributes-list').get_child_by_name('qos-adaptive-policy-group-info')

            policy_group_detail = {
                'name': policy_info.get_child_content('policy-group'),
                'vserver': policy_info.get_child_content('vserver'),
                'absolute_min_iops': policy_info.get_child_content('absolute-min-iops'),
                'expected_iops': policy_info.get_child_content('expected-iops'),
                'peak_iops': policy_info.get_child_content('peak-iops'),
                'peak_iops_allocation': policy_info.get_child_content('peak-iops-allocation')
            }
        return policy_group_detail
+ """ + policy_group = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-create') + policy_group.add_new_child('policy-group', self.parameters['name']) + policy_group.add_new_child('vserver', self.parameters['vserver']) + if self.parameters.get('absolute_min_iops'): + policy_group.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops']) + if self.parameters.get('expected_iops'): + policy_group.add_new_child('expected-iops', self.parameters['expected_iops']) + if self.parameters.get('peak_iops'): + policy_group.add_new_child('peak-iops', self.parameters['peak_iops']) + if self.parameters.get('peak_iops_allocation'): + policy_group.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation']) + try: + self.server.invoke_successfully(policy_group, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating adaptive qos policy group %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_policy_group(self, policy_group=None): + """ + delete an existing policy group. + :param policy_group: policy group name. + """ + if policy_group is None: + policy_group = self.parameters['name'] + policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-delete') + policy_group_obj.add_new_child('policy-group', policy_group) + if self.parameters.get('force'): + policy_group_obj.add_new_child('force', str(self.parameters['force'])) + try: + self.server.invoke_successfully(policy_group_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting adaptive qos policy group %s: %s' % + (policy_group, to_native(error)), + exception=traceback.format_exc()) + + def modify_policy_group(self): + """ + Modify policy group. 
+ """ + policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-modify') + policy_group_obj.add_new_child('policy-group', self.parameters['name']) + if self.parameters.get('absolute_min_iops'): + policy_group_obj.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops']) + if self.parameters.get('expected_iops'): + policy_group_obj.add_new_child('expected-iops', self.parameters['expected_iops']) + if self.parameters.get('peak_iops'): + policy_group_obj.add_new_child('peak-iops', self.parameters['peak_iops']) + if self.parameters.get('peak_iops_allocation'): + policy_group_obj.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation']) + try: + self.server.invoke_successfully(policy_group_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying adaptive qos policy group %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def rename_policy_group(self): + """ + Rename policy group name. + """ + rename_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-rename') + rename_obj.add_new_child('new-name', self.parameters['name']) + rename_obj.add_new_child('policy-group-name', self.parameters['from_name']) + try: + self.server.invoke_successfully(rename_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error renaming adaptive qos policy group %s: %s' % + (self.parameters['from_name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_helper(self, modify): + """ + helper method to modify policy group. + :param modify: modified attributes. 
+ """ + for attribute in modify.keys(): + if attribute in ['absolute_min_iops', 'expected_iops', 'peak_iops', 'peak_iops_allocation']: + self.modify_policy_group() + + def apply(self): + """ + Run module based on playbook + """ + current, rename = self.get_policy_group(), None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + # form current with from_name. + current = self.get_policy_group(self.parameters['from_name']) + if current is None: + self.module.fail_json(msg='Error: qos adaptive policy igroup with from_name=%s not found' % self.parameters.get('from_name')) + # allow for rename and check for modify with current from from_name. + rename, cd_action = True, None + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.na_helper.changed and not self.module.check_mode: + if rename: + self.rename_policy_group() + if cd_action == 'create': + self.create_policy_group() + elif cd_action == 'delete': + self.delete_policy_group() + elif modify: + self.modify_helper(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + '''Apply vserver operations from playbook''' + qos_policy_group = NetAppOntapAdaptiveQosPolicyGroup() + qos_policy_group.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py new file mode 100644 index 000000000..8628efd46 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qos_policy_group.py @@ -0,0 +1,579 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_qos_policy_group +''' + +from __future__ import 
absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
module: na_ontap_qos_policy_group
short_description: NetApp ONTAP manage policy group in Quality of Service.
extends_documentation_fragment:
  - netapp.ontap.netapp.na_ontap
version_added: 2.8.0
author: NetApp Ansible Team (@carchi8py)

description:
  - Create, destroy, modify, or rename QoS policy group on NetApp ONTAP.
  - With ZAPI, only fixed QoS policy group is supported.
  - With REST, both fixed and adaptive QoS policy group are supported.

options:
  state:
    choices: ['present', 'absent']
    description:
      - Whether the specified policy group should exist or not.
    default: 'present'
    type: str

  name:
    description:
      - The name of the policy group to manage.
    required: true
    type: str

  vserver:
    description:
      - Name of the vserver to use.
    required: true
    type: str

  from_name:
    description:
      - Name of the existing policy group to be renamed to name.
    type: str

  max_throughput:
    description:
      - Maximum throughput defined by this policy.
      - Not supported with REST, use C(fixed_qos_options).
    type: str

  min_throughput:
    description:
      - Minimum throughput defined by this policy.
      - Not supported with REST, use C(fixed_qos_options).
    type: str

  is_shared:
    description:
      - Whether the SLOs of the policy group are shared between the workloads or if the SLOs are applied separately to each workload.
      - Not supported with REST, use C(fixed_qos_options).
    type: bool
    version_added: 20.12.0

  force:
    type: bool
    description:
      - Setting to 'true' forces the deletion of the workloads associated with the policy group along with the policy group.
      - Not supported with REST.

  fixed_qos_options:
    version_added: 21.19.0
    type: dict
    description:
      - Set Minimum and Maximum throughput defined by this policy.
      - Only supported with REST.
      - Required one of the throughput options when creating qos_policy.
+ suboptions: + capacity_shared: + description: + - Whether the SLOs of the policy group are shared between the workloads or if the SLOs are applied separately to each workload. + - Default value is False if not used in creating qos policy. + type: bool + max_throughput_iops: + description: + - Maximum throughput defined by this policy. It is specified in terms of IOPS. + - 0 means no maximum throughput is enforced. + type: int + max_throughput_mbps: + description: + - Maximum throughput defined by this policy. It is specified in terms of Mbps. + - 0 means no maximum throughput is enforced. + type: int + min_throughput_iops: + description: + - Minimum throughput defined by this policy. It is specified in terms of IOPS. + - 0 means no minimum throughput is enforced. + - These floors are not guaranteed on non-AFF platforms or when FabricPool tiering policies are set. + type: int + min_throughput_mbps: + description: + - Minimum throughput defined by this policy. It is specified in terms of Mbps. + - 0 means no minimum throughput is enforced. + - Requires ONTAP 9.8 or later, and REST support. + type: int + + adaptive_qos_options: + version_added: 21.19.0 + type: dict + description: + - Adaptive QoS policy-groups define measurable service level objectives (SLOs) that adjust based on the storage object used space + and the storage object allocated space. + - Only supported with REST. + suboptions: + absolute_min_iops: + description: + - Specifies the absolute minimum IOPS that is used as an override when the expected_iops is less than this value. + - These floors are not guaranteed on non-AFF platforms or when FabricPool tiering policies are set. + type: int + required: true + expected_iops: + description: + - Expected IOPS. Specifies the minimum expected IOPS per TB allocated based on the storage object allocated size. + - These floors are not guaranteed on non-AFF platforms or when FabricPool tiering policies are set. 
+ type: int + required: true + peak_iops: + description: + - Peak IOPS. Specifies the maximum possible IOPS per TB allocated based on the storage object allocated size or + the storage object used size. + type: int + required: true + block_size: + description: + - Specifies the block size. + - Requires ONTAP 9.10.1 or later. + type: str + required: false + choices: ['any', '4k', '8k', '16k', '32k', '64k', '128k'] + version_added: 22.6.0 +''' + +EXAMPLES = """ + - name: create qos policy group in ZAPI. + netapp.ontap.na_ontap_qos_policy_group: + state: present + name: policy_1 + vserver: policy_vserver + max_throughput: 800KB/s,800iops + min_throughput: 100iops + hostname: 10.193.78.30 + username: admin + password: netapp1! + use_rest: never + + - name: modify qos policy group max throughput in ZAPI. + netapp.ontap.na_ontap_qos_policy_group: + state: present + name: policy_1 + vserver: policy_vserver + max_throughput: 900KB/s,800iops + min_throughput: 100iops + hostname: 10.193.78.30 + username: admin + password: netapp1! + use_rest: never + + - name: delete qos policy group + netapp.ontap.na_ontap_qos_policy_group: + state: absent + name: policy_1 + vserver: policy_vserver + hostname: 10.193.78.30 + username: admin + password: netapp1! + + - name: create qos policy group in REST. + netapp.ontap.na_ontap_qos_policy_group: + state: present + name: policy_1 + vserver: policy_vserver + hostname: 10.193.78.30 + username: admin + password: netapp1! + use_rest: always + fixed_qos_options: + max_throughput_iops: 800 + max_throughput_mbps: 200 + min_throughput_iops: 500 + min_throughput_mbps: 100 + capacity_shared: True + + - name: modify qos policy max_throughput in REST. + netapp.ontap.na_ontap_qos_policy_group: + state: present + name: policy_1 + vserver: policy_vserver + hostname: 10.193.78.30 + username: admin + password: netapp1! 
+ use_rest: always + fixed_qos_options: + max_throughput_iops: 1000 + max_throughput_mbps: 300 + + - name: create adaptive qos policy group in REST. + netapp.ontap.na_ontap_qos_policy_group: + state: present + name: adaptive_policy + vserver: policy_vserver + hostname: 10.193.78.30 + username: admin + password: netapp1! + use_rest: always + adaptive_qos_options: + absolute_min_iops: 100 + expected_iops: 200 + peak_iops: 500 + +""" + +RETURN = """ +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapQosPolicyGroup: + """ + Create, delete, modify and rename a policy group. + """ + def __init__(self): + """ + Initialize the Ontap qos policy group class. 
+ """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + vserver=dict(required=True, type='str'), + max_throughput=dict(required=False, type='str'), + min_throughput=dict(required=False, type='str'), + is_shared=dict(required=False, type='bool'), + force=dict(required=False, type='bool'), + fixed_qos_options=dict(required=False, type='dict', options=dict( + capacity_shared=dict(required=False, type='bool'), + max_throughput_iops=dict(required=False, type='int'), + max_throughput_mbps=dict(required=False, type='int'), + min_throughput_iops=dict(required=False, type='int'), + min_throughput_mbps=dict(required=False, type='int') + )), + adaptive_qos_options=dict(required=False, type='dict', options=dict( + absolute_min_iops=dict(required=True, type='int'), + expected_iops=dict(required=True, type='int'), + peak_iops=dict(required=True, type='int'), + block_size=dict(required=False, type='str', choices=['any', '4k', '8k', '16k', '32k', '64k', '128k']) + )) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['max_throughput', 'fixed_qos_options'], + ['min_throughput', 'fixed_qos_options'], + ['max_throughput', 'adaptive_qos_options'], + ['min_throughput', 'adaptive_qos_options'], + ['fixed_qos_options', 'adaptive_qos_options'], + ['is_shared', 'adaptive_qos_options'], + ['is_shared', 'fixed_qos_options'] + ] + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = ['is_shared', 'max_throughput', 'min_throughput', 'force'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, 
unsupported_rest_properties) + + if self.use_rest and self.parameters['state'] == 'present': + if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8) and \ + self.na_helper.safe_get(self.parameters, ['fixed_qos_options', 'min_throughput_mbps']): + self.module.fail_json(msg="Minimum version of ONTAP for 'fixed_qos_options.min_throughput_mbps' is (9, 8, 0)") + if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1) and \ + self.na_helper.safe_get(self.parameters, ['adaptive_qos_options', 'block_size']): + self.module.fail_json(msg="Minimum version of ONTAP for 'adaptive_qos_options.block_size' is (9, 10, 1)") + self.uuid = None + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + if 'adaptive_qos_options' in self.parameters: + self.module.fail_json(msg="Error: use 'na_ontap_qos_adaptive_policy_group' module for create/modify/delete adaptive policy with ZAPI") + if 'fixed_qos_options' in self.parameters and self.parameters['state'] == 'present': + self.module.fail_json(msg="Error: 'fixed_qos_options' not supported with ZAPI, use 'max_throughput' and 'min_throughput'") + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + # default value for force is false in ZAPI. + self.parameters['force'] = False + + def get_policy_group(self, policy_group_name=None): + """ + Return details of a policy group. + :param policy_group_name: policy group name + :return: policy group details. + :rtype: dict. 
+ """ + if policy_group_name is None: + policy_group_name = self.parameters['name'] + if self.use_rest: + return self.get_policy_group_rest(policy_group_name) + policy_group_get_iter = netapp_utils.zapi.NaElement('qos-policy-group-get-iter') + policy_group_info = netapp_utils.zapi.NaElement('qos-policy-group-info') + policy_group_info.add_new_child('policy-group', policy_group_name) + policy_group_info.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(policy_group_info) + policy_group_get_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(policy_group_get_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching qos policy group %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + policy_group_detail = None + + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1: + policy_info = result.get_child_by_name('attributes-list').get_child_by_name('qos-policy-group-info') + + policy_group_detail = { + 'name': policy_info.get_child_content('policy-group'), + 'vserver': policy_info.get_child_content('vserver'), + 'max_throughput': policy_info.get_child_content('max-throughput'), + 'min_throughput': policy_info.get_child_content('min-throughput'), + 'is_shared': self.na_helper.get_value_for_bool(True, policy_info.get_child_content('is-shared')) + } + return policy_group_detail + + def get_policy_group_rest(self, policy_group_name): + api = 'storage/qos/policies' + query = { + 'name': policy_group_name, + 'svm.name': self.parameters['vserver'] + } + fields = 'name,svm' + if 'fixed_qos_options' in self.parameters: + fields += ',fixed' + elif 'adaptive_qos_options' in self.parameters: + fields += ',adaptive' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg='Error fetching qos 
policy group %s: %s' % + (self.parameters['name'], error)) + current = None + if record: + self.uuid = record['uuid'] + current = { + 'name': record['name'], + 'vserver': record['svm']['name'] + } + + if 'fixed' in record: + current['fixed_qos_options'] = {} + for fixed_qos_option in ['capacity_shared', 'max_throughput_iops', 'max_throughput_mbps', 'min_throughput_iops']: + current['fixed_qos_options'][fixed_qos_option] = record['fixed'].get(fixed_qos_option) + if self.na_helper.safe_get(self.parameters, ['fixed_qos_options', 'min_throughput_mbps']): + current['fixed_qos_options']['min_throughput_mbps'] = record['fixed'].get('min_throughput_mbps') + + if 'adaptive' in record: + current['adaptive_qos_options'] = {} + for adaptive_qos_option in ['absolute_min_iops', 'expected_iops', 'peak_iops', 'block_size']: + current['adaptive_qos_options'][adaptive_qos_option] = record['adaptive'].get(adaptive_qos_option) + return current + + def create_policy_group(self): + """ + create a policy group name. 
+ """ + if self.use_rest: + return self.create_policy_group_rest() + policy_group = netapp_utils.zapi.NaElement('qos-policy-group-create') + policy_group.add_new_child('policy-group', self.parameters['name']) + policy_group.add_new_child('vserver', self.parameters['vserver']) + if self.parameters.get('max_throughput'): + policy_group.add_new_child('max-throughput', self.parameters['max_throughput']) + if self.parameters.get('min_throughput'): + policy_group.add_new_child('min-throughput', self.parameters['min_throughput']) + if self.parameters.get('is_shared') is not None: + policy_group.add_new_child('is-shared', self.na_helper.get_value_for_bool(False, self.parameters['is_shared'])) + try: + self.server.invoke_successfully(policy_group, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating qos policy group %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def create_policy_group_rest(self): + api = 'storage/qos/policies' + body = { + 'name': self.parameters['name'], + 'svm.name': self.parameters['vserver'] + } + if 'fixed_qos_options' in self.parameters: + body['fixed'] = self.na_helper.filter_out_none_entries(self.parameters['fixed_qos_options']) + # default value for capacity_shared is False in REST. + if self.na_helper.safe_get(body, ['fixed', 'capacity_shared']) is None: + body['fixed']['capacity_shared'] = False + else: + body['adaptive'] = self.na_helper.filter_out_none_entries(self.parameters['adaptive_qos_options']) + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating qos policy group %s: %s' % + (self.parameters['name'], error)) + + def delete_policy_group(self, policy_group=None): + """ + delete an existing policy group. + :param policy_group: policy group name. 
+ """ + if self.use_rest: + return self.delete_policy_group_rest() + if policy_group is None: + policy_group = self.parameters['name'] + policy_group_obj = netapp_utils.zapi.NaElement('qos-policy-group-delete') + policy_group_obj.add_new_child('policy-group', policy_group) + if self.parameters.get('force'): + policy_group_obj.add_new_child('force', str(self.parameters['force'])) + try: + self.server.invoke_successfully(policy_group_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting qos policy group %s: %s' % + (policy_group, to_native(error)), + exception=traceback.format_exc()) + + def delete_policy_group_rest(self): + api = 'storage/qos/policies' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid) + if error: + self.module.fail_json(msg='Error deleting qos policy group %s: %s' % + (self.parameters['name'], error)) + + def modify_policy_group(self, modify): + """ + Modify policy group. + """ + if self.use_rest: + return self.modify_policy_group_rest(modify) + policy_group_obj = netapp_utils.zapi.NaElement('qos-policy-group-modify') + policy_group_obj.add_new_child('policy-group', self.parameters['name']) + if self.parameters.get('max_throughput'): + policy_group_obj.add_new_child('max-throughput', self.parameters['max_throughput']) + if self.parameters.get('min_throughput'): + policy_group_obj.add_new_child('min-throughput', self.parameters['min_throughput']) + try: + self.server.invoke_successfully(policy_group_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying qos policy group %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_policy_group_rest(self, modify): + api = 'storage/qos/policies' + body = {} + if 'fixed_qos_options' in modify: + body['fixed'] = modify['fixed_qos_options'] + else: + body['adaptive'] = self.parameters['adaptive_qos_options'] + dummy, error = 
rest_generic.patch_async(self.rest_api, api, self.uuid, body) + if error: + self.module.fail_json(msg='Error modifying qos policy group %s: %s' % + (self.parameters['name'], error)) + + def rename_policy_group(self): + """ + Rename policy group name. + """ + if self.use_rest: + return self.rename_policy_group_rest() + rename_obj = netapp_utils.zapi.NaElement('qos-policy-group-rename') + rename_obj.add_new_child('new-name', self.parameters['name']) + rename_obj.add_new_child('policy-group-name', self.parameters['from_name']) + try: + self.server.invoke_successfully(rename_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error renaming qos policy group %s: %s' % + (self.parameters['from_name'], to_native(error)), + exception=traceback.format_exc()) + + def rename_policy_group_rest(self): + api = 'storage/qos/policies' + body = {'name': self.parameters['name']} + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body) + if error: + self.module.fail_json(msg='Error renaming qos policy group %s: %s' % + (self.parameters['from_name'], error)) + + def modify_helper(self, modify): + """ + helper method to modify policy group. + :param modify: modified attributes. + """ + if any( + attribute in modify + for attribute in ['max_throughput', 'min_throughput', 'fixed_qos_options', 'adaptive_qos_options'] + ): + self.modify_policy_group(modify) + + def validate_adaptive_or_fixed_qos_options(self): + error = None + # one of the fixed throughput option required in create qos_policy. + if 'fixed_qos_options' in self.parameters: + fixed_options = ['max_throughput_iops', 'max_throughput_mbps', 'min_throughput_iops', 'min_throughput_mbps'] + if not any(x in self.na_helper.filter_out_none_entries(self.parameters['fixed_qos_options']) for x in fixed_options): + error = True + # error if both fixed_qos_options or adaptive_qos_options not present in creating qos policy. 
+ elif self.parameters.get('fixed_qos_options', self.parameters.get('adaptive_qos_options')) is None: + error = True + return error + + def apply(self): + """ + Run module based on playbook + """ + current = self.get_policy_group() + rename, cd_action = None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + # create policy by renaming an existing one + old_policy = self.get_policy_group(self.parameters['from_name']) + rename = self.na_helper.is_rename_action(old_policy, current) + if rename: + current = old_policy + cd_action = None + if rename is None: + self.module.fail_json(msg='Error renaming qos policy group: cannot find %s' % + self.parameters['from_name']) + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else {} + if 'is_shared' in modify or self.na_helper.safe_get(modify, ['fixed_qos_options', 'capacity_shared']) is not None: + self.module.fail_json(msg="Error cannot modify '%s' attribute." % + ('is_shared' if 'is_shared' in modify else 'fixed_qos_options.capacity_shared')) + if self.use_rest and cd_action == 'create' and self.validate_adaptive_or_fixed_qos_options(): + error = "Error: atleast one throughput in 'fixed_qos_options' or all 'adaptive_qos_options' required in creating qos_policy in REST." 
+ self.module.fail_json(msg=error) + if self.na_helper.changed and not self.module.check_mode: + if rename: + self.rename_policy_group() + if cd_action == 'create': + self.create_policy_group() + elif cd_action == 'delete': + self.delete_policy_group() + elif modify: + self.modify_helper(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + '''Apply vserver operations from playbook''' + qos_policy_group = NetAppOntapQosPolicyGroup() + qos_policy_group.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py new file mode 100644 index 000000000..3451078d7 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_qtree.py @@ -0,0 +1,462 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_qtree +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_qtree + +short_description: NetApp ONTAP manage qtrees +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create/Modify/Delete Qtrees. + +options: + + state: + description: + - Whether the specified qtree should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + + name: + description: + - The name of the qtree to manage. + - With REST, this can also be a path. + required: true + type: str + + from_name: + description: + - Name of the qtree to be renamed. + version_added: 2.7.0 + type: str + + flexvol_name: + description: + - The name of the FlexVol the qtree should exist on. 
+ required: true + type: str + + vserver: + description: + - The name of the vserver to use. + required: true + type: str + + export_policy: + description: + - The name of the export policy to apply. + version_added: 2.9.0 + type: str + + security_style: + description: + - The security style for the qtree. + choices: ['unix', 'ntfs', 'mixed'] + type: str + version_added: 2.9.0 + + oplocks: + description: + - Whether the oplocks should be enabled or not for the qtree. + choices: ['enabled', 'disabled'] + type: str + version_added: 2.9.0 + + unix_permissions: + description: + - File permissions bits of the qtree. + - Accepts either octal or string format. + - Examples 0777, 777 in octal and ---rwxrwxrwx, sstrwxrwxrwx, rwxrwxrwx in string format. + version_added: 2.9.0 + type: str + + force_delete: + description: + - Whether the qtree should be deleted even if files still exist. + - Note that the default of true reflect the REST API behavior. + - a value of false is not supported with REST. + type: bool + default: true + version_added: 20.8.0 + + wait_for_completion: + description: + - Only applicable for REST. When using ZAPI, the deletion is always synchronous. + - Deleting a qtree may take time if many files need to be deleted. + - Set this parameter to 'true' for synchronous execution during delete. + - Set this parameter to 'false' for asynchronous execution. + - For asynchronous, execution exits as soon as the request is sent, and the qtree is deleted in background. + type: bool + default: true + version_added: 2.9.0 + + time_out: + description: + - Maximum time to wait for qtree deletion in seconds when wait_for_completion is True. + - Error out if task is not completed in defined time. + - Default is set to 3 minutes. + default: 180 + type: int + version_added: 2.9.0 + + unix_user: + description: + - The user set as owner of the qtree. + - Only supported with REST and ONTAP 9.9 or later. 
+ type: str + version_added: 21.21.0 + + unix_group: + description: + - The group set as owner of the qtree. + - Only supported with REST and ONTAP 9.9 or later. + type: str + version_added: 21.21.0 + +''' + +EXAMPLES = """ +- name: Create Qtrees. + netapp.ontap.na_ontap_qtree: + state: present + name: ansibleQTree + flexvol_name: ansibleVolume + export_policy: policyName + security_style: mixed + oplocks: disabled + unix_permissions: 0777 + vserver: ansibleVServer + unix_user: user1 + unix_group: group1 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: Rename Qtrees. + netapp.ontap.na_ontap_qtree: + state: present + from_name: ansibleQTree + name: ansibleQTree_rename + flexvol_name: ansibleVolume + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: modify Qtrees unix_permissions using string format. + netapp.ontap.na_ontap_qtree: + state: present + name: ansibleQTree_rename + flexvol_name: ansibleVolume + vserver: ansibleVServer + unix_permissions: sstrwxrwxrwx + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +- name: delete Qtrees. 
+ netapp.ontap.na_ontap_qtree: + state: absent + name: ansibleQTree_rename + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ + +""" +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapQTree: + '''Class with qtree operations''' + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + flexvol_name=dict(required=True, type='str'), + vserver=dict(required=True, type='str'), + export_policy=dict(required=False, type='str'), + security_style=dict(required=False, type='str', choices=['unix', 'ntfs', 'mixed']), + oplocks=dict(required=False, type='str', choices=['enabled', 'disabled']), + unix_permissions=dict(required=False, type='str'), + force_delete=dict(required=False, type='bool', default=True), + wait_for_completion=dict(required=False, type='bool', default=True), + time_out=dict(required=False, type='int', default=180), + unix_user=dict(required=False, type='str'), + unix_group=dict(required=False, type='str') + )) + self.volume_uuid, self.qid = None, None + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['flexvol_name']) 
+ ], + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = ['oplocks'] + partially_supported_rest_properties = [['unix_user', (9, 9)], ['unix_group', (9, 9)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_qtree(self, name=None): + """ + Checks if the qtree exists. + :param: + name : qtree name + :return: + Details about the qtree + False if qtree is not found + :rtype: bool + """ + if name is None: + name = self.parameters['name'] + if self.use_rest: + api = "storage/qtrees" + query = {'fields': 'export_policy,unix_permissions,security_style,volume', + 'svm.name': self.parameters['vserver'], + 'volume': self.parameters['flexvol_name'], + 'name': '"' + name + '"'} + if 'unix_user' in self.parameters: + query['fields'] += ',user.name' + if 'unix_group' in self.parameters: + query['fields'] += ',group.name' + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + msg = "Error fetching qtree: %s" % error + self.module.fail_json(msg=msg) + if record: + self.volume_uuid = record['volume']['uuid'] + self.qid = str(record['id']) + return { + 'name': record['name'], + 'export_policy': self.na_helper.safe_get(record, ['export_policy', 'name']), + 'security_style': self.na_helper.safe_get(record, ['security_style']), + 'unix_permissions': str(self.na_helper.safe_get(record, ['unix_permissions'])), + 'unix_user': self.na_helper.safe_get(record, ['user', 'name']), + 'unix_group': self.na_helper.safe_get(record, ['group', 'name']) + } + return None + + 
qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'qtree-info', **{'vserver': self.parameters['vserver'], + 'volume': self.parameters['flexvol_name'], + 'qtree': name}) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + qtree_list_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(qtree_list_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching qtree: %s' % to_native(error), + exception=traceback.format_exc()) + return_q = None + if (result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1): + return_q = {'export_policy': result['attributes-list']['qtree-info']['export-policy'], + 'oplocks': result['attributes-list']['qtree-info']['oplocks'], + 'security_style': result['attributes-list']['qtree-info']['security-style']} + + value = self.na_helper.safe_get(result, ['attributes-list', 'qtree-info', 'mode']) + return_q['unix_permissions'] = value if value is not None else '' + + return return_q + + def create_qtree(self): + """ + Create a qtree + """ + if self.use_rest: + api = "storage/qtrees" + body = {'volume': {'name': self.parameters['flexvol_name']}, + 'svm': {'name': self.parameters['vserver']}} + body.update(self.form_create_modify_body_rest()) + query = dict(return_timeout=10) + dummy, error = rest_generic.post_async(self.rest_api, api, body, query) + if error: + if "job reported error:" in error and "entry doesn't exist" in error: + # ignore RBAC issue with FSx - BURT1525998 + self.module.warn('Ignoring job status, assuming success.') + return + self.module.fail_json(msg='Error creating qtree %s: %s' % (self.parameters['name'], error)) + else: + self.create_or_modify_qtree_zapi('qtree-create', "Error creating qtree %s: %s") + + def delete_qtree(self): + """ + Delete a qtree + """ + if self.use_rest: + api = 
"storage/qtrees/%s" % self.volume_uuid + query = {'return_timeout': 3} + response, error = rest_generic.delete_async(self.rest_api, api, self.qid, query) + if self.parameters['wait_for_completion']: + dummy, error = rrh.check_for_error_and_job_results(api, response, error, self.rest_api) + if error: + self.module.fail_json(msg='Error deleting qtree %s: %s' % (self.parameters['name'], error)) + + else: + path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name']) + options = {'qtree': path} + if self.parameters['force_delete']: + options['force'] = "true" + qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'qtree-delete', **options) + + try: + self.server.invoke_successfully(qtree_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(error)), + exception=traceback.format_exc()) + + def rename_qtree(self): + """ + Rename a qtree + """ + if self.use_rest: + error = 'Internal error, use modify with REST' + self.module.fail_json(msg=error) + else: + path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['from_name']) + new_path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name']) + qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'qtree-rename', **{'qtree': path, + 'new-qtree-name': new_path}) + + try: + self.server.invoke_successfully(qtree_rename, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error renaming qtree %s: %s" + % (self.parameters['from_name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_qtree(self): + """ + Modify a qtree + """ + if self.use_rest: + body = self.form_create_modify_body_rest() + api = "storage/qtrees/%s" % self.volume_uuid + query = dict(return_timeout=10) + dummy, error = rest_generic.patch_async(self.rest_api, api, self.qid, body, query) + if error: + 
self.module.fail_json(msg='Error modifying qtree %s: %s' % (self.parameters['name'], error)) + else: + self.create_or_modify_qtree_zapi('qtree-modify', 'Error modifying qtree %s: %s') + + def create_or_modify_qtree_zapi(self, zapi_request_name, error_message): + options = {'qtree': self.parameters['name'], 'volume': self.parameters['flexvol_name']} + + if self.parameters.get('export_policy'): + options['export-policy'] = self.parameters['export_policy'] + if self.parameters.get('security_style'): + options['security-style'] = self.parameters['security_style'] + if self.parameters.get('oplocks'): + options['oplocks'] = self.parameters['oplocks'] + if self.parameters.get('unix_permissions'): + options['mode'] = self.parameters['unix_permissions'] + zapi_request = netapp_utils.zapi.NaElement.create_node_with_children(zapi_request_name, **options) + + try: + self.server.invoke_successfully(zapi_request, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg=(error_message % (self.parameters['name'], to_native(error))), exception=traceback.format_exc()) + + def form_create_modify_body_rest(self): + body = {'name': self.parameters['name']} + if self.parameters.get('security_style'): + body['security_style'] = self.parameters['security_style'] + if self.parameters.get('unix_permissions'): + body['unix_permissions'] = self.parameters['unix_permissions'] + if self.parameters.get('export_policy'): + body['export_policy'] = {'name': self.parameters['export_policy']} + if self.parameters.get('unix_user'): + body['user'] = {'name': self.parameters['unix_user']} + if self.parameters.get('unix_group'): + body['group'] = {'name': self.parameters['unix_group']} + return body + + def apply(self): + '''Call create/delete/modify/rename operations''' + current = self.get_qtree() + rename, cd_action, modify = None, None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and 
self.parameters.get('from_name'): + current = self.get_qtree(self.parameters['from_name']) + if current is None: + self.module.fail_json(msg="Error renaming: qtree %s does not exist" % self.parameters['from_name']) + cd_action = None + if not self.use_rest: + # modify can change the name for REST, as UUID is the key. + rename = True + + if cd_action is None: + octal_value = current.get('unix_permissions') if current else None + if self.parameters.get('unix_permissions')\ + and self.na_helper.compare_chmod_value(octal_value, self.parameters['unix_permissions']): + del self.parameters['unix_permissions'] + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.use_rest and cd_action == 'delete' and not self.parameters['force_delete']: + self.module.fail_json(msg='Error: force_delete option is not supported for REST, unless set to true.') + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_qtree() + elif cd_action == 'delete': + self.delete_qtree() + else: + if rename: + self.rename_qtree() + if modify: + self.modify_qtree() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + '''Apply qtree operations from playbook''' + qtree_obj = NetAppOntapQTree() + qtree_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py new file mode 100644 index 000000000..d2604c62c --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py @@ -0,0 +1,257 @@ +#!/usr/bin/python + +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_quota_policy +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + 
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = """ +module: na_ontap_quota_policy +short_description: NetApp Ontap create, assign, rename or delete quota policy +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +version_added: '19.11.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Create, assign, rename or delete the quota policy + - This module only supports ZAPI and is deprecated. + - The final version of ONTAP to support ZAPI is 9.12.1. +options: + state: + description: + - Whether the specified quota policy should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - Specifies the vserver for the quota policy. + required: true + type: str + + name: + description: + - Specifies the quota policy name to create or rename to. + required: true + type: str + + from_name: + description: + - Name of the existing quota policy to be renamed to name. + type: str + + auto_assign: + description: + - when true, assign the policy to the vserver, whether it is newly created, renamed, or already exists. + - when true, the policy identified by name replaces the already assigned policy. + - when false, the policy is created if it does not already exist but is not assigned. 
+ type: bool + default: true + version_added: 20.12.0 +""" + +EXAMPLES = """ + - name: Create quota policy + na_ontap_quota_policy: + state: present + vserver: SVM1 + name: ansible_policy + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Rename quota policy + na_ontap_quota_policy: + state: present + vserver: SVM1 + name: new_ansible + from_name: ansible + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Delete quota policy + na_ontap_quota_policy: + state: absent + vserver: SVM1 + name: ansible_policy + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils import zapis_svm + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapQuotaPolicy(object): + """ + Create, assign, rename or delete a quota policy + """ + + def __init__(self): + """ + Initialize the ONTAP quota policy class + """ + + self.argument_spec = netapp_utils.na_ontap_zapi_only_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + auto_assign=dict(required=False, type='bool', default=True), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['name', 'vserver']) + ], + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = 
self.na_helper.set_parameters(self.module.params) + self.na_helper.module_deprecated(self.module) + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg='The python NetApp-Lib module is required') + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_quota_policy(self, policy_name=None): + + if policy_name is None: + policy_name = self.parameters['name'] + + return_value = None + quota_policy_get_iter = netapp_utils.zapi.NaElement('quota-policy-get-iter') + quota_policy_info = netapp_utils.zapi.NaElement('quota-policy-info') + quota_policy_info.add_new_child('policy-name', policy_name) + quota_policy_info.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(quota_policy_info) + quota_policy_get_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(quota_policy_get_iter, True) + if result.get_child_by_name('attributes-list'): + quota_policy_attributes = result['attributes-list']['quota-policy-info'] + return_value = { + 'name': quota_policy_attributes['policy-name'] + } + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching quota policy %s: %s' % (policy_name, to_native(error)), + exception=traceback.format_exc()) + return return_value + + def create_quota_policy(self): + """ + Creates a new quota policy + """ + quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-create") + quota_policy_obj.add_new_child("policy-name", self.parameters['name']) + quota_policy_obj.add_new_child("vserver", self.parameters['vserver']) + try: + self.server.invoke_successfully(quota_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating quota policy %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_quota_policy(self): + """ + Deletes a quota policy + """ + 
quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-delete") + quota_policy_obj.add_new_child("policy-name", self.parameters['name']) + try: + self.server.invoke_successfully(quota_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting quota policy %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def rename_quota_policy(self): + """ + Rename a quota policy + """ + quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-rename") + quota_policy_obj.add_new_child("policy-name", self.parameters['from_name']) + quota_policy_obj.add_new_child("vserver", self.parameters['vserver']) + quota_policy_obj.add_new_child("new-policy-name", self.parameters['name']) + try: + self.server.invoke_successfully(quota_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error renaming quota policy %s: %s' % (self.parameters['from_name'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + current = self.get_quota_policy() + # rename and create are mutually exclusive + rename, cd_action = None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + # create policy by renaming it + rename = self.na_helper.is_rename_action(self.get_quota_policy(self.parameters['from_name']), current) + if rename is None: + self.module.fail_json(msg='Error renaming quota policy: %s does not exist.' 
% self.parameters['from_name']) + + # check if policy should be assigned + assign_policy = cd_action == 'create' and self.parameters['auto_assign'] + if cd_action is None and current and self.parameters['auto_assign']: + # find out if the existing policy needs to be changed + svm = zapis_svm.get_vserver(self.server, self.parameters['vserver']) + if svm.get('quota_policy') != self.parameters['name']: + assign_policy = True + self.na_helper.changed = True + if cd_action == 'delete': + # can't delete if already assigned + svm = zapis_svm.get_vserver(self.server, self.parameters['vserver']) + if svm.get('quota_policy') == self.parameters['name']: + self.module.fail_json(msg='Error policy %s cannot be deleted as it is assigned to the vserver %s' % + (self.parameters['name'], self.parameters['vserver'])) + + if self.na_helper.changed and not self.module.check_mode: + if rename: + self.rename_quota_policy() + elif cd_action == 'create': + self.create_quota_policy() + elif cd_action == 'delete': + self.delete_quota_policy() + if assign_policy: + zapis_svm.modify_vserver(self.server, self.module, self.parameters['vserver'], modify=dict(quota_policy=self.parameters['name'])) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap quota policy object and runs the correct play task + """ + obj = NetAppOntapQuotaPolicy() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py new file mode 100644 index 000000000..1aca89feb --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quotas.py @@ -0,0 +1,890 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, 
print_function +__metaclass__ = type + +''' +na_ontap_quotas +''' + + +DOCUMENTATION = ''' +module: na_ontap_quotas +short_description: NetApp ONTAP Quotas +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Set/Modify/Delete quota on ONTAP +options: + state: + description: + - Whether the specified quota should exist or not. + choices: ['present', 'absent'] + default: present + type: str + vserver: + required: true + description: + - Name of the vserver to use. + type: str + volume: + description: + - The name of the volume that the quota resides on. + required: true + type: str + quota_target: + description: + - The quota target of the type specified. + - Required to create or modify a rule. + - users and group takes quota_target value in REST. + - For default user and group quota rules, the quota_target must be specified as "". + type: str + qtree: + description: + - Name of the qtree for the quota. + - For user or group rules, it can be the qtree name or "" if no qtree. + - For tree type rules, this field must be "". + default: "" + type: str + type: + description: + - The type of quota rule + - Required to create or modify a rule. + choices: ['user', 'group', 'tree'] + type: str + policy: + description: + - Name of the quota policy from which the quota rule should be obtained. + - Only supported with ZAPI. + - Multiple alternative quota policies (active and backup) are not supported in REST. + - REST manages the quota rules of the active policy. + type: str + set_quota_status: + description: + - Whether the specified volume should have quota status on or off. + type: bool + perform_user_mapping: + description: + - Whether quota management will perform user mapping for the user specified in quota-target. + - User mapping can be specified only for a user quota rule. 
+ type: bool + aliases: ['user_mapping'] + version_added: 20.12.0 + file_limit: + description: + - The number of files that the target can have. + - use '-' to reset file limit. + type: str + disk_limit: + description: + - The amount of disk space that is reserved for the target. + - Expects a number followed with B (for bytes), KB, MB, GB, TB. + - If the unit is not present KB is used by default. + - Examples - 10MB, 20GB, 1TB, 20B, 10. + - In REST, if limit is less than 1024 bytes, the value is rounded up to 1024 bytes. + - use '-' to reset disk limit. + type: str + soft_file_limit: + description: + - The number of files the target would have to exceed before a message is logged and an SNMP trap is generated. + - use '-' to reset soft file limit. + type: str + soft_disk_limit: + description: + - The amount of disk space the target would have to exceed before a message is logged and an SNMP trap is generated. + - See C(disk_limit) for format description. + - In REST, if limit is less than 1024 bytes, the value is rounded up to 1024 bytes. + - use '-' to reset soft disk limit. + type: str + threshold: + description: + - The amount of disk space the target would have to exceed before a message is logged. + - See C(disk_limit) for format description. + - Only supported with ZAPI. + type: str + activate_quota_on_change: + description: + - Method to use to activate quota on a change. + - Default value is 'resize' in ZAPI. + - With REST, Changes to quota rule limits C(file_limit), C(disk_limit), C(soft_file_limit), and C(soft_disk_limit) are applied automatically + without requiring a quota resize operation. + choices: ['resize', 'reinitialize', 'none'] + type: str + version_added: 20.12.0 + +''' + +EXAMPLES = """ + - name: Create quota rule in ZAPI. 
+ netapp.ontap.na_ontap_quotas: + state: present + vserver: ansible + volume: ansible + quota_target: user1 + type: user + policy: ansible + file_limit: 2 + disk_limit: 3 + set_quota_status: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Resize quota + netapp.ontap.na_ontap_quotas: + state: present + vserver: ansible + volume: ansible + quota_target: user1 + type: user + policy: ansible + file_limit: 2 + disk_limit: 3 + set_quota_status: True + activate_quota_on_change: resize + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Reinitialize quota + netapp.ontap.na_ontap_quotas: + state: present + vserver: ansible + volume: ansible + quota_target: user1 + type: user + policy: ansible + file_limit: 2 + disk_limit: 3 + set_quota_status: True + activate_quota_on_change: reinitialize + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: modify quota + netapp.ontap.na_ontap_quotas: + state: present + vserver: ansible + volume: ansible + quota_target: user1 + type: user + policy: ansible + file_limit: 2 + disk_limit: 3 + threshold: 3 + set_quota_status: False + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Delete quota + netapp.ontap.na_ontap_quotas: + state: absent + vserver: ansible + volume: ansible + quota_target: /vol/ansible + type: user + policy: ansible + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Add/Set quota rule for type user in REST. 
+ netapp.ontap.na_ontap_quotas: + state: present + vserver: ansible + volume: ansible + quota_target: "user1,user2" + qtree: qtree + type: user + file_limit: 2 + disk_limit: 3 + set_quota_status: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Modify quota reset file limit and modify disk limit. + netapp.ontap.na_ontap_quotas: + state: present + vserver: ansible + volume: ansible + quota_target: "user1,user2" + qtree: qtree + type: user + file_limit: "-" + disk_limit: 100 + set_quota_status: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Add/Set quota rule for type group in REST. + netapp.ontap.na_ontap_quotas: + state: present + vserver: ansible + volume: ansible + quota_target: group1 + qtree: qtree + type: group + file_limit: 2 + disk_limit: 3 + set_quota_status: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Add/Set quota rule for type qtree in REST. 
+ netapp.ontap.na_ontap_quotas: + state: present + vserver: ansible + volume: ansible + quota_target: qtree1 + type: qtree + file_limit: 2 + disk_limit: 3 + set_quota_status: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ + +""" + +import time +import traceback +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +class NetAppONTAPQuotas: + '''Class with quotas methods''' + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + volume=dict(required=True, type='str'), + quota_target=dict(required=False, type='str'), + qtree=dict(required=False, type='str', default=""), + type=dict(required=False, type='str', choices=['user', 'group', 'tree']), + policy=dict(required=False, type='str'), + set_quota_status=dict(required=False, type='bool'), + perform_user_mapping=dict(required=False, type='bool', aliases=['user_mapping']), + file_limit=dict(required=False, type='str'), + disk_limit=dict(required=False, type='str'), + soft_file_limit=dict(required=False, type='str'), + soft_disk_limit=dict(required=False, type='str'), + threshold=dict(required=False, type='str'), + activate_quota_on_change=dict(required=False, type='str', choices=['resize', 'reinitialize', 'none']) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + required_by={ + 
'policy': ['quota_target', 'type'], + 'perform_user_mapping': ['quota_target', 'type'], + 'file_limit': ['quota_target', 'type'], + 'disk_limit': ['quota_target', 'type'], + 'soft_file_limit': ['quota_target', 'type'], + 'soft_disk_limit': ['quota_target', 'type'], + 'threshold': ['quota_target', 'type'], + }, + required_together=[('quota_target', 'type')] + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # Set up Rest API + self.rest_api = netapp_utils.OntapRestAPI(self.module) + unsupported_rest_properties = ['policy', 'threshold'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties) + self.volume_uuid = None # volume UUID after quota rule creation, used for on or off quota status + self.quota_uuid = None + self.warn_msg = None + self.validate_parameters_ZAPI_REST() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def validate_parameters_ZAPI_REST(self): + if self.use_rest: + if self.parameters.get('type') == 'tree': + if self.parameters['qtree']: + self.module.fail_json(msg="Error: Qtree cannot be specified for a tree type rule, it should be ''.") + # valid qtree name for ZAPI is /vol/vol_name/qtree_name and REST is qtree_name. 
+ if '/' in self.parameters.get('quota_target', ''): + self.parameters['quota_target'] = self.parameters['quota_target'].split('/')[-1] + for quota_limit in ['file_limit', 'disk_limit', 'soft_file_limit', 'soft_disk_limit']: + if self.parameters.get(quota_limit) == '-1': + self.parameters[quota_limit] = '-' + else: + # converted blank parameter to * as shown in vsim + if self.parameters.get('quota_target') == "": + self.parameters['quota_target'] = '*' + if not self.parameters.get('activate_quota_on_change'): + self.parameters['activate_quota_on_change'] = 'resize' + size_format_error_message = "input string is not a valid size format. A valid size format is constructed as" \ + ". For example, '10MB', '10KB'. Only numeric input is also valid." \ + "The default unit size is KB." + if self.parameters.get('disk_limit') and self.parameters['disk_limit'] != '-' and not self.convert_to_kb_or_bytes('disk_limit'): + self.module.fail_json(msg='disk_limit %s' % size_format_error_message) + if self.parameters.get('soft_disk_limit') and self.parameters['soft_disk_limit'] != '-' and not self.convert_to_kb_or_bytes('soft_disk_limit'): + self.module.fail_json(msg='soft_disk_limit %s' % size_format_error_message) + if self.parameters.get('threshold') and self.parameters['threshold'] != '-' and not self.convert_to_kb_or_bytes('threshold'): + self.module.fail_json(msg='threshold %s' % size_format_error_message) + + def get_quota_status(self): + """ + Return details about the quota status + :param: + name : volume name + :return: status of the quota. None if not found. 
+ :rtype: dict + """ + quota_status_get = netapp_utils.zapi.NaElement('quota-status') + quota_status_get.translate_struct({ + 'volume': self.parameters['volume'] + }) + try: + result = self.server.invoke_successfully(quota_status_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching quotas status info: %s' % to_native(error), + exception=traceback.format_exc()) + return result['status'] + + def get_quotas_with_retry(self, get_request, policy): + return_values = None + if policy is not None: + get_request['query']['quota-entry'].add_new_child('policy', policy) + try: + result = self.server.invoke_successfully(get_request, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + # Bypass a potential issue in ZAPI when policy is not set in the query + # https://github.com/ansible-collections/netapp.ontap/issues/4 + # BURT1076601 Loop detected in next() for table quota_rules_zapi + if policy is None and 'Reason - 13001:success' in to_native(error): + result = None + return_values = self.debug_quota_get_error(error) + else: + self.module.fail_json(msg='Error fetching quotas info for policy %s: %s' + % (policy, to_native(error)), + exception=traceback.format_exc()) + return result, return_values + + def get_quotas(self, policy=None): + """ + Get quota details + :return: name of volume if quota exists, None otherwise + """ + if self.parameters.get('type') is None: + return None + if policy is None: + policy = self.parameters.get('policy') + quota_get = netapp_utils.zapi.NaElement('quota-list-entries-iter') + query = { + 'query': { + 'quota-entry': { + 'volume': self.parameters['volume'], + 'quota-target': self.parameters['quota_target'], + 'quota-type': self.parameters['type'], + 'vserver': self.parameters['vserver'], + 'qtree': self.parameters['qtree'] or '""' + } + } + } + quota_get.translate_struct(query) + result, return_values = self.get_quotas_with_retry(quota_get, policy) + if result 
is None: + return return_values + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + # if quota-target is '*', the query treats it as a wildcard. But a blank entry is represented as '*'. + # Hence the need to loop through all records to find a match. + for quota_entry in result.get_child_by_name('attributes-list').get_children(): + quota_target = quota_entry.get_child_content('quota-target') + if quota_target == self.parameters['quota_target']: + return_values = {'volume': quota_entry.get_child_content('volume'), + 'file_limit': quota_entry.get_child_content('file-limit'), + 'disk_limit': quota_entry.get_child_content('disk-limit'), + 'soft_file_limit': quota_entry.get_child_content('soft-file-limit'), + 'soft_disk_limit': quota_entry.get_child_content('soft-disk-limit'), + 'threshold': quota_entry.get_child_content('threshold')} + value = self.na_helper.safe_get(quota_entry, ['perform-user-mapping']) + if value is not None: + return_values['perform_user_mapping'] = self.na_helper.get_value_for_bool(True, value) + return return_values + return None + + def get_quota_policies(self): + """ + Get list of quota policies + :return: list of quota policies (empty list if None found) + """ + quota_policy_get = netapp_utils.zapi.NaElement('quota-policy-get-iter') + query = { + 'query': { + 'quota-policy-info': { + 'vserver': self.parameters['vserver'] + } + } + } + quota_policy_get.translate_struct(query) + try: + result = self.server.invoke_successfully(quota_policy_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching quota policies: %s' % to_native(error), + exception=traceback.format_exc()) + return ([policy['policy-name'] for policy in result['attributes-list'].get_children()] + if result.get_child_by_name('attributes-list') + else []) + + def debug_quota_get_error(self, error): + policies = self.get_quota_policies() + entries = {} + for policy in 
policies: + entries[policy] = self.get_quotas(policy) + if len(policies) == 1: + self.module.warn('retried with success using policy="%s" on "13001:success" ZAPI error.' % policy) + return entries[policies[0]] + self.module.fail_json(msg='Error fetching quotas info: %s - current vserver policies: %s, details: %s' + % (to_native(error), policies, entries)) + + def quota_entry_set(self): + """ + Adds a quota entry + """ + options = {'volume': self.parameters['volume'], + 'quota-target': self.parameters['quota_target'], + 'quota-type': self.parameters['type'], + 'qtree': self.parameters['qtree']} + + self.set_zapi_options(options) + if self.parameters.get('policy'): + options['policy'] = self.parameters['policy'] + set_entry = netapp_utils.zapi.NaElement.create_node_with_children( + 'quota-set-entry', **options) + try: + self.server.invoke_successfully(set_entry, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error adding/modifying quota entry %s: %s' + % (self.parameters['volume'], to_native(error)), + exception=traceback.format_exc()) + + def quota_entry_delete(self): + """ + Deletes a quota entry + """ + options = {'volume': self.parameters['volume'], + 'quota-target': self.parameters['quota_target'], + 'quota-type': self.parameters['type'], + 'qtree': self.parameters['qtree']} + set_entry = netapp_utils.zapi.NaElement.create_node_with_children( + 'quota-delete-entry', **options) + if self.parameters.get('policy'): + set_entry.add_new_child('policy', self.parameters['policy']) + try: + self.server.invoke_successfully(set_entry, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting quota entry %s: %s' + % (self.parameters['volume'], to_native(error)), + exception=traceback.format_exc()) + + def quota_entry_modify(self, modify_attrs): + """ + Modifies a quota entry + """ + for key in list(modify_attrs): + modify_attrs[key.replace("_", "-")] = 
modify_attrs.pop(key) + options = {'volume': self.parameters['volume'], + 'quota-target': self.parameters['quota_target'], + 'quota-type': self.parameters['type'], + 'qtree': self.parameters['qtree']} + options.update(modify_attrs) + self.set_zapi_options(options) + if self.parameters.get('policy'): + options['policy'] = str(self.parameters['policy']) + modify_entry = netapp_utils.zapi.NaElement.create_node_with_children( + 'quota-modify-entry', **options) + try: + self.server.invoke_successfully(modify_entry, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying quota entry %s: %s' + % (self.parameters['volume'], to_native(error)), + exception=traceback.format_exc()) + + def set_zapi_options(self, options): + if self.parameters.get('file_limit'): + options['file-limit'] = self.parameters['file_limit'] + if self.parameters.get('disk_limit'): + options['disk-limit'] = self.parameters['disk_limit'] + if self.parameters.get('perform_user_mapping') is not None: + options['perform-user-mapping'] = str(self.parameters['perform_user_mapping']) + if self.parameters.get('soft_file_limit'): + options['soft-file-limit'] = self.parameters['soft_file_limit'] + if self.parameters.get('soft_disk_limit'): + options['soft-disk-limit'] = self.parameters['soft_disk_limit'] + if self.parameters.get('threshold'): + options['threshold'] = self.parameters['threshold'] + + def on_or_off_quota(self, status, cd_action=None): + """ + on or off quota + """ + quota = netapp_utils.zapi.NaElement.create_node_with_children( + status, **{'volume': self.parameters['volume']}) + try: + self.server.invoke_successfully(quota, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if cd_action == 'delete' and status == 'quota-on' and '14958:No valid quota rules found' in to_native(error): + # ignore error on quota-on, as all rules have been deleted + self.module.warn('Last rule deleted, quota is off.') + return + 
self.module.fail_json(msg='Error setting %s for %s: %s' + % (status, self.parameters['volume'], to_native(error)), + exception=traceback.format_exc()) + + def resize_quota(self, cd_action=None): + """ + resize quota + """ + quota = netapp_utils.zapi.NaElement.create_node_with_children( + 'quota-resize', **{'volume': self.parameters['volume']}) + try: + self.server.invoke_successfully(quota, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if cd_action == 'delete' and '14958:No valid quota rules found' in to_native(error): + # ignore error on quota-on, as all rules have been deleted + self.module.warn('Last rule deleted, but quota is on as resize is not allowed.') + return + self.module.fail_json(msg='Error setting %s for %s: %s' + % ('quota-resize', self.parameters['volume'], to_native(error)), + exception=traceback.format_exc()) + + def get_quotas_rest(self): + """ + Retrieves quotas with rest API. + If type is user then it returns all possible combinations of user name records. + Report api is used to fetch file and disk limit info + """ + if not self.use_rest: + return self.get_quotas() + query = {'svm.name': self.parameters.get('vserver'), + 'volume.name': self.parameters.get('volume'), + 'type': self.parameters.get('type'), + 'fields': 'svm.uuid,' + 'svm.name,' + 'space.hard_limit,' + 'files.hard_limit,' + 'user_mapping,' + 'qtree.name,' + 'type,' + 'space.soft_limit,' + 'files.soft_limit,' + 'volume.uuid,' + 'users.name,' + 'group.name,'} + + # set qtree name in query for type user and group if not ''. 
+ if self.parameters['qtree']: + query['qtree.name'] = self.parameters['qtree'] + if self.parameters.get('quota_target'): + type = self.parameters['type'] + field_name = 'users.name' if type == 'user' else 'group.name' if type == 'group' else 'qtree.name' + query[field_name] = self.parameters['quota_target'] + api = 'storage/quota/rules' + # If type: user, get quota rules api returns users which has name starts with input target user names. + # Example of users list in a record: + # users: [{'name': 'quota_user'}], users: [{'name': 'quota_user'}, {'name': 'quota'}] + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error on getting quota rule info: %s" % error) + if records: + record = None + for item in records: + # along with user/group, qtree should also match to get current quota. + # for type user/group if qtree is not set in create, its not returned in GET, make desired qtree None if ''. + desired_qtree = self.parameters['qtree'] if self.parameters.get('qtree') else None + current_qtree = self.na_helper.safe_get(item, ['qtree', 'name']) + type = self.parameters.get('type') + if type in ['user', 'group']: + if desired_qtree != current_qtree: + continue + if type == 'user': + desired_users = self.parameters['quota_target'].split(',') + current_users = [user['name'] for user in item['users']] + if set(current_users) == set(desired_users): + record = item + break + elif item['group']['name'] == self.parameters['quota_target']: + record = item + break + # for type tree, desired quota_target should match current tree. 
+ elif type == 'tree' and current_qtree == self.parameters['quota_target']: + record = item + break + if record: + self.volume_uuid = record['volume']['uuid'] + self.quota_uuid = record['uuid'] + current = { + 'soft_file_limit': self.na_helper.safe_get(record, ['files', 'soft_limit']), + 'disk_limit': self.na_helper.safe_get(record, ['space', 'hard_limit']), + 'soft_disk_limit': self.na_helper.safe_get(record, ['space', 'soft_limit']), + 'file_limit': self.na_helper.safe_get(record, ['files', 'hard_limit']), + 'perform_user_mapping': self.na_helper.safe_get(record, ['user_mapping']), + } + # Rest allows reset quota limits using '-', convert None to '-' to avoid idempotent issue. + current['soft_file_limit'] = '-' if current['soft_file_limit'] is None else str(current['soft_file_limit']) + current['disk_limit'] = '-' if current['disk_limit'] is None else str(current['disk_limit']) + current['soft_disk_limit'] = '-' if current['soft_disk_limit'] is None else str(current['soft_disk_limit']) + current['file_limit'] = '-' if current['file_limit'] is None else str(current['file_limit']) + return current + return None + + def quota_entry_set_rest(self): + """ + quota_entry_set with rest API. + for type: 'user' and 'group', quota_target is used. 
+ value for user, group and qtree should be passed as '' + """ + if not self.use_rest: + return self.quota_entry_set() + body = {'svm.name': self.parameters.get('vserver'), + 'volume.name': self.parameters.get('volume'), + 'type': self.parameters.get('type'), + 'qtree.name': self.parameters.get('qtree')} + quota_target = self.parameters.get('quota_target') + if self.parameters.get('type') == 'user': + body['users.name'] = quota_target.split(',') + elif self.parameters.get('type') == 'group': + body['group.name'] = quota_target + if self.parameters.get('type') == 'tree': + body['qtree.name'] = quota_target + if 'file_limit' in self.parameters: + body['files.hard_limit'] = self.parameters.get('file_limit') + if 'soft_file_limit' in self.parameters: + body['files.soft_limit'] = self.parameters.get('soft_file_limit') + if 'disk_limit' in self.parameters: + body['space.hard_limit'] = self.parameters.get('disk_limit') + if 'soft_disk_limit' in self.parameters: + body['space.soft_limit'] = self.parameters.get('soft_disk_limit') + if 'perform_user_mapping' in self.parameters: + body['user_mapping'] = self.parameters.get('perform_user_mapping') + query = {'return_records': 'true'} # in order to capture UUID + api = 'storage/quota/rules' + response, error = rest_generic.post_async(self.rest_api, api, body, query) + if error: + if "job reported error:" in error and "entry doesn't exist" in error: + # ignore RBAC issue with FSx - BURT1525998 + self.module.warn('Ignoring job status, assuming success.') + elif '5308568' in error: + # code: 5308568 requires quota to be disabled/enabled to take effect. + # code: 5308571 - rule created, but to make it active reinitialize quota. + # reinitialize will disable/enable quota. 
+ self.form_warn_msg_rest('create', '5308568') + elif '5308571' in error: + self.form_warn_msg_rest('create', '5308571') + else: + self.module.fail_json(msg="Error on creating quotas rule: %s" % error) + # fetch volume uuid as response will be None if above code error occurs. + self.volume_uuid = self.get_quota_status_or_volume_id_rest(get_volume=True) + # skip fetching volume uuid from response if volume_uuid already populated. + if not self.volume_uuid and response: + record, error = rrh.check_for_0_or_1_records(api, response, error, query) + if not error and record and not record['volume']['uuid']: + error = 'volume uuid key not present in %s:' % record + if error: + self.module.fail_json(msg='Error on getting volume uuid: %s' % error) + if record: + self.volume_uuid = record['volume']['uuid'] + + def quota_entry_delete_rest(self): + """ + quota_entry_delete with rest API. + """ + if not self.use_rest: + return self.quota_entry_delete() + api = 'storage/quota/rules' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.quota_uuid) + if error is not None: + # delete operation succeeded, but reinitialize is required. + # code: 5308569 requires quota to be disabled/enabled to take effect. + # code: 5308572 error occurs when trying to delete last rule. + if '5308569' in error: + self.form_warn_msg_rest('delete', '5308569') + elif '5308572' in error: + self.form_warn_msg_rest('delete', '5308572') + else: + self.module.fail_json(msg="Error on deleting quotas rule: %s" % error) + + def quota_entry_modify_rest(self, modify_quota): + """ + quota_entry_modify with rest API. + User mapping cannot be turned on for multiuser quota rules. 
+ """ + if not self.use_rest: + return self.quota_entry_modify(modify_quota) + body = {} + if 'disk_limit' in modify_quota: + body['space.hard_limit'] = modify_quota['disk_limit'] + if 'file_limit' in modify_quota: + body['files.hard_limit'] = modify_quota['file_limit'] + if 'soft_disk_limit' in modify_quota: + body['space.soft_limit'] = modify_quota['soft_disk_limit'] + if 'soft_file_limit' in modify_quota: + body['files.soft_limit'] = modify_quota['soft_file_limit'] + if 'perform_user_mapping' in modify_quota: + body['user_mapping'] = modify_quota['perform_user_mapping'] + api = 'storage/quota/rules' + dummy, error = rest_generic.patch_async(self.rest_api, api, self.quota_uuid, body) + if error is not None: + # limits are modified but internal error, require reinitialize quota. + if '5308567' in error: + self.form_warn_msg_rest('modify', '5308567') + else: + self.module.fail_json(msg="Error on modifying quotas rule: %s" % error) + + def get_quota_status_or_volume_id_rest(self, get_volume=None): + """ + Get the status info on or off + """ + if not self.use_rest: + return self.get_quota_status() + api = 'storage/volumes' + params = {'name': self.parameters['volume'], + 'svm.name': self.parameters['vserver'], + 'fields': 'quota.state,uuid'} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + msg = "volume uuid" if get_volume else "quota status info" + self.module.fail_json(msg="Error on getting %s: %s" % (msg, error)) + if record: + return record['uuid'] if get_volume else record['quota']['state'] + self.module.fail_json(msg="Error: Volume %s in SVM %s does not exist" % (self.parameters['volume'], self.parameters['vserver'])) + + def on_or_off_quota_rest(self, status, cd_action=None): + """ + quota_entry_modify quota status with rest API. 
+ """ + if not self.use_rest: + return self.on_or_off_quota(status, cd_action) + body = {} + body['quota.enabled'] = status == 'quota-on' + api = 'storage/volumes' + if not self.volume_uuid: + self.volume_uuid = self.get_quota_status_or_volume_id_rest(get_volume=True) + dummy, error = rest_generic.patch_async(self.rest_api, api, self.volume_uuid, body) + if error is not None: + self.module.fail_json(msg='Error setting %s for %s: %s' + % (status, self.parameters['volume'], to_native(error))) + + def form_warn_msg_rest(self, action, code): + start_msg = "Quota policy rule %s opertation succeeded. " % action + end_msg = "reinitialize(disable and enable again) the quota for volume %s " \ + "in SVM %s." % (self.parameters['volume'], self.parameters['vserver']) + msg = 'unexpected code: %s' % code + if code == '5308572': + msg = "However the rule is still being enforced. To stop enforcing, " + if code in ['5308568', '5308569', '5308567']: + msg = "However quota resize failed due to an internal error. To make quotas active, " + if code == '5308571': + msg = "but quota resize is skipped. To make quotas active, " + self.warn_msg = start_msg + msg + end_msg + + def apply(self): + """ + Apply action to quotas + """ + cd_action = None + modify_quota_status = None + modify_quota = None + current = self.get_quotas_rest() + if self.parameters.get('type') is not None: + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify_quota = self.na_helper.get_modified_attributes(current, self.parameters) + quota_status = self.get_quota_status_or_volume_id_rest() + if 'set_quota_status' in self.parameters and quota_status is not None: + # if 'set_quota_status' == True in create, sometimes there is delay in status update from 'initializing' -> 'on'. 
+ # if quota_status == 'on' and options(set_quota_status == True and activate_quota_on_change == 'resize'), + # sometimes there is delay in status update from 'resizing' -> 'on' + set_quota_status = True if quota_status in ('on', 'resizing', 'initializing') else False + quota_status_action = self.na_helper.get_modified_attributes({'set_quota_status': set_quota_status}, self.parameters) + if quota_status_action: + modify_quota_status = 'quota-on' if quota_status_action['set_quota_status'] else 'quota-off' + if (self.parameters.get('activate_quota_on_change') in ['resize', 'reinitialize'] + and (cd_action is not None or modify_quota is not None) + and modify_quota_status is None + and quota_status in ('on', None)): + modify_quota_status = self.parameters['activate_quota_on_change'] + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.quota_entry_set_rest() + elif cd_action == 'delete': + self.quota_entry_delete_rest() + elif modify_quota: + self.quota_entry_modify_rest(modify_quota) + if modify_quota_status in ['quota-off', 'quota-on']: + self.on_or_off_quota_rest(modify_quota_status) + elif modify_quota_status == 'resize': + if not self.use_rest: + self.resize_quota(cd_action) + elif modify_quota_status == 'reinitialize': + self.on_or_off_quota_rest('quota-off') + time.sleep(10) # status switch interval + self.on_or_off_quota_rest('quota-on', cd_action) + # if warn message and quota not reinitialize, throw warnings to reinitialize in REST. + if self.warn_msg and modify_quota_status != 'reinitialize': + self.module.warn(self.warn_msg) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify_quota, extra_responses={'modify_quota_status': modify_quota_status}) + self.module.exit_json(**result) + + def convert_to_kb_or_bytes(self, option): + """ + convert input to kb, and set to self.parameters. + :param option: disk_limit or soft_disk_limit. + :return: boolean if it can be converted. 
+ """ + self.parameters[option].replace(' ', '') + slices = re.findall(r"\d+|\D+", self.parameters[option]) + if len(slices) < 1 or len(slices) > 2: + return False + if not slices[0].isdigit(): + return False + if len(slices) > 1 and slices[1].lower() not in ['b', 'kb', 'mb', 'gb', 'tb']: + return False + # force kb as the default unit for REST + if len(slices) == 1 and self.use_rest: + slices = (slices[0], 'kb') + if len(slices) > 1: + if not self.use_rest: + # conversion to KB + self.parameters[option] = str(int(slices[0]) * netapp_utils.POW2_BYTE_MAP[slices[1].lower()] // 1024) + else: + # conversion to Bytes + self.parameters[option] = str(int(slices[0]) * netapp_utils.POW2_BYTE_MAP[slices[1].lower()]) + if self.use_rest: + # Rounding off the converted bytes + self.parameters[option] = str(((int(self.parameters[option]) + 1023) // 1024) * 1024) + return True + + +def main(): + '''Execute action''' + quota_obj = NetAppONTAPQuotas() + quota_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py new file mode 100644 index 000000000..544057b2c --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_cli.py @@ -0,0 +1,156 @@ +#!/usr/bin/python + +# (c) 2019-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_rest_cli +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Run CLI commands on ONTAP through REST api/private/cli/. + - This module can run as admin or vsdamin and requires HTTP application to be enabled. + - Access permissions can be customized using ONTAP rest-role. 
extends_documentation_fragment:
  - netapp.ontap.netapp.na_ontap
module: na_ontap_rest_cli
short_description: NetApp ONTAP run any CLI command using REST api/private/cli/
version_added: 2.9.0
options:
  command:
    description:
      - a string command.
    required: true
    type: str
  verb:
    description:
      - a string indicating which api call to run
      - OPTIONS is useful to know which verbs are supported by the REST API
    choices: ['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']
    required: true
    type: str
  params:
    description:
      - a dictionary of parameters to pass into the api call
    type: dict
  body:
    description:
      - a dictionary for info specification
    type: dict
'''

EXAMPLES = """
    - name: run ontap rest cli command
      netapp.ontap.na_ontap_rest_cli:
        hostname: "{{ hostname }}"
        username: "{{ admin username }}"
        password: "{{ admin password }}"
        command: 'version'
        verb: 'GET'

    - name: run ontap rest cli command
      netapp.ontap.na_ontap_rest_cli:
        hostname: "{{ hostname }}"
        username: "{{ admin username }}"
        password: "{{ admin password }}"
        command: 'security/login/motd'
        verb: 'PATCH'
        params: {'vserver': 'ansibleSVM'}
        body: {'message': 'test'}

    - name: set option
      netapp.ontap.na_ontap_rest_cli:
        command: options
        verb: PATCH
        params:
          option_name: lldp.enable
        body:
          option_value: "on"
"""

RETURN = """
"""

import traceback
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI


class NetAppONTAPCommandREST():
    ''' calls a CLI command '''

    def __init__(self):
        # Build the standard ONTAP connection argument spec and add the four
        # module-specific options; 'verb' is validated against the HTTP verbs
        # supported by api/private/cli.
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            command=dict(required=True, type='str'),
            verb=dict(required=True, type='str', choices=['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']),
            params=dict(required=False, type='dict'),
            body=dict(required=False, type='dict')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.rest_api = OntapRestAPI(self.module)
        parameters = self.module.params
        # set up state variables
        self.command = parameters['command']
        self.verb = parameters['verb']
        self.params = parameters['params']
        self.body = parameters['body']

        # This module is REST-only: fail fast with a pointer to na_ontap_command
        # when the cluster does not answer REST.
        if self.rest_api.is_rest():
            self.use_rest = True
        else:
            msg = 'failed to connect to REST over %s: %s' % (parameters['hostname'], self.rest_api.errors)
            msg += '. Use na_ontap_command for non-rest CLI.'
            self.module.fail_json(msg=msg)

    def run_command(self):
        # Dispatch on the requested HTTP verb.  The final else is unreachable
        # in practice because argument_spec restricts 'verb' to these choices.
        api = "private/cli/" + self.command

        if self.verb == 'POST':
            message, error = self.rest_api.post(api, self.body, self.params)
        elif self.verb == 'GET':
            message, error = self.rest_api.get(api, self.params)
        elif self.verb == 'PATCH':
            message, error = self.rest_api.patch(api, self.body, self.params)
        elif self.verb == 'DELETE':
            message, error = self.rest_api.delete(api, self.body, self.params)
        elif self.verb == 'OPTIONS':
            message, error = self.rest_api.options(api, self.params)
        else:
            self.module.fail_json(msg='Error: unexpected verb %s' % self.verb,
                                  exception=traceback.format_exc())

        if error:
            self.module.fail_json(msg='Error: %s' % error)
        return message

    def apply(self):
        ''' calls the command and returns raw output '''
        # GET and OPTIONS are read-only; every other verb reports changed=true.
        changed = False if self.verb in ['GET', 'OPTIONS'] else True
        if self.module.check_mode:
            output = "Would run command: '%s'" % str(self.command)
        else:
            output = self.run_command()
        self.module.exit_json(changed=changed, msg=output)


def main():
    """
    Execute action from playbook
    """
    command = NetAppONTAPCommandREST()
    command.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py
b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py
new file mode 100644
index 000000000..b1b5b6dae
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_rest_info.py
@@ -0,0 +1,1138 @@
+#!/usr/bin/python
+
+# (c) 2020-2023, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" NetApp ONTAP Info using REST APIs """
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_rest_info
+author: NetApp Ansible Team (@carchi8py)
+extends_documentation_fragment:
+    - netapp.ontap.netapp.na_ontap
+short_description: NetApp ONTAP information gatherer using REST APIs
+description:
+    - This module allows you to gather various information about ONTAP configuration using REST APIs
+version_added: 20.5.0
+notes:
+    - I(security_login_role_config_info) there is no REST equivalent.
+    - I(security_login_role_info) there is no REST equivalent.
+    - I(security_key_manager_key_info) there is no REST equivalent.
+    - I(vserver_motd_info) there is no REST equivalent.
+    - I(vserver_login_banner_info) there is no REST equivalent.
+    - I(vscan_connection_extended_stats_info) there is no REST equivalent.
+    - I(env_sensors_info) there is no REST equivalent.
+    - I(fcp_adapter_info) there is no REST equivalent.
+    - I(net_dev_discovery_info) there is no REST equivalent.
+    - I(net_failover_group_info) there is no REST equivalent.
+    - I(net_firewall_info) there is no REST equivalent.
+    - I(ntfs_dacl_info) there is no REST equivalent.
+    - I(ntfs_sd_info) there is no REST equivalent.
+    - I(role_info) there is no REST equivalent.
+    - I(subsys_health_info) there is no REST equivalent.
+    - I(volume_move_target_aggr_info) there is no REST equivalent.
+
+options:
+    state:
+        type: str
+        description:
+            - deprecated as of 21.1.0.
+            - this option was ignored and continues to be ignored.
+ gather_subset: + type: list + elements: str + description: + - When supplied, this argument will restrict the information collected to a given subset. + - Either the REST API or the ZAPI info name can be given. Possible values for this argument include + - application/applications or application_info + - application/consistency-groups + - application/templates or application_template_info + - cloud/targets or cloud_targets_info + - cluster + - cluster/chassis or cluster_chassis_info + - cluster/counter/tables + - cluster/fireware/history + - cluster/jobs or cluster_jobs_info + - cluster/licensing/capacity-pools + - cluster/licensing/license-managers + - cluster/licensing/licenses or license_info + - cluster/mediators + - cluster/metrics or cluster_metrics_info + - cluster/metrocluster or metrocluster_info + - cluster/metrocluster/diagnostics or cluster_metrocluster_diagnostics or metrocluster_check_info + - cluster/metrocluster/dr-groups + - cluster/metrocluster/interconnects + - cluster/metrocluster/nodes or metrocluster-node-get-iter + - cluster/metrocluster/operations + - cluster/metrocluster/svms + - cluster/nodes or cluster_node_info or sysconfig_info + - cluster/ntp/keys + - cluster/ntp/servers or ntp_server_info + - cluster/peers or cluster_peer_info + - cluster/schedules or cluster_schedules or job_schedule_cron_info + - cluster/sensors + - cluster/software or ontap_system_version or cluster_image_info + - cluster/software/download or cluster_software_download + - cluster/software/history or cluster_software_history + - cluster/software/packages or cluster_software_packages + - cluster/web + - name-services/cache/group-membership/settings + - name-services/cache/host/settings + - name-services/cache/netgroup/settings + - name-services/cache/setting + - name-services/cache/unix-group/settings + - name-services/dns or svm_dns_config_info or net_dns_info + - name-services/ldap or svm_ldap_config_info or ldap_client or ldap_config + - 
name-services/ldap-schemas + - name-services/local-hosts + - name-services/name-mappings or svm_name_mapping_config_info + - name-services/nis or svm_nis_config_info + - name-services/unix-groups + - name-services/unix-users + - network/ethernet/broadcast-domains or broadcast_domains_info or net_port_broadcast_domain_info + - network/ethernet/ports or network_ports_info or net_port_info + - network/ethernet/switch/ports + - network/ethernet/switches or cluster_switch_info + - network/fc/fabrics + - network/fc/interfaces + - network/fc/logins or san_fc_logins_info + - network/fc/ports + - network/fc/wwpn-aliases or san_fc_wppn-aliases or fcp_alias_info + - network/http-proxy + - network/ip/bgp/peer-groups + - network/ip/interfaces or ip_interfaces_info or net_interface_info + - network/ip/routes or ip_routes_info or net_routes_info + - network/ip/service-policies or ip_service_policies or net_interface_service_policy_info + - network/ip/subnets + - network/ipspaces or network_ipspaces_info or net_ipspaces_info + - private/support/alerts or sys_cluster_alerts + - private/cli/vserver/security/file-directory or file_directory_security + - protocols/active-directory + - protocols/audit + - protocols/cifs/connections + - protocols/cifs/domains + - protocols/cifs/group-policies + - protocols/cifs/home-directory/search-paths or cifs_home_directory_info + - protocols/cifs/local-groups + - protocols/cifs/local-users + - protocols/cifs/netbios + - protocols/cifs/services or cifs_services_info or cifs_options_info + - protocols/cifs/session/files + - protocols/cifs/sessions + - protocols/cifs/shadow-copies + - protocols/cifs/shadowcopy-sets + - protocols/cifs/shares or cifs_share_info + - protocols/cifs/users-and-groups/privileges + - protocols/cifs/unix-symlink-mapping + - protocols/fpolicy + - protocols/locks + - protocols/ndmp + - protocols/ndmp/nodes + - protocols/ndmp/sessions + - protocols/ndmp/svms + - protocols/nfs/connected-clients + - 
protocols/nfs/connected-client-maps + - protocols/nfs/connected-client-settings + - protocols/nfs/export-policies or export_policy_info + - protocols/nfs/export-policies/rules B(Requires the owning_resource to be set) + - protocols/nfs/kerberos/interfaces + - protocols/nfs/kerberos/realms or kerberos_realm_info + - protocols/nfs/services or vserver_nfs_info or nfs_info + - protocols/nvme/interfaces or nvme_interface_info + - protocols/nvme/services or nvme_info + - protocols/nvme/subsystems or nvme_subsystem_info + - protocols/nvme/subsystem-controllers + - protocols/nvme/subsystem-maps + - protocols/s3/buckets + - protocols/s3/services + - protocols/san/fcp/services or san_fcp_services or fcp_service_info + - protocols/san/igroups or nitiator_groups_info or igroup_info + - protocols/san/iscsi/credentials or san_iscsi_credentials + - protocols/san/iscsi/services or san_iscsi_services or iscsi_service_info + - protocols/san/iscsi/sessions + - protocols/san/lun-maps or san_lun_maps or lun_map_info + - protocols/san/portsets + - protocols/san/vvol-bindings + - protocols/vscan or vscan_status_info or vscan_info + - protocols/vscan/on-access-policies B(Requires the owning_resource to be set) + - protocols/vscan/on-demand-policies B(Requires the owning_resource to be set) + - protocols/vscan/scanner-pools B(Requires the owning_resource to be set) + - protocols/vscan/server-status or vscan_connection_status_all_info + - security + - security/accounts or security_login_info or security_login_account_info + - security/anti-ransomware/suspects + - security/audit + - security/audit/destinations or cluster_log_forwarding_info + - security/audit/messages + - security/authentication/cluster/ad-proxy + - security/authentication/cluster/ldap + - security/authentication/cluster/nis + - security/authentication/cluster/saml-sp + - security/authentication/publickeys + - security/aws-kms + - security/azure-key-vaults + - security/certificates + - security/gcp-kms + - security/ipsec + - 
security/ipsec/ca-certificates + - security/ipsec/policies + - security/ipsec/security-associations + - security/key-manager-configs + - security/key-managers + - security/key-stores + - security/login/messages + - security/multi-admin-verify + - security/multi-admin-verify/approval-groups + - security/multi-admin-verify/requests + - security/multi-admin-verify/rules + - security/roles or security_login_rest_role_info + - security/ssh + - security/ssh/svms + - snapmirror/policies or snapmirror_policy_info + - snapmirror/relationships or snapmirror_info + - storage/aggregates or aggregate_info + - storage/bridges or storage_bridge_info + - storage/cluster + - storage/disks or disk_info + - storage/file/clone/split-loads + - storage/file/clone/split-status + - storage/file/clone/tokens + - storage/file/moves + - storage/flexcache/flexcaches or storage_flexcaches_info + - storage/flexcache/origins or storage_flexcaches_origin_info + - storage/luns or storage_luns_info or lun_info (if serial_number is present, serial_hex and naa_id are computed) + - storage/namespaces or storage_NVMe_namespaces or nvme_namespace_info + - storage/pools + - storage/ports or storage_ports_info + - storage/qos/policies or storage_qos_policies or qos_policy_info or qos_adaptive_policy_info + - storage/qos/workloads + - storage/qtrees or storage_qtrees_config or qtree_info + - storage/quota/reports or storage_quota_reports or quota_report_info + - storage/quota/rules or storage_quota_policy_rules + - storage/shelves or storage_shelves_config or shelf_info + - storage/snaplock/audit-logs + - storage/snaplock/compliance-clocks + - storage/snaplock/event-retention/operations + - storage/snaplock/event-retention/policies + - storage/snaplock/file-fingerprints + - storage/snaplock/litigations + - storage/snapshot-policies or storage_snapshot_policies or snapshot_policy_info + - storage/switches + - storage/tape-devices + - storage/volumes or volume_info + - storage/volumes/snapshots B(Requires 
the owning_resource to be set)
+            - storage/volume-efficiency-policies or sis_policy_info
+            - support/autosupport or autosupport_config_info
+            - support/autosupport/check or autosupport_check_info
+            - support/autosupport/messages or autosupport_messages_history
+            - support/auto-update
+            - support/auto-update/configurations
+            - support/auto-update/updates
+            - support/configuration-backup
+            - support/configuration-backup/backups
+            - support/coredump/coredumps
+            - support/ems or support_ems_config
+            - support/ems/destinations or event_notification_info or event_notification_destination_info
+            - support/ems/events or support_ems_events
+            - support/ems/filters or support_ems_filters
+            - support/ems/messages
+            - support/snmp
+            - support/snmp/traphosts
+            - support/snmp/users
+            - svm/migrations
+            - svm/peers or svm_peers_info or vserver_peer_info
+            - svm/peer-permissions or svm_peer-permissions_info
+            - svm/svms or vserver_info
+            - B(The following do not have direct Rest API equivalent)
+            - aggr_efficiency_info
+            - cifs_vserver_security_info
+            - clock_info
+            - cluster_identity_info
+            - net_vlan_info
+            - sis_info
+            - snapmirror_destination_info
+            - system_node_info
+            - volume_space_info
+            - Can specify a list of values to include a larger subset.
+            - REST APIs are supported with ONTAP 9.6 onwards.
+        default: "demo"
+    max_records:
+        type: int
+        description:
+            - Maximum number of records returned in a single call.
+        default: 1024
+    fields:
+        type: list
+        elements: str
+        description:
+            - Request specific fields from subset.
+            - Recommended - '' to return specified fields, only one subset will be allowed.
+            - Discouraged - '*' to return all the fields, one or more subsets are allowed. This option can be used for discovery, but is discouraged in production.
+            - Strongly discouraged - '**' to return all the fields, one or more subsets are allowed.
+              This option can put an extra load on the system and should not be used in production.
+            - Limited - '' to return default fields, generally the properties that uniquely identify the record (keys).
+              Other data is not returned by default and need to be explicitly called for using the field name or *.
+            - If the option is not present, return default fields for that API (see '' above).
+        version_added: '20.6.0'
+    parameters:
+        description:
+            - Allows for any rest option to be passed in
+        type: dict
+        version_added: '20.7.0'
+    use_python_keys:
+        description:
+            - If true, I(/) in the returned dictionary keys are translated to I(_).
+            - It makes it possible to use a . notation when processing the output.
+            - For instance I(ontap_info["svm/svms"]) can be accessed as I(ontap_info.svm_svms).
+        type: bool
+        default: false
+        version_added: '21.9.0'
+    owning_resource:
+        description:
+            - Some resources cannot be accessed directly. You need to select them based on the owner or parent. For instance, volume for a snapshot.
+            - The following subsets require an owning resource, and the following suboptions when uuid is not present.
+            - B(volume_name) is the volume name, B(svm_name) is the owning vserver name for the volume.
+            - B(policy_name) is the name of the policy, B(svm_name) is the owning vserver name for the policy,
+              B(rule_index) is the rule index.
+            - B(svm_name) is the owning vserver name for the vscan
+            - B(svm_name) is the owning vserver name for the vscan
+            - B(svm_name) is the owning vserver name for the vscan
+        type: dict
+        version_added: '21.19.0'
+    ignore_api_errors:
+        description:
+            - List of substrings.
+            - If a substring is contained in an error message when fetching a subset, the module does not fail and the error is reported in the subset.
+ type: list + elements: str + version_added: '21.23.0' +''' + +EXAMPLES = ''' +- name: run ONTAP gather facts for vserver info + netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + validate_certs: false + use_rest: Always + gather_subset: + - svm/svms + +- name: run ONTAP gather facts for aggregate info and volume info + netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + validate_certs: false + use_rest: Always + gather_subset: + - storage/aggregates + - storage/volumes + +- name: run ONTAP gather facts for all subsets + netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + validate_certs: false + use_rest: Always + gather_subset: + - all + +- name: run ONTAP gather facts for aggregate info and volume info with fields section + netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + fields: + - '*' + validate_certs: false + use_rest: Always + gather_subset: + - storage/aggregates + - storage/volumes + +- name: run ONTAP gather facts for aggregate info with specified fields + netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + fields: + - 'uuid' + - 'name' + - 'node' + validate_certs: false + use_rest: Always + gather_subset: + - storage/aggregates + parameters: + recommend: + true + +- name: Get Snapshot info (owning_resource example) + netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + fields: + - '*' + validate_certs: false + use_rest: Always + gather_subset: + - storage/volumes/snapshots + owning_resource: + volume_name: volume_name + svm_name: svm_name + +- name: run ONTAP gather facts for volume info with query on name and state + 
netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + validate_certs: false + gather_subset: + - storage/volumes + parameters: + name: ansible* + state: online + +- name: run ONTAP gather fact to get DACLs + netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + validate_certs: false + gather_subset: + - file_directory_security + parameters: + vserver: svm1 + path: /vol1/qtree1 + use_python_keys: true + +- name: get ip network interface info. + netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + validate_certs: false + gather_subset: + - ip_interfaces_info + parameters: + location.failover: home_node_only + location.node.name: ontap_cluster + service_policy.name: default-data-files + +- name: get aggregate info + netapp.ontap.na_ontap_rest_info: + hostname: "1.2.3.4" + username: "testuser" + password: "test-password" + https: true + validate_certs: false + gather_subset: + - aggregate_info + parameters: + node.name: ontap_cluster + block_storage.primary.raid_type: raid_dp + +# assuming module_defaults is used to set hostname, username, ... 
+- name: run demo subset using custom vsadmin role + netapp.ontap.na_ontap_rest_info: + gather_subset: + - demo + force_ontap_version: 9.8 + ignore_api_errors: + - 'not authorized for that command' + +# reports: {"cluster/nodes": {"error": {"code": "6", "message": "not authorized for that command"}} +''' + +import codecs +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text, to_bytes +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_owning_resource, rest_vserver + + +class NetAppONTAPGatherInfo(object): + '''Class with gather info methods''' + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(type='str', required=False), + gather_subset=dict(default=['demo'], type='list', elements='str', required=False), + max_records=dict(type='int', default=1024, required=False), + fields=dict(type='list', elements='str', required=False), + parameters=dict(type='dict', required=False), + use_python_keys=dict(type='bool', default=False), + owning_resource=dict(type='dict', required=False), + ignore_api_errors=dict(type='list', elements='str', required=False), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.fields = '' + + self.rest_api = OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_rest_info', 9, 6, 0) + + def get_subset_info(self, 
gather_subset_info, default_fields=None): + """ + Gather ONTAP information for the given subset using REST APIs + Input for REST APIs call : (api, data) + return gathered_ontap_info + """ + + api = gather_subset_info['api_call'] + if gather_subset_info.pop('post', False): + self.run_post(gather_subset_info) + if default_fields: + fields = default_fields + ',' + self.fields + elif 'fields' in gather_subset_info: + fields = gather_subset_info['fields'] + else: + fields = self.fields + + data = {'max_records': self.parameters['max_records'], 'fields': fields} + + # Delete the fields record from data if it is a private/cli API call. + # The private_cli_fields method handles the fields for API calls using the private/cli endpoint. + if '/private/cli' in api: + del data['fields'] + + # allow for passing in any additional rest api fields + if self.parameters.get('parameters'): + for each in self.parameters['parameters']: + data[each] = self.parameters['parameters'][each] + + gathered_ontap_info, error = self.rest_api.get(api, data) + + if not error: + return gathered_ontap_info + + # If the API doesn't exist (using an older system), we don't want to fail the task. + if int(error.get('code', 0)) == 3 or ( + # if Aggr recommender can't make a recommendation, it will fail with the following error code, don't fail the task. 
+ int(error.get('code', 0)) == 19726344 and "No recommendation can be made for this cluster" in error.get('message')): + return error.get('message') + + # Do not fail on error + for error_pattern in self.parameters.get('ignore_api_errors', []): + if error_pattern in error.get('message'): + return {'error': error} + # Fail the module if error occurs from REST APIs call + if int(error.get('code', 0)) == 6: + error = "Error: %s user is not authorized to make %s api call" % (self.parameters.get('username'), api) + self.module.fail_json(msg=error) + + @staticmethod + def strip_dacls(response): + # Use 'DACL - ACE' as a marker for the start of the list of DACLS in the descriptor. + if 'acls' not in response['records'][0]: + return None + if 'DACL - ACEs' not in response['records'][0]['acls']: + return None + index = response['records'][0]['acls'].index('DACL - ACEs') + dacls = response['records'][0]['acls'][(index + 1):] + + dacl_list = [] + if dacls: + for dacl in dacls: + # The '-' marker is the start of the DACL, the '-0x' marker is the end of the DACL. 
+ start_hyphen = dacl.index('-') + 1 + first_hyphen_removed = dacl[start_hyphen:] + end_hyphen = first_hyphen_removed.index('-0x') + dacl_dict = {'access_type': dacl[:start_hyphen - 1].strip()} + dacl_dict['user_or_group'] = first_hyphen_removed[:end_hyphen] + dacl_list.append(dacl_dict) + return dacl_list + + def run_post(self, gather_subset_info): + api = gather_subset_info['api_call'] + post_return, error = self.rest_api.post(api, None) + if error: + return None + dummy, error = self.rest_api.wait_on_job(post_return['job'], increment=5) + if error: + # TODO: Handle errors that are not errors + self.module.fail_json(msg="%s" % error) + + def get_next_records(self, api): + """ + Gather next set of ONTAP information for the specified api + Input for REST APIs call : (api, data) + return gather_subset_info + """ + + data = {} + gather_subset_info, error = self.rest_api.get(api, data) + + if error: + self.module.fail_json(msg=error) + + return gather_subset_info + + def private_cli_fields(self, api): + ''' + The private cli endpoint does not allow '*' to be an entered. + If fields='*' or fields are not included within the playbook, the API call will be populated to return all possible fields. + If fields is entered into the playbook the fields entered will be used when calling the API. 
+ ''' + if 'fields' not in self.parameters or '*' in self.parameters['fields'] or '**' in self.parameters['fields']: + if api == 'support/autosupport/check': + fields = 'node,corrective-action,status,error-detail,check-type,check-category' + elif api == 'private/cli/vserver/security/file-directory': + fields = 'acls' + else: + self.module.fail_json(msg='Internal error, no field for %s' % api) + else: + fields = ','.join(self.parameters['fields']) + return fields + + def convert_subsets(self): + """ + Convert an info to the REST API + """ + info_to_rest_mapping = { + "aggregate_info": "storage/aggregates", + "aggr_efficiency_info": ['storage/aggregates', 'space.efficiency,name,node'], + "application_info": "application/applications", + "application_template_info": "application/templates", + "autosupport_check_info": "support/autosupport/check", + "autosupport_config_info": "support/autosupport", + "autosupport_messages_history": "support/autosupport/messages", + "broadcast_domains_info": "network/ethernet/broadcast-domains", + "cifs_home_directory_info": "protocols/cifs/home-directory/search-paths", + "cifs_options_info": "protocols/cifs/services", + "cifs_services_info": "protocols/cifs/services", + "cifs_share_info": "protocols/cifs/shares", + "cifs_vserver_security_info": ["protocols/cifs/services", "security.encrypt_dc_connection," + "security.kdc_encryption,security.smb_signing," + "security.smb_encryption," + "security.lm_compatibility_level,svm.name"], + "clock_info": ["cluster/nodes", "date"], + "cloud_targets_info": "cloud/targets", + "cluster_chassis_info": "cluster/chassis", + "cluster_identity_info": ["cluster", "contact,location,name,uuid"], + "cluster_image_info": "cluster/software", + "cluster_jobs_info": "cluster/jobs", + "cluster_log_forwarding_info": "security/audit/destinations", + "cluster_metrocluster_diagnostics": "cluster/metrocluster/diagnostics", + "cluster_metrics_info": "cluster/metrics", + "cluster_node_info": "cluster/nodes", + 
"cluster_peer_info": "cluster/peers", + "cluster_schedules": "cluster/schedules", + "cluster_software_download": "cluster/software/download", + "cluster_software_history": "cluster/software/history", + "cluster_software_packages": "cluster/software/packages", + "cluster_switch_info": "network/ethernet/switches", + "disk_info": "storage/disks", + "event_notification_info": "support/ems/destinations", + "event_notification_destination_info": "support/ems/destinations", + "export_policy_info": "protocols/nfs/export-policies", + "fcp_alias_info": "network/fc/wwpn-aliases", + "fcp_service_info": "protocols/san/fcp/services", + "file_directory_security": "private/cli/vserver/security/file-directory", + "igroup_info": "protocols/san/igroups", + "initiator_groups_info": "protocols/san/igroups", + "ip_interfaces_info": "network/ip/interfaces", + "ip_routes_info": "network/ip/routes", + "ip_service_policies": "network/ip/service-policies", + "iscsi_service_info": "protocols/san/iscsi/services", + "job_schedule_cron_info": "cluster/schedules", + "kerberos_realm_info": "protocols/nfs/kerberos/realms", + "ldap_client": "name-services/ldap", + "ldap_config": "name-services/ldap", + "license_info": "cluster/licensing/licenses", + "lun_info": "storage/luns", + "lun_map_info": "protocols/san/lun-maps", + "net_dns_info": "name-services/dns", + "net_interface_info": "network/ip/interfaces", + "net_interface_service_policy_info": "network/ip/service-policies", + "net_port_broadcast_domain_info": "network/ethernet/broadcast-domains", + "net_port_info": "network/ethernet/ports", + "net_routes_info": "network/ip/routes", + "net_ipspaces_info": "network/ipspaces", + "net_vlan_info": ["network/ethernet/ports", "name,node.name,vlan.base_port,vlan.tag"], + "network_ipspaces_info": "network/ipspaces", + "network_ports_info": "network/ethernet/ports", + "nfs_info": "protocols/nfs/services", + "ntp_server_info": "cluster/ntp/servers", + "nvme_info": "protocols/nvme/services", + 
"nvme_interface_info": "protocols/nvme/interfaces", + "nvme_namespace_info": "storage/namespaces", + "nvme_subsystem_info": "protocols/nvme/subsystems", + "metrocluster_info": "cluster/metrocluster", + "metrocluster_node_info": "cluster/metrocluster/nodes", + "metrocluster_check_info": "cluster/metrocluster/diagnostics", + "ontap_system_version": "cluster/software", + "quota_report_info": "storage/quota/reports", + "qos_policy_info": "storage/qos/policies", + "qos_adaptive_policy_info": "storage/qos/policies", + "qtree_info": "storage/qtrees", + "san_fc_logins_info": "network/fc/logins", + "san_fc_wppn-aliases": "network/fc/wwpn-aliases", + "san_fcp_services": "protocols/san/fcp/services", + "san_iscsi_credentials": "protocols/san/iscsi/credentials", + "san_iscsi_services": "protocols/san/iscsi/services", + "san_lun_maps": "protocols/san/lun-maps", + "security_login_account_info": "security/accounts", + "security_login_info": "security/accounts", + "security_login_rest_role_info": "security/roles", + "shelf_info": "storage/shelves", + "sis_info": ["storage/volumes", "efficiency.compression,efficiency.cross_volume_dedupe," + "efficiency.cross_volume_dedupe,efficiency.compaction," + "efficiency.compression,efficiency.dedupe,efficiency.policy.name," + "efficiency.schedule,svm.name"], + "sis_policy_info": "storage/volume-efficiency-policies", + "snapmirror_destination_info": ["snapmirror/relationships", "destination.path,destination.svm.name," + "destination.svm.uuid,policy.type,uuid,state," + "source.path,source.svm.name,source.svm.uuid," + "transfer.bytes_transferred"], + "snapmirror_info": "snapmirror/relationships", + "snapmirror_policy_info": "snapmirror/policies", + "snapshot_policy_info": "storage/snapshot-policies", + "storage_bridge_info": "storage/bridges", + "storage_flexcaches_info": "storage/flexcache/flexcaches", + "storage_flexcaches_origin_info": "storage/flexcache/origins", + "storage_luns_info": "storage/luns", + "storage_NVMe_namespaces": 
"storage/namespaces", + "storage_ports_info": "storage/ports", + "storage_qos_policies": "storage/qos/policies", + "storage_qtrees_config": "storage/qtrees", + "storage_quota_reports": "storage/quota/reports", + "storage_quota_policy_rules": "storage/quota/rules", + "storage_shelves_config": "storage/shelves", + "storage_snapshot_policies": "storage/snapshot-policies", + "support_ems_config": "support/ems", + "support_ems_events": "support/ems/events", + "support_ems_filters": "support/ems/filters", + "svm_dns_config_info": "name-services/dns", + "svm_ldap_config_info": "name-services/ldap", + "svm_name_mapping_config_info": "name-services/name-mappings", + "svm_nis_config_info": "name-services/nis", + "svm_peers_info": "svm/peers", + "svm_peer-permissions_info": "svm/peer-permissions", + "sysconfig_info": "cluster/nodes", + "system_node_info": ["cluster/nodes", "controller.cpu.firmware_release,controller.failed_fan.count," + "controller.failed_fan.message," + "controller.failed_power_supply.count," + "controller.failed_power_supply.message," + "controller.over_temperature,is_all_flash_optimized," + "is_all_flash_select_optimized,is_capacity_optimized,state,name," + "location,model,nvram.id,owner,serial_number,storage_configuration," + "system_id,uptime,uuid,vendor_serial_number,nvram.battery_state," + "version,vm.provider_type"], + "sys_cluster_alerts": "private/support/alerts", + "vserver_info": "svm/svms", + "vserver_peer_info": "svm/peers", + "vserver_nfs_info": "protocols/nfs/services", + "volume_info": "storage/volumes", + "volume_space_info": ["storage/volumes", 'space.logical_space.available,space.logical_space.used,' + 'space.logical_space.used_percent,space.snapshot.reserve_size,' + 'space.snapshot.reserve_percent,space.used,name,svm.name'], + "vscan_connection_status_all_info": "protocols/vscan/server-status", + "vscan_info": "protocols/vscan", + "vscan_status_info": "protocols/vscan" + } + # Add rest API names as there info version, also make sure we 
don't add a duplicate + subsets = [] + for subset in self.parameters['gather_subset']: + if subset in info_to_rest_mapping: + if info_to_rest_mapping[subset] not in subsets: + subsets.append(info_to_rest_mapping[subset]) + elif subset not in subsets: + subsets.append(subset) + return subsets + + def add_naa_id(self, info): + ''' https://kb.netapp.com/Advice_and_Troubleshooting/Data_Storage_Systems/FlexPod_with_Infrastructure_Automation/ + How_to_match__LUNs_NAA_number_to_its_serial_number + ''' + if info and 'records' in info: + for lun in info['records']: + if 'serial_number' in lun: + hexlify = codecs.getencoder('hex') + lun['serial_hex'] = to_text(hexlify(to_bytes(lun['serial_number']))[0]) + lun['naa_id'] = 'naa.600a0980' + lun['serial_hex'] + + def augment_subset_info(self, subset, subset_info): + if subset == 'private/cli/vserver/security/file-directory': + # creates a new list of dicts + subset_info = self.strip_dacls(subset_info) + if subset == 'storage/luns': + # mutates the existing dicts + self.add_naa_id(subset_info) + return subset_info + + def get_ontap_subset_info_all(self, subset, default_fields, get_ontap_subset_info): + """ Iteratively get all records for a subset """ + try: + # Verify whether the supported subset passed + specified_subset = get_ontap_subset_info[subset] + except KeyError: + self.module.fail_json(msg="Specified subset %s is not found, supported subsets are %s" % + (subset, list(get_ontap_subset_info.keys()))) + if 'api_call' not in specified_subset: + specified_subset['api_call'] = subset + subset_info = self.get_subset_info(specified_subset, default_fields) + + if subset_info is not None and isinstance(subset_info, dict) and '_links' in subset_info: + while subset_info['_links'].get('next'): + # Get all the set of records if next link found in subset_info for the specified subset + next_api = subset_info['_links']['next']['href'] + gathered_subset_info = self.get_next_records(next_api.replace('/api', '')) + + # Update the subset 
info for the specified subset + subset_info['_links'] = gathered_subset_info['_links'] + subset_info['records'].extend(gathered_subset_info['records']) + + # metrocluster doesn't have a records field, so we need to skip this + if subset_info.get('records') is not None: + # Getting total number of records + subset_info['num_records'] = len(subset_info['records']) + + return self.augment_subset_info(subset, subset_info) + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + + # Defining gather_subset and appropriate api_call + get_ontap_subset_info = { + 'application/applications': {}, + 'application/consistency-groups': {'version': (9, 10, 1)}, + 'application/templates': {}, + 'cloud/targets': {}, + 'cluster': {}, + 'cluster/chassis': {}, + 'cluster/counter/tables': {'version': (9, 11, 1)}, + 'cluster/fireware/history': {'version': (9, 8)}, + 'cluster/jobs': {}, + 'cluster/licensing/capacity-pools': {'version': (9, 8)}, + 'cluster/licensing/license-managers': {'version': (9, 8)}, + 'cluster/licensing/licenses': {}, + 'cluster/mediators': {'version': (9, 8)}, + 'cluster/metrics': {}, + 'cluster/metrocluster': {'version': (9, 8)}, + 'cluster/metrocluster/diagnostics': { + 'version': (9, 8), + 'post': True + }, + 'cluster/metrocluster/dr-groups': {'version': (9, 8)}, + 'cluster/metrocluster/interconnects': {'version': (9, 8)}, + 'cluster/metrocluster/nodes': {'version': (9, 8)}, + 'cluster/metrocluster/operations': {'version': (9, 8)}, + 'cluster/metrocluster/svms': {'version': (9, 11, 1)}, + 'cluster/nodes': {}, + 'cluster/ntp/keys': {'version': (9, 7)}, + 'cluster/ntp/servers': {'version': (9, 7)}, + 'cluster/peers': {}, + 'cluster/schedules': {}, + 'cluster/sensors': {'version': (9, 11, 1)}, + 'cluster/software': {}, + 'cluster/software/download': {'version': (9, 7)}, + 'cluster/software/history': {}, + 'cluster/software/packages': {}, + 'cluster/web': {'version': (9, 10, 1)}, + 'name-services/cache/group-membership/settings': {'version': 
(9, 11, 1)}, + 'name-services/cache/host/settings': {'version': (9, 11, 1)}, + 'name-services/cache/netgroup/settings': {'version': (9, 11, 1)}, + 'name-services/cache/setting': {'version': (9, 11, 1)}, + 'name-services/cache/unix-group/settings': {'version': (9, 11, 1)}, + 'name-services/dns': {}, + 'name-services/ldap': {}, + 'name-services/ldap-schemas': {'version': (9, 11, 1)}, + 'name-services/local-hosts': {'version': (9, 10, 1)}, + 'name-services/name-mappings': {}, + 'name-services/nis': {}, + 'name-services/unix-groups': {'version': (9, 9)}, + 'name-services/unix-users': {'version': (9, 9)}, + 'network/ethernet/broadcast-domains': {}, + 'network/ethernet/ports': {}, + 'network/ethernet/switch/ports': {'version': (9, 8)}, + 'network/ethernet/switches': {'version': (9, 8)}, + 'network/fc/fabrics': {'version': (9, 11, 1)}, + 'network/fc/interfaces': {}, + 'network/fc/logins': {}, + 'network/fc/ports': {}, + 'network/fc/wwpn-aliases': {}, + 'network/http-proxy': {'version': (9, 7)}, + 'network/ip/bgp/peer-groups': {'version': (9, 7)}, + 'network/ip/interfaces': {}, + 'network/ip/routes': {}, + 'network/ip/service-policies': {}, + 'network/ip/subnets': {'version': (9, 11, 1)}, + 'network/ipspaces': {}, + 'private/support/alerts': {}, + 'protocols/active-directory': {'version': (9, 12, 1)}, + 'protocols/audit': {}, + 'protocols/cifs/connections': {'version': (9, 11, 1)}, + 'protocols/cifs/domains': {'version': (9, 10, 1)}, + 'protocols/cifs/group-policies': {'version': (9, 12, 1)}, + 'protocols/cifs/home-directory/search-paths': {}, + 'protocols/cifs/local-groups': {'version': (9, 9)}, + 'protocols/cifs/local-users': {'version': (9, 9)}, + 'protocols/cifs/netbios': {'version': (9, 11, 1)}, + 'protocols/cifs/services': {}, + 'protocols/cifs/session/files': {'version': (9, 11, 1)}, + 'protocols/cifs/sessions': {'version': (9, 8)}, + 'protocols/cifs/shadow-copies': {'version': (9, 11, 1)}, + 'protocols/cifs/shadowcopy-sets': {'version': (9, 11, 1)}, + 
'protocols/cifs/shares': {}, + 'protocols/cifs/unix-symlink-mapping': {}, + 'protocols/cifs/users-and-groups/privileges': {'version': (9, 9)}, + 'protocols/fpolicy': {}, + 'protocols/locks': {'version': (9, 10, 1)}, + 'protocols/ndmp': {'version': (9, 7)}, + 'protocols/ndmp/nodes': {'version': (9, 7)}, + 'protocols/ndmp/sessions': {'version': (9, 7)}, + 'protocols/ndmp/svms': {'version': (9, 7)}, + 'protocols/nfs/connected-clients': {'version': (9, 7)}, + 'protocols/nfs/connected-client-maps': {'version': (9, 11, 1)}, + 'protocols/nfs/connected-client-settings': {'version': (9, 12, 1)}, + 'protocols/nfs/export-policies': {}, + 'protocols/nfs/kerberos/interfaces': {}, + 'protocols/nfs/kerberos/realms': {}, + 'protocols/nfs/services': {}, + 'protocols/nvme/interfaces': {}, + 'protocols/nvme/services': {}, + 'protocols/nvme/subsystem-controllers': {}, + 'protocols/nvme/subsystem-maps': {}, + 'protocols/nvme/subsystems': {}, + 'protocols/s3/buckets': {'version': (9, 7)}, + 'protocols/s3/services': {'version': (9, 7)}, + 'protocols/san/fcp/services': {}, + 'protocols/san/igroups': {}, + 'protocols/san/iscsi/credentials': {}, + 'protocols/san/iscsi/services': {}, + 'protocols/san/iscsi/sessions': {}, + 'protocols/san/lun-maps': {}, + 'protocols/san/portsets': {'version': (9, 9)}, + 'protocols/san/vvol-bindings': {'version': (9, 10, 1)}, + 'protocols/vscan/server-status': {}, + 'protocols/vscan': {}, + 'security': {'version': (9, 7)}, + 'security/accounts': {}, + 'security/anti-ransomware/suspects': {'version': (9, 10, 1)}, + 'security/audit': {}, + 'security/audit/destinations': {}, + 'security/audit/messages': {}, + 'security/authentication/cluster/ad-proxy': {'version': (9, 7)}, + 'security/authentication/cluster/ldap': {}, + 'security/authentication/cluster/nis': {}, + 'security/authentication/cluster/saml-sp': {}, + 'security/authentication/publickeys': {'version': (9, 7)}, + 'security/aws-kms': {'version': (9, 12, 1)}, + 'security/azure-key-vaults': {'version': (9, 
8)}, + 'security/certificates': {}, + 'security/gcp-kms': {'version': (9, 9)}, + 'security/ipsec': {'version': (9, 8)}, + 'security/ipsec/ca-certificates': {'version': (9, 10, 1)}, + 'security/ipsec/policies': {'version': (9, 8)}, + 'security/ipsec/security-associations': {'version': (9, 8)}, + 'security/key-manager-configs': {'version': (9, 10, 1)}, + 'security/key-managers': {}, + 'security/key-stores': {'version': (9, 10, 1)}, + 'security/login/messages': {}, + 'security/multi-admin-verify': {'version': (9, 11, 1)}, + 'security/multi-admin-verify/approval-groups': {'version': (9, 11, 1)}, + 'security/multi-admin-verify/requests': {'version': (9, 11, 1)}, + 'security/multi-admin-verify/rules': {'version': (9, 11, 1)}, + 'security/roles': {}, + 'security/ssh': {'version': (9, 7)}, + 'security/ssh/svms': {'version': (9, 10, 1)}, + 'snapmirror/policies': {}, + 'snapmirror/relationships': {}, + 'storage/aggregates': {}, + 'storage/bridges': {'version': (9, 9)}, + 'storage/cluster': {}, + 'storage/disks': {}, + 'storage/file/clone/split-loads': {'version': (9, 10, 1)}, + 'storage/file/clone/split-status': {'version': (9, 10, 1)}, + 'storage/file/clone/tokens': {'version': (9, 10, 1)}, + 'storage/file/moves': {'version': (9, 11, 1)}, + 'storage/flexcache/flexcaches': {}, + 'storage/flexcache/origins': {}, + 'storage/luns': {}, + 'storage/namespaces': {}, + 'storage/pools': {'version': (9, 11, 1)}, + 'storage/ports': {}, + 'storage/qos/policies': {}, + 'storage/qos/workloads': {'version': (9, 10, 1)}, + 'storage/qtrees': {}, + 'storage/quota/reports': {}, + 'storage/quota/rules': {}, + 'storage/shelves': {}, + 'storage/snaplock/audit-logs': {'version': (9, 7)}, + 'storage/snaplock/compliance-clocks': {'version': (9, 7)}, + 'storage/snaplock/event-retention/operations': {'version': (9, 7)}, + 'storage/snaplock/event-retention/policies': {'version': (9, 7)}, + 'storage/snaplock/file-fingerprints': {'version': (9, 7)}, + 'storage/snaplock/litigations': {'version': (9, 7)}, 
+ 'storage/snapshot-policies': {}, + 'storage/switches': {'version': (9, 9)}, + 'storage/tape-devices': {'version': (9, 9)}, + 'storage/volumes': {}, + 'storage/volume-efficiency-policies': {'version': (9, 8)}, + 'support/autosupport': {}, + 'support/autosupport/check': { + 'api_call': '/private/cli/system/node/autosupport/check/details', + 'fields': self.private_cli_fields('support/autosupport/check'), + }, + 'support/autosupport/messages': {}, + 'support/auto-update': {'version': (9, 10, 1)}, + 'support/auto-update/configurations': {'version': (9, 10, 1)}, + 'support/auto-update/updates': {'version': (9, 10, 1)}, + 'support/configuration-backup': {}, + 'support/configuration-backup/backups': {'version': (9, 7)}, + 'support/coredump/coredumps': {'version': (9, 10, 1)}, + 'support/ems': {}, + 'support/ems/destinations': {}, + 'support/ems/events': {}, + 'support/ems/filters': {}, + 'support/ems/messages': {}, + 'support/snmp': {'version': (9, 7)}, + 'support/snmp/traphosts': {'version': (9, 7)}, + 'support/snmp/users': {'version': (9, 7)}, + 'svm/migrations': {'version': (9, 10, 1)}, + 'svm/peers': {}, + 'svm/peer-permissions': {}, + 'svm/svms': {} + } + if 'gather_subset' in self.parameters and ( + 'private/cli/vserver/security/file-directory' in self.parameters['gather_subset'] + or 'file_directory_security' in self.parameters['gather_subset'] + ): + get_ontap_subset_info['private/cli/vserver/security/file-directory'] = { + 'api_call': 'private/cli/vserver/security/file-directory', + 'fields': self.private_cli_fields('private/cli/vserver/security/file-directory') + } + if 'all' in self.parameters['gather_subset']: + # If all in subset list, get the information of all subsets + self.parameters['gather_subset'] = sorted(get_ontap_subset_info.keys()) + if 'demo' in self.parameters['gather_subset']: + self.parameters['gather_subset'] = ['cluster/software', 'svm/svms', 'cluster/nodes'] + get_ontap_subset_info = self.add_uuid_subsets(get_ontap_subset_info) + + 
length_of_subsets = len(self.parameters['gather_subset']) + unsupported_subsets = self.subset_version_warning(get_ontap_subset_info) + + if self.parameters.get('fields'): + if '**' in self.parameters.get('fields'): + self.module.warn('Using ** can put an extra load on the system and should not be used in production') + # If multiple fields specified to return, convert list to string + self.fields = ','.join(self.parameters.get('fields')) + + if self.fields not in ('*', '**') and length_of_subsets > 1: + # Restrict gather subsets to one subset if fields section is list_of_fields + self.module.fail_json(msg="Error: fields: %s, only one subset will be allowed." % self.parameters.get('fields')) + converted_subsets = self.convert_subsets() + + result_message = {} + for subset in converted_subsets: + subset, default_fields = subset if isinstance(subset, list) else (subset, None) + result_message[subset] = self.get_ontap_subset_info_all(subset, default_fields, get_ontap_subset_info) + for subset in unsupported_subsets: + result_message[subset] = '%s requires ONTAP %s' % (subset, get_ontap_subset_info[subset]['version']) + + results = {'changed': False} + if self.parameters.get('state') is not None: + results['state'] = self.parameters['state'] + results['warnings'] = "option 'state' is deprecated." 
+ if self.parameters['use_python_keys']: + new_dict = dict((key.replace('/', '_'), value) for (key, value) in result_message.items()) + new_dict = dict((key.replace('-', '_'), value) for (key, value) in new_dict.items()) + result_message = new_dict + self.module.exit_json(ontap_info=result_message, **results) + + def subset_version_warning(self, get_ontap_subset_info): + # If a user requests a subset that their version of ONTAP does not support give them a warning (but don't fail) + unsupported_subset = [] + warn_message = '' + user_version = self.rest_api.get_ontap_version() + for subset in self.parameters['gather_subset']: + if subset in get_ontap_subset_info and 'version' in get_ontap_subset_info[subset] and get_ontap_subset_info[subset]['version'] > user_version: + warn_message += '%s requires %s, ' % (subset, get_ontap_subset_info[subset]['version']) + # remove subset so info dosn't fail for a bad subset + unsupported_subset.append(subset) + self.parameters['gather_subset'].remove(subset) + if warn_message != '': + self.module.warn('The following subset have been removed from your query as they are not supported on your version of ONTAP %s' % warn_message) + return unsupported_subset + + def add_uuid_subsets(self, get_ontap_subset_info): + params = self.parameters.get('owning_resource') + if 'gather_subset' in self.parameters: + if 'storage/volumes/snapshots' in self.parameters['gather_subset']: + self.check_error_values('storage/volumes/snapshots', params, ['volume_name', 'svm_name']) + volume_uuid = rest_owning_resource.get_volume_uuid(self.rest_api, self.parameters['owning_resource']['volume_name'], + self.parameters['owning_resource']['svm_name'], self.module) + if volume_uuid: + get_ontap_subset_info['storage/volumes/snapshots'] = {'api_call': 'storage/volumes/%s/snapshots' % volume_uuid} + if 'protocols/nfs/export-policies/rules' in self.parameters['gather_subset']: + self.check_error_values('protocols/nfs/export-policies/rules', params, ['policy_name', 
def check_error_values(self, api, params, items):
    """Fail the module unless *params* holds exactly the keys listed in *items*.

    Used to validate the ``owning_resource`` options required by a given
    gather_subset API; the key order does not matter.
    """
    expected = sorted(items)
    provided = sorted(params) if params else None
    if provided != expected:
        self.module.fail_json(msg="Error: %s are required for %s" % (', '.join(items), api))
000000000..7bfd63b71 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_restit.py @@ -0,0 +1,393 @@ +#!/usr/bin/python +''' +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Call a REST API on ONTAP. + - Cluster REST API are run using a cluster admin account. + - Vserver REST API can be run using a vsadmin account or using vserver tunneling (cluster admin with I(vserver_) options). + - In case of success, a json dictionary is returned as C(response). + - In case of a REST API error, C(status_code), C(error_code), C(error_message) are set to help with diagnosing the issue, + - and the call is reported as an error ('failed'). + - Other errors (eg connection issues) are reported as Ansible error. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_restit +short_description: NetApp ONTAP Run any REST API on ONTAP +version_added: "20.4.0" +options: + api: + description: + - The REST API to call (eg I(cluster/software), I(svms/svm)). + required: true + type: str + method: + description: + - The REST method to use. + default: GET + type: str + query: + description: + - A list of dictionaries for the query parameters + type: dict + body: + description: + - A dictionary for the info parameter + type: dict + aliases: ['info'] + vserver_name: + description: + - if provided, forces vserver tunneling. username identifies a cluster admin account. + type: str + vserver_uuid: + description: + - if provided, forces vserver tunneling. username identifies a cluster admin account. 
+ type: str + hal_linking: + description: + - if true, HAL-encoded links are returned in the response. + default: false + type: bool + wait_for_completion: + description: + - when true, POST/PATCH/DELETE can be handled synchronously and asynchronously. + - if the response indicates that a job is in progress, the job status is checked periodically until is completes. + - when false, the call returns immediately. + type: bool + default: false + version_added: 21.14.0 + files: + description: + - A dictionary for the parameters when using multipart/form-data. + - This is very infrequently needed, but required to write a file (see examples) + - When present, requests will automatically set the Content-Type header to multipart/form-data. + type: dict + version_added: 21.24.0 + accept_header: + description: + - Value for the Accept request HTTP header. + - This is very infrequently needed, but required to read a file (see examples). + - For most cases, omit this field. Set it to "multipart/form-data" when expecting such a format. + - By default the module is using "application/json" or "application/hal+json" when hal_linking is true. 
+ type: str + version_added: 21.24.0 +''' + +EXAMPLES = """ +- + name: Ontap REST API + hosts: localhost + gather_facts: False + collections: + - netapp.ontap + vars: + login: &login + hostname: "{{ admin_ip }}" + username: "{{ admin_username }}" + password: "{{ admin_password }}" + https: true + validate_certs: false + svm_login: &svm_login + hostname: "{{ svm_admin_ip }}" + username: "{{ svm_admin_username }}" + password: "{{ svm_admin_password }}" + https: true + validate_certs: false + + tasks: + - name: run ontap REST API command as cluster admin + na_ontap_restit: + <<: *login + api: cluster/software + register: result + - debug: var=result + - assert: { that: result.status_code==200, quiet: True } + + - name: run ontap REST API command as cluster admin + na_ontap_restit: + <<: *login + api: cluster/software + query: + fields: version + register: result + - debug: var=result + - assert: { that: result.status_code==200, quiet: True } + + - name: run ontap REST API command as cluster admin + na_ontap_restit: + <<: *login + api: svm/svms + register: result + - debug: var=result + - assert: { that: result.status_code==200, quiet: True } + + - name: run ontap REST API command as cluster admin + na_ontap_restit: + <<: *login + api: svm/svms + query: + fields: aggregates,cifs,nfs,uuid + query_fields: name + query: trident_svm + hal_linking: true + register: result + - debug: var=result + + - name: run ontap REST API command as vsadmin + na_ontap_restit: + <<: *svm_login + api: svm/svms + register: result + - debug: var=result + - assert: { that: result.status_code==200, quiet: True } + + - name: run ontap REST API command as vserver tunneling + na_ontap_restit: + <<: *login + api: storage/volumes + vserver_name: ansibleSVM + register: result + - debug: var=result + - assert: { that: result.status_code==200, quiet: True } + - set_fact: + uuid: "{{ result.response.records | json_query(get_uuid) }}" + vars: + get_uuid: "[? 
name=='deleteme_ln1'].uuid" + - debug: var=uuid + + - name: run ontap REST API command as DELETE method with vserver tunneling + na_ontap_restit: + <<: *login + api: "storage/volumes/{{ uuid[0] }}" + method: DELETE + vserver_name: ansibleSVM + query: + return_timeout: 60 + register: result + when: uuid|length == 1 + - debug: var=result + - assert: { that: result.skipped|default(false) or result.status_code|default(404) == 200, quiet: True } + + - name: run ontap REST API command as POST method with vserver tunneling + na_ontap_restit: + <<: *login + api: storage/volumes + method: POST + vserver_name: ansibleSVM + query: + return_records: "true" + return_timeout: 60 + body: + name: deleteme_ln1 + aggregates: + - name: aggr1 + register: result + - debug: var=result + - assert: { that: result.status_code==201, quiet: True } + + - name: run ontap REST API command as DELETE method with vserver tunneling + # delete test volume if present + na_ontap_restit: + <<: *login + api: "storage/volumes/{{ result.response.records[0].uuid }}" + method: DELETE + vserver_name: ansibleSVM + query: + return_timeout: 60 + register: result + - debug: var=result + - assert: { that: result.status_code==200, quiet: True } + + - name: create a file + # assuming credentials are set using module_defaults + na_ontap_restit: + api: storage/volumes/f3c003cb-2974-11ed-b2f8-005056b38dae/files/laurent123.txt + method: post + files: {'data': 'some data'} + + - name: read a file + # assuming credentials are set using module_defaults + na_ontap_restit: + api: storage/volumes/f3c003cb-2974-11ed-b2f8-005056b38dae/files/laurent123.txt + method: get + accept_header: "multipart/form-data" + query: + length: 100 + +# error cases + - name: run ontap REST API command + na_ontap_restit: + <<: *login + api: unknown/endpoint + register: result + ignore_errors: True + - debug: var=result + - assert: { that: result.status_code==404, quiet: True } + +""" + +RETURN = """ +response: + description: + - If successful, a 
json dictionary returned by the REST API. + - If the REST API was executed but failed, an empty dictionary. + - Not present if the REST API call cannot be performed. + returned: On success + type: dict +status_code: + description: + - The http status code. + - When wait_for_completion is True, this is forced to 0. + returned: Always + type: str +error_code: + description: + - If the REST API was executed but failed, the error code set by the REST API. + - Not present if successful, or if the REST API call cannot be performed. + returned: On error + type: str +error_message: + description: + - If the REST API was executed but failed, the error message set by the REST API. + - Not present if successful, or if the REST API call cannot be performed. + returned: On error + type: str +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPRestAPI(object): + ''' calls a REST API command ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + api=dict(required=True, type='str'), + method=dict(required=False, type='str', default='GET'), + query=dict(required=False, type='dict'), + body=dict(required=False, type='dict', aliases=['info']), + vserver_name=dict(required=False, type='str'), + vserver_uuid=dict(required=False, type='str'), + hal_linking=dict(required=False, type='bool', default=False), + wait_for_completion=dict(required=False, type='bool', default=False), + # to support very infrequent form-data format + files=dict(required=False, type='dict'), + accept_header=dict(required=False, type='str'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + ) + parameters = 
def fail_on_error(self, status, response, error):
    """Call ``module.fail_json`` with normalized error details when *error* is set.

    A dict error is split into ``error_message`` (its 'message' key) and
    ``error_code`` (its 'code' key); any remaining keys are surfaced through
    the generic message.  A non-dict error becomes the error_message itself.
    No-op when *error* is falsy.

    :param status: HTTP status code reported back to the caller.
    :param response: REST response body reported back to the caller.
    :param error: error payload (dict or string) returned by send_request.
    """
    if not error:
        return
    if isinstance(error, dict):
        # Work on a copy so the caller's dict is not mutated by the pops
        # (the original popped 'message'/'code' out of the shared dict).
        remainder = dict(error)
        error_message = remainder.pop('message', None)
        error_code = remainder.pop('code', None)
        if not remainder:
            # we exhausted the dictionary
            remainder = 'check error_message and error_code for details.'
        error = remainder
    else:
        error_message = error
        error_code = None

    msg = "Error when calling '%s': %s" % (self.api, str(error))
    self.module.fail_json(msg=msg, status_code=status, response=response, error_message=error_message, error_code=error_code)
def apply(self):
    '''Run the REST call (or simulate it in check mode) and exit with the result.'''
    if self.module.check_mode:
        # Check mode: do not touch the system, report what would be run.
        response = {'check_mode': 'would run %s %s' % (self.method, self.api)}
        status_code = None
    else:
        runner = self.run_api_async if self.wait_for_completion else self.run_api
        status_code, response = runner()
    self.module.exit_json(changed=True, status_code=status_code, response=response)
+ type: str + required: true + + aggregates: + description: + - List of aggregates names to use for the S3 bucket. + - This option is not supported when I(type=nas). + type: list + elements: str + + constituents_per_aggregate: + description: + - Number of constituents per aggregate. + - This option is not supported when I(type=nas). + type: int + + size: + description: + - Size of the S3 bucket in bytes. + - This option is not supported when I(type=nas). + type: int + + comment: + description: + - Comment for the S3 bucket. + type: str + + type: + description: + - Specifies the bucket type. Valid values are "s3"and "nas". + type: str + choices: ['s3', 'nas'] + version_added: 22.6.0 + + nas_path: + description: + - Specifies the NAS path to which the nas bucket corresponds to. + type: str + version_added: 22.7.0 + + policy: + description: + - Access policy uses the Amazon Web Services (AWS) policy language syntax to allow S3 tenants to create access policies to their data + type: dict + suboptions: + statements: + description: + - Policy statements are built using this structure to specify permissions + - Grant to allow/deny to perform on when applies + type: list + elements: dict + suboptions: + sid: + description: Statement ID + type: str + resources: + description: + - The bucket and any object it contains. + - The wildcard characters * and ? can be used to form a regular expression for specifying a resource. + type: list + elements: str + actions: + description: + - You can specify * to mean all actions, or a list of one or more of the following + - GetObject + - PutObject + - DeleteObject + - ListBucket + - GetBucketAcl + - GetObjectAcl + - ListBucketMultipartUploads + - ListMultipartUploadParts + type: list + elements: str + effect: + description: The statement may allow or deny access + type: str + choices: + - allow + - deny + principals: + description: A list of one or more S3 users or groups. 
+ type: list + elements: str + conditions: + description: Conditions for when a policy is in effect. + type: list + elements: dict + suboptions: + operator: + description: + - The operator to use for the condition. + type: str + choices: + - ip_address + - not_ip_address + - string_equals + - string_not_equals + - string_equals_ignore_case + - string_not_equals_ignore_case + - string_like + - string_not_like + - numeric_equals + - numeric_not_equals + - numeric_greater_than + - numeric_greater_than_equals + - numeric_less_than + - numeric_less_than_equals + max_keys: + description: + - The maximum number of keys that can be returned in a request. + type: list + elements: str + delimiters: + description: + - The delimiter used to identify a prefix in a list of objects. + type: list + elements: str + source_ips: + description: + - The source IP address of the request. + type: list + elements: str + prefixes: + description: + - The prefixes of the objects that you want to list. + type: list + elements: str + usernames: + description: + - The user names that you want to allow to access the bucket. + type: list + elements: str + + qos_policy: + description: + - A policy group defines measurable service level objectives (SLOs) that apply to the storage objects with which the policy group is associated. + - If you do not assign a policy group to a bucket, the system wil not monitor and control the traffic to it. + - This option is not supported when I(type=nas). + type: dict + suboptions: + max_throughput_iops: + description: The maximum throughput in IOPS. + type: int + max_throughput_mbps: + description: The maximum throughput in MBPS. + type: int + min_throughput_iops: + description: The minimum throughput in IOPS. + type: int + min_throughput_mbps: + description: The minimum throughput in MBPS. + type: int + name: + description: The QoS policy group name. This is mutually exclusive with other QoS attributes. 
+ type: str + + audit_event_selector: + description: + - Audit event selector allows you to specify access and permission types to audit. + - This option is not supported when I(type=nas). + type: dict + suboptions: + access: + description: + - specifies the type of event access to be audited, read-only, write-only or all (default is all). + type: str + choices: + - read + - write + - all + permission: + description: + - specifies the type of event permission to be audited, allow-only, deny-only or all (default is all). + type: str + choices: + - allow + - deny + - all +notes: + - module will try to set desired C(audit_event_selector) if the bucket is not configured with audit_event_selector options, + but may not take effect if there is no audit configuration present in vserver. +''' + +EXAMPLES = """ + - name: Create S3 bucket + netapp.ontap.na_ontap_s3_buckets: + state: present + name: carchi-test-bucket + comment: carchi8py was here + size: 838860800 + vserver: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always + + - name: Create S3 bucket with a policy + netapp.ontap.na_ontap_s3_buckets: + state: present + name: carchi-test-bucket + comment: carchi8py was here + size: 838860800 + policy: + statements: + - sid: FullAccessToUser1 + resources: + - bucket1 + - bucket1/* + actions: + - GetObject + - PutObject + - DeleteObject + - ListBucket + effect: allow + conditions: + - operator: ip_address + max_keys: + - 1000 + delimiters: + - "/" + source_ips: + - 1.1.1.1 + - 1.2.2.0/24 + prefixes: + - prex + usernames: + - user1 + principals: + - user1 + - group/grp1 + vserver: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always + + - name: Delete S3 bucket + netapp.ontap.na_ontap_s3_buckets: + state: absent + name: 
carchi-test-bucket + vserver: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapS3Buckets: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + vserver=dict(required=True, type='str'), + aggregates=dict(required=False, type='list', elements='str'), + constituents_per_aggregate=dict(required=False, type='int'), + size=dict(required=False, type='int'), + comment=dict(required=False, type='str'), + type=dict(required=False, type='str', choices=['s3', 'nas']), + nas_path=dict(required=False, type='str'), + policy=dict(type='dict', options=dict( + statements=dict(type='list', elements='dict', options=dict( + sid=dict(required=False, type='str'), + resources=dict(required=False, type='list', elements='str'), + actions=dict(required=False, type='list', elements='str'), + effect=dict(required=False, type='str', choices=['allow', 'deny']), + conditions=dict(type='list', elements='dict', options=dict( + operator=dict(required=False, type='str', choices=['ip_address', + 'not_ip_address', + 'string_equals', + 'string_not_equals', + 'string_equals_ignore_case', + 'string_not_equals_ignore_case', + 'string_like', + 'string_not_like', + 'numeric_equals', + 'numeric_not_equals', + 'numeric_greater_than', + 
'numeric_greater_than_equals', + 'numeric_less_than', + 'numeric_less_than_equals']), + max_keys=dict(required=False, type='list', elements='str', no_log=False), + delimiters=dict(required=False, type='list', elements='str'), + source_ips=dict(required=False, type='list', elements='str'), + prefixes=dict(required=False, type='list', elements='str'), + usernames=dict(required=False, type='list', elements='str'))), + principals=dict(type='list', elements='str') + )))), + qos_policy=dict(type='dict', options=dict( + max_throughput_iops=dict(type='int'), + max_throughput_mbps=dict(type='int'), + name=dict(type='str'), + min_throughput_iops=dict(type='int'), + min_throughput_mbps=dict(type='int'), + )), + audit_event_selector=dict(type='dict', options=dict( + access=dict(type='str', choices=['read', 'write', 'all']), + permission=dict(type='str', choices=['allow', 'deny', 'all']))), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.svm_uuid = None + self.uuid = None + self.volume_uuid = None + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_bucket', 9, 8) + partially_supported_rest_properties = [['audit_event_selector', (9, 10, 1)], ['type', (9, 12, 1)], ['nas_path', (9, 12, 1)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties) + # few keys in policy.statements will be configured with default value if not set in create. + # so removing None entries to avoid idempotent issue in next run. + if self.parameters.get('policy'): + # below keys can be reset with empty list. + # - statements. + # - conditions. + # - actions. + # - principals. 
def get_s3_bucket(self):
    """Fetch the named bucket from ONTAP.

    Returns the normalized current state (via ``form_current``) or None when
    the bucket does not exist; fails the module on a REST error.
    """
    api = 'protocols/s3/buckets'
    # Base field list; later ONTAP releases expose additional attributes.
    fields = 'name,svm.name,size,comment,volume.uuid,policy,policy.statements,qos_policy'
    if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 12, 1):
        fields = '%s,audit_event_selector,type,nas_path' % fields
    elif self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
        fields = '%s,audit_event_selector' % fields
    query = {
        'name': self.parameters['name'],
        'svm.name': self.parameters['vserver'],
        'fields': fields,
    }
    record, error = rest_generic.get_one_record(self.rest_api, api, query)
    if error:
        msg = 'Error fetching S3 bucket %s: %s' % (self.parameters['name'], to_native(error))
        self.module.fail_json(msg=msg, exception=traceback.format_exc())
    if not record:
        return None
    return self.form_current(record)
['audit_event_selector']), + 'type': self.na_helper.safe_get(record, ['type']), + 'nas_path': self.na_helper.safe_get(record, ['nas_path']) + } + if body['policy']: + for policy_statement in body['policy'].get('statements', []): + # So we treat SID as a String as it can accept Words, or Numbers. + # ONTAP will return it as a String, unless it is just + # numbers then it is returned as an INT. + policy_statement['sid'] = str(policy_statement['sid']) + # setting keys in each condition to None if not present to avoid idempotency issue. + if not policy_statement.get('conditions'): + policy_statement['conditions'] = [] + else: + for condition in policy_statement['conditions']: + condition['delimiters'] = condition.get('delimiters') + condition['max_keys'] = condition.get('max_keys') + condition['operator'] = condition.get('operator') + condition['prefixes'] = condition.get('prefixes') + condition['source_ips'] = condition.get('source_ips') + condition['usernames'] = condition.get('usernames') + # empty [] is used to reset policy statements. + # setting policy statements to [] to avoid idempotency issue. + else: + body['policy'] = {'statements': []} + return body + + def set_uuid(self, record): + self.uuid = record['uuid'] + self.svm_uuid = record['svm']['uuid'] + # volume key is not returned for NAS buckets. 
def form_create_or_modify_body(self, params=None):
    """Build the REST request body from *params* (defaults to self.parameters).

    Plain options are copied verbatim; the nested qos_policy and
    audit_event_selector dicts are passed through filter_out_none_entries so
    unset suboptions are not sent to the API.
    """
    if params is None:
        params = self.parameters
    body = {
        key: params[key]
        for key in ('aggregates', 'constituents_per_aggregate', 'size', 'comment', 'type', 'nas_path', 'policy')
        if key in params
    }
    for nested in ('qos_policy', 'audit_event_selector'):
        if nested in params:
            body[nested] = self.na_helper.filter_out_none_entries(params[nested])
    return body
rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg=error) + aggr_names = [aggr['name'] for aggr in record['aggregates']] + if self.parameters.get('aggregates'): + if sorted(aggr_names) != sorted(self.parameters['aggregates']): + return True + return False + + def validate_modify_required(self, modify, current): + # if desired statement length different than current, allow modify. + if len(modify['policy']['statements']) != len(current['policy']['statements']): + return True + match_found = [] + for statement in modify['policy']['statements']: + for index, current_statement in enumerate(current['policy']['statements']): + # continue to next if the current statement already has a match. + if index in match_found: + continue + statement_modified = self.na_helper.get_modified_attributes(current_statement, statement) + # no modify required, match found for the statment. + # break the loop and check next desired policy statement has match. + if not statement_modified: + match_found.append(index) + break + # match not found, switch to next current statement and continue to find desired statement is present. + if len(statement_modified) > 1: + continue + # 'conditions' key in policy.statements is list type, each element is dict. + # if the len of the desired conditions different than current, allow for modify. + # check for modify if 'conditions' is the only key present in statement_modified. + # check for difference in each modify[policy.statements[index][conditions] with current[policy.statements[index][conditions]. + if statement_modified.get('conditions'): + if not current_statement['conditions']: + continue + if len(statement_modified.get('conditions')) != len(current_statement['conditions']): + continue + + # each condition should be checked for modify based on the operator key. 
+ def require_modify(desired, current): + for condition in desired: + # operator is a required field for condition, if not present, REST will throw error. + if condition.get('operator'): + for current_condition in current: + if condition['operator'] == current_condition['operator']: + condition_modified = self.na_helper.get_modified_attributes(current_condition, condition) + if condition_modified: + return True + else: + return True + if not require_modify(statement_modified['conditions'], current_statement['conditions']): + match_found.append(index) + break + # allow modify + # - if not match found + # - if only partial policy statements has match found. + return not match_found or len(match_found) != len(modify['policy']['statements']) + + def apply(self): + current = self.get_s3_bucket() + cd_action, modify = None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if modify.get('type'): + self.module.fail_json(msg='Error: cannot modify bucket type.') + if len(modify) == 1 and 'policy' in modify and current.get('policy'): + if modify['policy'].get('statements'): + self.na_helper.changed = self.validate_modify_required(modify, current) + if not self.na_helper.changed: + modify = False + # volume uuid returned only for s3 buckets. 
+ if current and self.volume_uuid and self.check_volume_aggr(): + self.module.fail_json(msg='Aggregates cannot be modified for S3 bucket %s' % self.parameters['name']) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_s3_bucket() + if cd_action == 'delete': + self.delete_s3_bucket() + if modify: + self.modify_s3_bucket(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + '''Apply volume operations from playbook''' + obj = NetAppOntapS3Buckets() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_groups.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_groups.py new file mode 100644 index 000000000..e4b8d57f8 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_groups.py @@ -0,0 +1,234 @@ +#!/usr/bin/python + +# (c) 2022-2023, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_s3_groups +short_description: NetApp ONTAP S3 groups +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 21.21.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete, or modify S3 groups on NetApp ONTAP. + +options: + state: + description: + - Whether the specified S3 group should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + + name: + description: + - The name of the S3 group. + type: str + required: true + + vserver: + description: + - Name of the vserver to use. 
+ type: str + required: true + + comment: + description: + - comment about the group + type: str + + users: + description: List of users to to add the the group + type: list + elements: dict + suboptions: + name: + description: username + type: str + + policies: + description: Policies to add the the group + type: list + elements: dict + suboptions: + name: + description: policy name + type: str +''' + +EXAMPLES = """ + - name: Create and modify a S3 Group + netapp.ontap.na_ontap_s3_groups: + state: present + name: dev-group + comment: group for devs + vserver: ansibleSVM + users: + - name: carchi8py + - name: carchi8py2 + policies: + - name: allow_policy + - name: deny_policy + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always + + - name: Delete a S3 Group + netapp.ontap.na_ontap_s3_groups: + state: absent + name: dev-group + vserver: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver + + +class NetAppOntapS3Groups: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + comment=dict(required=False, type='str'), 
+ users=dict(required=False, type='list', elements='dict', options=dict( + name=dict(required=False, type='str'))), + policies=dict(required=False, type='list', elements='dict', options=dict( + name=dict(required=False, type='str'))))) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + ) + self.svm_uuid = None + self.group_id = None + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_groups', 9, 8) + + def get_s3_groups(self): + self.get_svm_uuid() + api = 'protocols/s3/services/%s/groups' % self.svm_uuid + fields = ','.join(('name', + 'comment', + 'users.name', + 'policies.name')) + params = {'name': self.parameters['name'], + 'fields': fields} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching S3 groups %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if record: + self.group_id = record.get('id') + return self.form_current(record) + return record + + @staticmethod + def form_current(record): + current = { + 'comment': record.get('comment'), + 'users': [], + 'policies': [], + } + # the APi Returning _link in each user and policy record which is causing modify to get called + if record.get('users'): + for user in record['users']: + current['users'].append({'name': user['name']}) + if record.get('policies'): + for policy in record['policies']: + current['policies'].append({'name': policy['name']}) + return current + + def create_s3_groups(self): + api = 'protocols/s3/services/%s/groups' % self.svm_uuid + body = {'name': self.parameters['name'], + 'users': self.parameters['users'], + 'policies': self.parameters['policies']} + if self.parameters.get('comment'): + body['comment'] 
= self.parameters['comment'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating S3 groups %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_s3_groups(self): + api = 'protocols/s3/services/%s/groups' % self.svm_uuid + dummy, error = rest_generic.delete_async(self.rest_api, api, self.group_id) + if error: + self.module.fail_json(msg='Error deleting S3 group %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_s3_groups(self, modify): + api = 'protocols/s3/services/%s/groups' % self.svm_uuid + body = {} + if modify.get('comment') is not None: + body['comment'] = self.parameters['comment'] + if modify.get('users') is not None: + body['users'] = self.parameters['users'] + if modify.get('policies') is not None: + body['policies'] = self.parameters['policies'] + dummy, error = rest_generic.patch_async(self.rest_api, api, self.group_id, body) + if error: + self.module.fail_json(msg='Error modifying S3 group %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def get_svm_uuid(self): + record, error = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + self.svm_uuid = record + + def apply(self): + current = self.get_s3_groups() + cd_action, modify = None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if cd_action == 'create' and (self.na_helper.safe_get(self.parameters, ['users']) is None + or self.na_helper.safe_get(self.parameters, ['policies']) is None): + self.module.fail_json(msg='policies and users are required for a creating a group.') + if modify and (self.na_helper.safe_get(self.parameters, ['users']) is None + or self.na_helper.safe_get(self.parameters, ['policies']) is None): 
+ self.module.fail_json(msg='policies and users can not be empty when modifying a group.') + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_s3_groups() + if cd_action == 'delete': + self.delete_s3_groups() + if modify: + self.modify_s3_groups(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + obj = NetAppOntapS3Groups() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_policies.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_policies.py new file mode 100644 index 000000000..7b54efbbe --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_policies.py @@ -0,0 +1,246 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_s3_policies +short_description: NetApp ONTAP S3 Policies +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 21.21.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete, or modify S3 Policies on NetApp ONTAP. + +options: + state: + description: + - Whether the specified S3 policy should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + + name: + description: + - The name of the S3 policy. + type: str + required: true + + vserver: + description: + - Name of the vserver to use. 
+ type: str + required: true + + comment: + description: + - comment about the policy + type: str + + statements: + description: + - Policy statements are built using this structure to specify permissions + - Grant to allow/deny to perform on when applies + type: list + elements: dict + suboptions: + sid: + description: Statement ID + type: str + required: true + resources: + description: + - The bucket and any object it contains. + - The wildcard characters * and ? can be used to form a regular expression for specifying a resource. + type: list + elements: str + required: true + actions: + description: + - You can specify * to mean all actions, or a list of one or more of the following + - GetObject + - PutObject + - DeleteObject + - ListBucket + - GetBucketAcl + - GetObjectAcl + - ListBucketMultipartUploads + - ListMultipartUploadParts + type: list + elements: str + required: true + effect: + description: The statement may allow or deny access + type: str + choices: + - allow + - deny + required: true +''' +EXAMPLES = """ + - name: Create and modify a S3 policy + netapp.ontap.na_ontap_s3_policies: + state: present + name: carchi-s3-policy + comment: carchi8py was here + vserver: ansibleSVM + statements: + - sid: 1 + resources: + - "bucket1" + - "bucket1/*" + actions: + - "*" + effect: + allow + - sid: 2 + resources: + - "bucket2" + - "bucket2/*" + actions: + - "*" + effect: + allow + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always + + - name: delete S3 policy + netapp.ontap.na_ontap_s3_policies: + state: absent + name: carchi-s3-policy + vserver: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always +""" +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text 
import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver + + +class NetAppOntapS3Policies: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + comment=dict(required=False, type='str'), + statements=dict(type='list', elements='dict', options=dict( + sid=dict(required=True, type='str'), + resources=dict(required=True, type='list', elements='str'), + actions=dict(required=True, type='list', elements='str'), + effect=dict(required=True, type='str', choices=['allow', 'deny']), + )))) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.svm_uuid = None + self.uuid = None + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_policies', 9, 8) + + def get_s3_policies(self): + self.get_svm_uuid() + api = 'protocols/s3/services/%s/policies' % self.svm_uuid + fields = ','.join(('name', + 'comment', + 'statements')) + params = {'name': self.parameters['name'], + 'fields': fields} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching S3 policy %s: %s' % (self.parameters['name'], to_native(error)), + 
exception=traceback.format_exc()) + # sid is an Str or a number, it will return a string back unless you pass a number then it returns a int + if record: + for each in record['statements']: + each['sid'] = str(each['sid']) + return record + + def get_svm_uuid(self): + uuid, dummy = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + self.svm_uuid = uuid + + def create_s3_policies(self): + api = 'protocols/s3/services/%s/policies' % self.svm_uuid + body = {'name': self.parameters['name']} + if self.parameters.get('comment'): + body['comment'] = self.parameters['comment'] + if self.parameters.get('statements'): + body['statements'] = self.parameters['statements'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating S3 policy %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_s3_policies(self): + api = 'protocols/s3/services/%s/policies' % self.svm_uuid + dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name']) + if error: + self.module.fail_json(msg='Error deleting S3 policy %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_s3_policies(self, modify): + api = 'protocols/s3/services/%s/policies' % self.svm_uuid + body = {} + if modify.get('comment'): + body['comment'] = self.parameters['comment'] + if self.parameters.get('statements'): + body['statements'] = self.parameters['statements'] + dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body) + if error: + self.module.fail_json(msg='Error modifying S3 policy %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + current = self.get_s3_policies() + modify = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify = 
self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_s3_policies() + if cd_action == 'delete': + self.delete_s3_policies() + if modify: + self.modify_s3_policies(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + '''Apply volume operations from playbook''' + obj = NetAppOntapS3Policies() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_services.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_services.py new file mode 100644 index 000000000..ff5feb722 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_services.py @@ -0,0 +1,219 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_s3_services +short_description: NetApp ONTAP S3 services +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 21.20.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete or modify S3 Service + +options: + state: + description: + - Whether the specified S3 bucket should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + + name: + description: + - The name of the S3 service. + type: str + required: true + + vserver: + description: + - Name of the vserver to use. 
+ type: str + required: true + + enabled: + description: + - enable or disable the service + type: bool + + comment: + description: + - comment about the service + type: str + + certificate_name: + description: + - name of https certificate to use for the service + type: str +''' + +EXAMPLES = """ + - name: create or modify s3 service + na_ontap_s3_services: + state: present + name: carchi-test + vserver: ansibleSVM + comment: not enabled + enabled: False + certificate_name: ansibleSVM_16E1C1284D889609 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always + + - name: delete s3 service + na_ontap_s3_services: + state: absent + name: carchi-test + vserver: ansibleSVM + certificate_name: ansibleSVM_16E1C1284D889609 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always +""" + + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapS3Services: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + enabled=dict(required=False, type='bool'), + vserver=dict(required=True, type='str'), + comment=dict(required=False, type='str'), + certificate_name=dict(required=False, type='str'), + )) + + self.module = 
AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.svm_uuid = None + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + + self.rest_api = OntapRestAPI(self.module) + partially_supported_rest_properties = [] # TODO: Remove if there nothing here + self.use_rest = self.rest_api.is_rest(partially_supported_rest_properties=partially_supported_rest_properties, + parameters=self.parameters) + + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_services', 9, 8) + + def get_s3_service(self): + api = 'protocols/s3/services' + fields = ','.join(('name', + 'enabled', + 'svm.uuid', + 'comment', + 'certificate.name')) + + params = { + 'name': self.parameters['name'], + 'svm.name': self.parameters['vserver'], + 'fields': fields + } + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching S3 service %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if record: + if self.na_helper.safe_get(record, ['certificate', 'name']): + record['certificate_name'] = self.na_helper.safe_get(record, ['certificate', 'name']) + return self.set_uuids(record) + return None + + def create_s3_service(self): + api = 'protocols/s3/services' + body = {'svm.name': self.parameters['vserver'], 'name': self.parameters['name']} + if self.parameters.get('enabled') is not None: + body['enabled'] = self.parameters['enabled'] + if self.parameters.get('comment'): + body['comment'] = self.parameters['comment'] + if self.parameters.get('certificate_name'): + body['certificate.name'] = self.parameters['certificate_name'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating S3 service %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_s3_service(self): + api = 
'protocols/s3/services' + # The rest default is to delete all users, and empty bucket attached to a service. + # This would not be idempotent, so switching this to False. + # second issue, delete_all: True will say it deleted, but the ONTAP system will show it's still there until the job for the + # delete buckets/users/groups is complete. + body = {'delete_all': False} + dummy, error = rest_generic.delete_async(self.rest_api, api, self.svm_uuid, body=body) + if error: + self.module.fail_json(msg='Error deleting S3 service %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_s3_service(self, modify): + # Once the service is created, bucket and user can not be modified by the service api, but only the user/group/bucket modules + api = 'protocols/s3/services' + body = {'name': self.parameters['name']} + if modify.get('enabled') is not None: + body['enabled'] = self.parameters['enabled'] + if modify.get('comment'): + body['comment'] = self.parameters['comment'] + if modify.get('certificate_name'): + body['certificate.name'] = self.parameters['certificate_name'] + dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body) + if error: + self.module.fail_json(msg='Error modifying S3 service %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def set_uuids(self, record): + self.svm_uuid = record['svm']['uuid'] + return record + + def apply(self): + current = self.get_s3_service() + cd_action, modify = None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_s3_service() + if cd_action == 'delete': + self.delete_s3_service() + if modify: + self.modify_s3_service(modify) + result = netapp_utils.generate_result(self.na_helper.changed, 
cd_action, modify) + self.module.exit_json(**result) + + +def main(): + '''Apply volume operations from playbook''' + obj = NetAppOntapS3Services() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_users.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_users.py new file mode 100644 index 000000000..d3a0efd64 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_s3_users.py @@ -0,0 +1,193 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_s3_users +short_description: NetApp ONTAP S3 users +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 21.20.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create, delete, or modify S3 users on NetApp ONTAP. + +options: + state: + description: + - Whether the specified S3 user should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + + name: + description: + - The name of the S3 user. + type: str + required: true + + vserver: + description: + - Name of the vserver to use. 
+ type: str + required: true + + comment: + description: + - comment about the user + type: str +''' + +EXAMPLES = """ + - name: create or modify s3 user + na_ontap_s3_users: + state: present + name: carchi8py + vserver: ansibleSVM + comment: not enabled + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always + + - name: delete s3 user + na_ontap_s3_users: + state: absent + name: carchi8py + vserver: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + use_rest: always +""" + +RETURN = """ +secret_key: + description: secret_key generated for the user + returned: on creation of user + type: str +access_key: + description: access_key generated for the user + returned: on creation of user + type: str +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver + + +class NetAppOntapS3Users: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + name=dict(required=True, type='str'), + comment=dict(required=False, type='str'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.svm_uuid = None + self.na_helper = 
NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_s3_users', 9, 8) + + def get_s3_user(self): + self.get_svm_uuid() + api = 'protocols/s3/services/%s/users' % self.svm_uuid + fields = ','.join(('name', + 'comment')) + params = {'name': self.parameters['name'], + 'fields': fields} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching S3 user %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + return record + + def get_svm_uuid(self): + record, error = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + self.svm_uuid = record + + def create_s3_user(self): + api = 'protocols/s3/services/%s/users' % self.svm_uuid + body = {'name': self.parameters['name']} + if self.parameters.get('comment'): + body['comment'] = self.parameters['comment'] + response, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating S3 user %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if response.get('num_records') == 1: + return response.get('records')[0] + self.module.fail_json(msg='Error creating S3 user %s' % self.parameters['name'], exception=traceback.format_exc()) + + def delete_s3_user(self): + api = 'protocols/s3/services/%s/users' % self.svm_uuid + dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name']) + if error: + self.module.fail_json(msg='Error deleting S3 user %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_s3_user(self, modify): + api = 'protocols/s3/services/%s/users' % self.svm_uuid + body = {} + if modify.get('comment'): + body['comment'] = 
self.parameters['comment'] + dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body) + if error: + self.module.fail_json(msg='Error modifying S3 user %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def parse_response(self, response): + if response is not None: + return response.get('secret_key'), response.get('access_key') + return None, None + + def apply(self): + current = self.get_s3_user() + cd_action, modify, response = None, None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + response = self.create_s3_user() + if cd_action == 'delete': + self.delete_s3_user() + if modify: + self.modify_s3_user(modify) + secret_key, access_key = self.parse_response(response) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, extra_responses={'secret_key': secret_key, + 'access_key': access_key}) + self.module.exit_json(**result) + + +def main(): + '''Apply volume operations from playbook''' + obj = NetAppOntapS3Users() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py new file mode 100644 index 000000000..c7131fe5e --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_certificates.py @@ -0,0 +1,468 @@ +#!/usr/bin/python + +# (c) 2020-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_security_certificates +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: 
na_ontap_security_certificates +short_description: NetApp ONTAP manage security certificates. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '20.7.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- Install or delete security certificates on ONTAP. (Create and sign will come in a second iteration) + +options: + + state: + description: + - Whether the specified security certificate should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + common_name: + description: + - Common name of the certificate. + - Required for create and install. + - If name is present, ignored for sign and delete. + - If name is absent or ignored, required for sign and delete. + type: str + + name: + description: + - The unique name of the security certificate per SVM. + - This parameter is not supported for ONTAP 9.6 or 9.7, as the REST API does not support it. + - If present with ONTAP 9.6 or 9.7, it is ignored by default, see I(ignore_name_if_not_supported). + - It is strongly recommended to use name for newer releases of ONTAP. + type: str + + svm: + description: + - The name of the SVM (vserver). + - If present, the certificate is installed in the SVM. + - If absent, the certificate is installed in the cluster. + type: str + aliases: + - vserver + + type: + description: + - Type of certificate + - Required for create and install. + - If name is present, ignored for sign and delete. + - If name is absent or ignored, required for sign and delete. + choices: ['client', 'server', 'client_ca', 'server_ca', 'root_ca'] + type: str + + public_certificate: + description: + - Public key certificate in PEM format. + - Required when installing a certificate. Ignored otherwise. + type: str + + private_key: + description: + - Private key certificate in PEM format. + - Required when installing a CA-signed certificate. Ignored otherwise. 
+ type: str + + signing_request: + description: + - If present, the certificate identified by name and svm is used to sign the request. + - A signed certificate is returned. + type: str + + expiry_time: + description: + - Certificate expiration time. Specifying an expiration time is recommended when creating a certificate. + - Can be provided when signing a certificate. + type: str + + key_size: + description: + - Key size of the certificate in bits. Specifying a strong key size is recommended when creating a certificate. + - Ignored for sign and delete. + type: int + + hash_function: + description: + - Hashing function. Can be provided when creating a self-signed certificate or when signing a certificate. + - Allowed values for create and sign are sha256, sha224, sha384, sha512. + type: str + + intermediate_certificates: + description: + - Chain of intermediate Certificates in PEM format. + - Only valid when installing a certificate. + type: list + elements: str + + ignore_name_if_not_supported: + description: + - ONTAP 9.6 and 9.7 REST API does not support I(name). + - If set to true, no error is reported if I(name) is present, and I(name) is not used. + type: bool + default: true + version_added: '20.8.0' + +notes: + - supports check mode. + - only supports REST. Requires ONTAP 9.6 or later, ONTAP 9.8 or later is recommended. +''' + +EXAMPLES = """ +- name: install certificate + netapp.ontap.na_ontap_security_certificates: + # <<: *cert_login + common_name: "{{ ontap_cert_common_name }}" + name: "{{ ontap_cert_name }}" + public_certificate: "{{ ssl_certificate }}" + type: client_ca + svm: "{{ vserver }}" + +# ignore svm option for cluster/admin vserver. +- name: install certificate in cluster vserver. 
+ netapp.ontap.na_ontap_security_certificates: + # <<: *cert_login + common_name: "{{ ontap_cert_common_name }}" + name: "{{ ontap_cert_name }}" + public_certificate: "{{ ssl_certificate }}" + type: client_ca + +- name: create certificate + netapp.ontap.na_ontap_security_certificates: + # <<: *cert_login + common_name: "{{ ontap_cert_root_common_name }}" + name: "{{ ontap_cert_name }}" + type: root_ca + svm: "{{ vserver }}" + expiry_time: P365DT # one year + +- name: sign certificate using newly create certificate + tags: sign_request + netapp.ontap.na_ontap_security_certificates: + # <<: *login + name: "{{ ontap_cert_name }}" + svm: "{{ vserver }}" + signing_request: | + -----BEGIN CERTIFICATE REQUEST----- + MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH + DAlTdW5ueXZhbGUxDzANBgNVBAoMBk5ldEFwcDCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBALgXCj6Si/I4xLdV7wjWYTbt8jY20fQOjk/4E7yBT1vFBflE + ks6YDc6dhC2G18cnoj9E3DiR8lIHPoAlFB/VmBNDev3GZkbFlrbV7qYmf8OEx2H2 + tAefgSP0jLmCHCN1yyhJoCG6FsAiD3tf6yoyFF6qS9ureGL0tCJJ/osx64WzUz+Q + EN8lx7VSxriEFMSjreXZDhUFaCdIYKKRENuEWyYvdy5cbBmczhuM8EP6peOVv5Hm + BJzPUDkq7oTtEHmttpATq2Y92qzNzETO0bXN5X/93AWri8/yEXdX+HEw1C/omtsE + jGsCXrCrIJ+DgUdT/GHNdBWlXl/cWGtEgEQ4vrUCAwEAAaAAMA0GCSqGSIb3DQEB + CwUAA4IBAQBjZNoQgr/JDm1T8zyRhLkl3zw4a16qKNu/MS7prqZHLVQgrptHRegU + Hbz11XoHfVOdbyuvtzEe95QsDd6FYCZ4qzZRF3se4IjMeqwdQZ5WP0/GFiwM8Uln + /0TCWjt759XMeUX7+wgOg5NRjJ660eWMXzu/UJf+vZO0Q2FiPIr13JvvY3TjT+9J + UUtK4r9PaUuOPN2YL9IQqSD3goh8302Qr3nBXUgjeUGLkgfUM5S39apund2hyTX2 + JCLQsKr88pwU9iDho2tHLv/2QgLwNZLPu8V+7IGu6G4vB28lN4Uy7xbhxFOKtyWu + fK4sEdTw3B/aDN0tB8MHFdPYycNZsEac + -----END CERTIFICATE REQUEST----- + expiry_time: P180DT + +- name: delete certificate + netapp.ontap.na_ontap_security_certificates: + # <<: *cert_login + state: absent + name: "{{ ontap_cert_name }}" + svm: "{{ vserver }}" + +# For ONTAP 9.6 or 9.7, use common_name and type, in addition to, or in lieu of name +- name: install certificate + 
netapp.ontap.na_ontap_security_certificates: + # <<: *cert_login + common_name: "{{ ontap_cert_common_name }}" + public_certificate: "{{ ssl_certificate }}" + type: client_ca + svm: "{{ vserver }}" + +- name: create certificate + netapp.ontap.na_ontap_security_certificates: + # <<: *cert_login + common_name: "{{ ontap_cert_root_common_name }}" + type: root_ca + svm: "{{ vserver }}" + expiry_time: P365DT # one year + +- name: sign certificate using newly create certificate + tags: sign_request + netapp.ontap.na_ontap_security_certificates: + # <<: *login + common_name: "{{ ontap_cert_root_common_name }}" + type: root_ca + svm: "{{ vserver }}" + signing_request: | + -----BEGIN CERTIFICATE REQUEST----- + MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQH + DAlTdW5ueXZhbGUxDzANBgNVBAoMBk5ldEFwcDCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBALgXCj6Si/I4xLdV7wjWYTbt8jY20fQOjk/4E7yBT1vFBflE + ks6YDc6dhC2G18cnoj9E3DiR8lIHPoAlFB/VmBNDev3GZkbFlrbV7qYmf8OEx2H2 + tAefgSP0jLmCHCN1yyhJoCG6FsAiD3tf6yoyFF6qS9ureGL0tCJJ/osx64WzUz+Q + EN8lx7VSxriEFMSjreXZDhUFaCdIYKKRENuEWyYvdy5cbBmczhuM8EP6peOVv5Hm + BJzPUDkq7oTtEHmttpATq2Y92qzNzETO0bXN5X/93AWri8/yEXdX+HEw1C/omtsE + jGsCXrCrIJ+DgUdT/GHNdBWlXl/cWGtEgEQ4vrUCAwEAAaAAMA0GCSqGSIb3DQEB + CwUAA4IBAQBjZNoQgr/JDm1T8zyRhLkl3zw4a16qKNu/MS7prqZHLVQgrptHRegU + Hbz11XoHfVOdbyuvtzEe95QsDd6FYCZ4qzZRF3se4IjMeqwdQZ5WP0/GFiwM8Uln + /0TCWjt759XMeUX7+wgOg5NRjJ660eWMXzu/UJf+vZO0Q2FiPIr13JvvY3TjT+9J + UUtK4r9PaUuOPN2YL9IQqSD3goh8302Qr3nBXUgjeUGLkgfUM5S39apund2hyTX2 + JCLQsKr88pwU9iDho2tHLv/2QgLwNZLPu8V+7IGu6G4vB28lN4Uy7xbhxFOKtyWu + fK4sEdTw3B/aDN0tB8MHFdPYycNZsEac + -----END CERTIFICATE REQUEST----- + expiry_time: P180DT + +- name: delete certificate + netapp.ontap.na_ontap_security_certificates: + # <<: *cert_login + state: absent + common_name: "{{ ontap_cert_root_common_name }}" + type: root_ca + name: "{{ ontap_cert_name }}" + svm: "{{ vserver }}" +""" + +RETURN = """ +ontap_info: + description: Returns public_certificate when signing, 
empty for create, install, and delete. + returned: always + type: dict + sample: '{ + "ontap_info": { + "public_certificate": "-----BEGIN CERTIFICATE-----\n........-----END CERTIFICATE-----\n" + } + }' +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver + + +class NetAppOntapSecurityCertificates: + ''' object initialize and class methods ''' + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + common_name=dict(required=False, type='str'), + name=dict(required=False, type='str'), + state=dict(required=False, choices=['present', 'absent'], default='present'), + type=dict(required=False, choices=['client', 'server', 'client_ca', 'server_ca', 'root_ca']), + svm=dict(required=False, type='str', aliases=['vserver']), + public_certificate=dict(required=False, type='str'), + private_key=dict(required=False, type='str', no_log=True), + signing_request=dict(required=False, type='str'), + expiry_time=dict(required=False, type='str'), + key_size=dict(required=False, type='int'), + hash_function=dict(required=False, type='str'), + intermediate_certificates=dict(required=False, type='list', elements='str'), + ignore_name_if_not_supported=dict(required=False, type='bool', default=True) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + if self.parameters.get('name') is None and (self.parameters.get('common_name') is None or self.parameters.get('type') is None): + error = 
"Error: 'name' or ('common_name' and 'type') are required parameters." + self.module.fail_json(msg=error) + + # ONTAP 9.6 and 9.7 do not support name. We'll change this to True if we detect an issue. + self.ignore_name_param = False + + # API should be used for ONTAP 9.6 or higher + self.rest_api = OntapRestAPI(self.module) + if self.rest_api.is_rest(): + self.use_rest = True + else: + self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_security_certificates')) + + def get_certificate(self): + """ + Fetch uuid if certificate exists. + NOTE: because of a bug in ONTAP 9.6 and 9.7, name is not supported. We are + falling back to using common_name and type, but unicity is not guaranteed. + :return: + Dictionary if certificate with same name is found + None if not found + """ + # REST allows setting cluster/admin svm in create certificate, but no records returned in GET. + # error if data svm not found + if 'svm' in self.parameters: + rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['svm'], self.module, True) + + error = "'name' or ('common_name', 'type') are required." + for key in ('name', 'common_name'): + if self.parameters.get(key) is None: + continue + data = {'fields': 'uuid', + key: self.parameters[key], + } + if self.parameters.get('svm') is not None: + data['svm.name'] = self.parameters['svm'] + else: + data['scope'] = 'cluster' + if key == 'common_name': + if self.parameters.get('type') is not None: + data['type'] = self.parameters['type'] + else: + error = "When using 'common_name', 'type' is required." 
+ break + + api = "security/certificates" + message, error = self.rest_api.get(api, data) + if error: + try: + name_not_supported_error = (key == 'name') and (error['message'] == 'Unexpected argument "name".') + except (KeyError, TypeError): + name_not_supported_error = False + if name_not_supported_error: + if self.parameters['ignore_name_if_not_supported'] and self.parameters.get('common_name') is not None: + # let's attempt a retry using common_name + self.ignore_name_param = True + continue + error = "ONTAP 9.6 and 9.7 do not support 'name'. Use 'common_name' and 'type' as a work-around." + # report success, or any other error as is + break + + if error: + self.module.fail_json(msg='Error calling API: %s - %s' % (api, error)) + + if len(message['records']) == 1: + return message['records'][0] + if len(message['records']) > 1: + error = 'Duplicate records with same common_name are preventing safe operations: %s' % repr(message) + self.module.fail_json(msg=error) + return None + + def create_or_install_certificate(self, validate_only=False): + """ + Create or install certificate + :return: message (should be empty dict) + """ + required_keys = ['type', 'common_name'] + if validate_only: + if not set(required_keys).issubset(set(self.parameters.keys())): + self.module.fail_json(msg='Error creating or installing certificate: one or more of the following options are missing: %s' + % (', '.join(required_keys))) + return + + optional_keys = ['public_certificate', 'private_key', 'expiry_time', 'key_size', 'hash_function', 'intermediate_certificates'] + if not self.ignore_name_param: + optional_keys.append('name') + # special key: svm + + body = {} + if self.parameters.get('svm') is not None: + body['svm'] = {'name': self.parameters['svm']} + for key in required_keys + optional_keys: + if self.parameters.get(key) is not None: + body[key] = self.parameters[key] + api = "security/certificates" + message, error = self.rest_api.post(api, body) + if error: + if 
self.parameters.get('svm') is None and error.get('target') == 'uuid': + error['target'] = 'cluster' + if error.get('message') == 'duplicate entry': + error['message'] += '. Same certificate may already exist under a different name.' + self.module.fail_json(msg="Error creating or installing certificate: %s" % error) + return message + + def sign_certificate(self, uuid): + """ + sign certificate + :return: a dictionary with key "public_certificate" + """ + api = "security/certificates/%s/sign" % uuid + body = {'signing_request': self.parameters['signing_request']} + optional_keys = ['expiry_time', 'hash_function'] + for key in optional_keys: + if self.parameters.get(key) is not None: + body[key] = self.parameters[key] + message, error = self.rest_api.post(api, body) + if error: + self.module.fail_json(msg="Error signing certificate: %s" % error) + return message + + def delete_certificate(self, uuid): + """ + Delete certificate + :return: message (should be empty dict) + """ + api = "security/certificates/%s" % uuid + message, error = self.rest_api.delete(api) + if error: + self.module.fail_json(msg="Error deleting certificate: %s" % error) + return message + + def apply(self): + """ + Apply action to create/install/sign/delete certificate + :return: None + """ + # TODO: add telemetry for REST + + current = self.get_certificate() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + message = None + if self.parameters.get('signing_request') is not None: + error = None + if self.parameters['state'] == 'absent': + error = "'signing_request' is not supported with 'state' set to 'absent'" + elif current is None: + scope = 'cluster' if self.parameters.get('svm') is None else "svm: %s" % self.parameters.get('svm') + error = "signing certificate with name '%s' not found on %s" % (self.parameters.get('name'), scope) + elif cd_action is not None: + error = "'signing_request' is exclusive with other actions: create, install, delete" + if error is not None: + 
self.module.fail_json(msg=error) + cd_action = 'sign' + self.na_helper.changed = True + + if self.na_helper.changed and cd_action == 'create': + # validate as much as we can in check_mode or not + self.create_or_install_certificate(validate_only=True) + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + message = self.create_or_install_certificate() + elif cd_action == 'sign': + message = self.sign_certificate(current['uuid']) + elif cd_action == 'delete': + message = self.delete_certificate(current['uuid']) + + results = netapp_utils.generate_result(self.na_helper.changed, cd_action, extra_responses={'ontap_info': message}) + self.module.exit_json(**results) + + +def main(): + """ + Create instance and invoke apply + :return: None + """ + sec_cert = NetAppOntapSecurityCertificates() + sec_cert.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_config.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_config.py new file mode 100644 index 000000000..aac0ea1d5 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_config.py @@ -0,0 +1,285 @@ +#!/usr/bin/python + +# (c) 2021-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_security_config +short_description: NetApp ONTAP modify security config for SSL. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Modifies the security configuration for SSL. +options: + name: + description: + - The type of FIPS compliant interface. + type: str + default: ssl + + is_fips_enabled: + description: + - Enables or disables FIPS-compliant mode for the cluster. 
+ - For REST, it requires ontap version 9.8. + type: bool + + supported_ciphers: + description: + - Selects the supported cipher suites for the selected interface. + - This option is supported only in ZAPI. + type: str + + supported_protocols: + description: + - Selects the supported protocols for the selected interface. Supported_ciphers should not be specified if operating in FIPS-compliant mode. + - For REST, it requires ontap version 9.10.1 or later. + - Protocol versions can be removed only from lower versions. + - To remove protocol TLSv1 has to be removed first. + choices: ['TLSv1.3', 'TLSv1.2', 'TLSv1.1', 'TLSv1'] + type: list + elements: str + + supported_cipher_suites: + description: + - Names a cipher suite that the system can select during TLS handshakes. + - A list of available options can be found on the Internet Assigned Number Authority (IANA) website. + - To achieve idempotency all similar cipher_suites must be set. + - This option requires ontap version 9.10.1 or later. 
+ type: list + elements: str + version_added: 22.4.0 +""" + +EXAMPLES = """ + - name: Modify SSL Security Config - ZAPI + netapp.ontap.na_ontap_security_config: + name: ssl + is_fips_enabled: false + supported_ciphers: 'ALL:!LOW:!aNULL:!EXP:!eNULL:!3DES:!RC4:!SHA1' + supported_protocols: ['TLSv1.2', 'TLSv1.1', 'TLSv1'] + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ontapi: "{{ ontap_info.ontap_info.ontap_version }}" + https: true + validate_certs: false + + - name: Modify SSL Security Config - REST + netapp.ontap.na_ontap_security_config: + is_fips_enabled: false + supported_protocols: ['TLSv1.2', 'TLSv1.1', 'TLSv1'] + supported_cipher_suites: ['TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384'] + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + ontapi: "{{ ontap_info.ontap_info.ontap_version }}" + https: true + validate_certs: false +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapSecurityConfig: + """ + Modifies SSL Security Config + """ + def __init__(self): + """ + Initialize the ONTAP Security Config class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + name=dict(required=False, type='str', default='ssl'), + is_fips_enabled=dict(required=False, type='bool'), + supported_ciphers=dict(required=False, type='str'), + supported_protocols=dict(required=False, type='list', elements='str', choices=['TLSv1.3', 'TLSv1.2', 'TLSv1.1', 'TLSv1']), + supported_cipher_suites=dict(required=False, type='list', elements='str') + )) + + self.module = AnsibleModule( + 
argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + unsupported_rest_properties = ['supported_ciphers'] + partially_supported_rest_properties = [['supported_cipher_suites', (9, 10, 1)], ['supported_protocols', (9, 10, 1)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties) + if self.use_rest and self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_config', 9, 8, 0): + msg = 'REST requires ONTAP 9.8 or later.' + self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + + if not self.use_rest: + if self.parameters.get('supported_cipher_suites'): + self.module.fail_json(msg="Error: The option supported_cipher_suites is supported only with REST.") + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg='The python NetApp-Lib module is required') + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + # Supported_ciphers is supported in ZAPI only. + if 'is_fips_enabled' in self.parameters and 'supported_ciphers' in self.parameters: + # if fips is enabled, supported ciphers should not be specified. + if self.parameters['is_fips_enabled']: + self.module.fail_json( + msg='is_fips_enabled was specified as true and supported_ciphers was specified. \ + If fips is enabled then supported ciphers should not be specified') + + if 'supported_ciphers' in self.parameters: + self.parameters['supported_ciphers'] = self.parameters['supported_ciphers'].replace('\\', '') + + if 'is_fips_enabled' in self.parameters and 'supported_protocols' in self.parameters: + # if fips is enabled, TLSv1 is not a supported protocol. 
            # FIPS mode mandates newer TLS versions; fail fast instead of letting ONTAP reject the request.
            if self.parameters['is_fips_enabled'] and 'TLSv1' in self.parameters['supported_protocols']:
                self.module.fail_json(
                    msg='is_fips_enabled was specified as true and TLSv1 was specified as a supported protocol. \
                    If fips is enabled then TLSv1 is not a supported protocol')
            # if fips is enabled, TLSv1.1 is not a supported protocol.
            if self.parameters['is_fips_enabled'] and 'TLSv1.1' in self.parameters['supported_protocols']:
                self.module.fail_json(
                    msg='is_fips_enabled was specified as true and TLSv1.1 was specified as a supported protocol. \
                    If fips is enabled then TLSv1.1 is not a supported protocol')

    def get_security_config(self):
        """
        Get the current security configuration.

        Delegates to the REST implementation when self.use_rest is set; otherwise
        issues a ZAPI security-config-get for the interface self.parameters['name'].
        :return: dict of current settings, or None when no attributes are returned.
        """
        if self.use_rest:
            return self.get_security_config_rest()

        return_value = None

        security_config_get_iter = netapp_utils.zapi.NaElement('security-config-get')
        # desired-attributes narrows the reply to the fields this module can modify.
        security_config_info = netapp_utils.zapi.NaElement('desired-attributes')
        if 'is_fips_enabled' in self.parameters:
            security_config_info.add_new_child(
                'is-fips-enabled', self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_fips_enabled'])
            )
        if 'supported_ciphers' in self.parameters:
            security_config_info.add_new_child('supported-ciphers', self.parameters['supported_ciphers'])
        if 'supported_protocols' in self.parameters:
            security_config_info.add_new_child('supported-protocols', ','.join(self.parameters['supported_protocols']))

        security_config_get_iter.add_child_elem(security_config_info)
        security_config_get_iter.add_new_child('interface', self.parameters['name'])
        try:
            result = self.server.invoke_successfully(security_config_get_iter, True)
            security_supported_protocols = []
            if result.get_child_by_name('attributes'):
                attributes = result.get_child_by_name('attributes')
                security_config_attributes = attributes.get_child_by_name('security-config-info')
                # protocols come back as a list of child elements; flatten to a list of strings.
                supported_protocols = security_config_attributes.get_child_by_name('supported-protocols')
                for supported_protocol in supported_protocols.get_children():
                    security_supported_protocols.append(supported_protocol.get_content())
                return_value = {
                    'name': security_config_attributes['interface'],
                    'is_fips_enabled': self.na_helper.get_value_for_bool(from_zapi=True, value=security_config_attributes['is-fips-enabled']),
                    'supported_ciphers': security_config_attributes['supported-ciphers'],
                    'supported_protocols': security_supported_protocols,
                }

        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error getting security config for interface %s: %s' % (self.parameters['name'], to_native(error)),
                exception=traceback.format_exc())

        return return_value

    def modify_security_config(self, modify):
        """
        Modify the security configuration via ZAPI, or delegate to REST.

        :param modify: dict of attributes to change, as computed by get_modified_attributes.
        """
        if self.use_rest:
            return self.modify_security_config_rest(modify)

        security_config_obj = netapp_utils.zapi.NaElement("security-config-modify")
        security_config_obj.add_new_child("interface", self.parameters['name'])
        if 'is_fips_enabled' in self.parameters:
            # NOTE(review): this rewrites self.parameters in place with the ZAPI string form of the bool.
            self.parameters['is_fips_enabled'] = self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters['is_fips_enabled'])
            security_config_obj.add_new_child('is-fips-enabled', self.parameters['is_fips_enabled'])
        if 'supported_ciphers' in self.parameters:
            security_config_obj.add_new_child('supported-ciphers', self.parameters['supported_ciphers'])
        if 'supported_protocols' in self.parameters:
            supported_protocol_obj = netapp_utils.zapi.NaElement("supported-protocols")
            for protocol in self.parameters['supported_protocols']:
                supported_protocol_obj.add_new_child('string', protocol)
            security_config_obj.add_child_elem(supported_protocol_obj)
        try:
            self.server.invoke_successfully(security_config_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(
                msg='Error modifying security config for interface %s: %s' % (self.parameters['name'], to_native(error)),
                exception=traceback.format_exc()
            )

    def get_security_config_rest(self):
        """
        Get the current security configuration over REST (GET /security).

        :return: dict with is_fips_enabled/supported_cipher_suites/supported_protocols,
                 or the raw record (None) when nothing is returned.
        """
        fields = 'fips.enabled,'
        # tls.* fields are only available on ONTAP 9.10.1 or later.
        if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1):
            fields += 'tls.cipher_suites,tls.protocol_versions'
        record, error = rest_generic.get_one_record(self.rest_api, '/security', None, fields)
        if error:
            self.module.fail_json(msg="Error on getting security config: %s" % error)
        if record:
            return {
                'is_fips_enabled': self.na_helper.safe_get(record, ['fips', 'enabled']),
                'supported_cipher_suites': self.na_helper.safe_get(record, ['tls', 'cipher_suites']),
                'supported_protocols': self.na_helper.safe_get(record, ['tls', 'protocol_versions'])
            }
        return record

    def modify_security_config_rest(self, modify):
        """
        Modify the current security configuration over REST (PATCH /security).

        :param modify: dict of attributes to change.
        """
        body = {}
        if 'is_fips_enabled' in modify:
            body['fips.enabled'] = modify['is_fips_enabled']
        if 'supported_cipher_suites' in modify:
            body['tls.cipher_suites'] = modify['supported_cipher_suites']
        if 'supported_protocols' in modify:
            body['tls.protocol_versions'] = modify['supported_protocols']
        record, error = rest_generic.patch_async(self.rest_api, '/security', None, body)
        if error:
            self.module.fail_json(msg="Error on modifying security config: %s" % error)

    def apply(self):
        """Compute the delta between current and desired config and apply it (honors check_mode)."""
        current = self.get_security_config()
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            self.modify_security_config(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
        self.module.exit_json(**result)


def main():
    """
    Creates the NetApp ONTAP security config object and runs the correct play task
    """
    obj = NetAppOntapSecurityConfig()
    obj.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_ca_certificate.py
b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_ca_certificate.py new file mode 100644 index 000000000..6ddd78b90 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_ca_certificate.py @@ -0,0 +1,184 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc. GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_security_ipsec_ca_certificate +short_description: NetApp ONTAP module to add or delete ipsec ca certificate. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '22.1.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Create or delete security IPsec CA Certificate. +options: + state: + description: + - Create or delete security IPsec CA Certificate. + - The certificate must already be installed on the system, for instance using na_ontap_security_certificates. + choices: ['present', 'absent'] + type: str + default: present + name: + description: + - Name of the CA certificate. + - Certificate must be already installed in svm or cluster scope. + type: str + required: true + svm: + description: + - Name of svm. + - If not set cluster scope is assumed. + type: str + required: false + +notes: + - Supports check_mode. + - Only supported with REST and requires ONTAP 9.10.1 or later. +""" + +EXAMPLES = """ + - name: Add IPsec CA certificate to svm. + netapp.ontap.na_ontap_security_ipsec_ca_certificate: + name: cert1 + svm: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Delete IPsec CA certificate in svm. 
+ netapp.ontap.na_ontap_security_ipsec_ca_certificate: + name: cert1 + svm: ansibleSVM + state: absent + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Add IPsec CA certificate to cluster. + netapp.ontap.na_ontap_security_ipsec_ca_certificate: + name: cert2 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Delete IPsec CA certificate from cluster. + netapp.ontap.na_ontap_security_ipsec_ca_certificate: + name: cert2 + state: absent + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapSecurityCACertificate: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + svm=dict(required=False, type='str') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.uuid = None + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_ipsec_ca_certificate', 9, 10, 1) + + 
def get_certificate_uuid(self): + """Get certificate UUID.""" + api = 'security/certificates' + query = {'name': self.parameters['name']} + if self.parameters.get('svm'): + query['svm.name'] = self.parameters['svm'] + else: + query['scope'] = 'cluster' + record, error = rest_generic.get_one_record(self.rest_api, api, query, 'uuid') + if error: + self.module.fail_json(msg="Error fetching uuid for certificate %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if record: + return record['uuid'] + return None + + def get_ipsec_ca_certificate(self): + """GET IPsec CA certificate record""" + self.uuid = self.get_certificate_uuid() + if self.uuid is None: + if self.parameters['state'] == 'absent': + return None + svm_or_scope = self.parameters['svm'] if self.parameters.get('svm') else 'cluster' + self.module.fail_json(msg="Error: certificate %s is not installed in %s" % (self.parameters['name'], svm_or_scope)) + api = 'security/ipsec/ca-certificates/%s' % self.uuid + record, error = rest_generic.get_one_record(self.rest_api, api) + if error: + # REST returns error if ipsec ca-certificates doesn't exist. 
+ if "entry doesn't exist" in error: + return None + self.module.fail_json(msg="Error fetching security IPsec CA certificate %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + return record if record else None + + def create_ipsec_ca_certificate(self): + """Create IPsec CA certifcate""" + api = 'security/ipsec/ca-certificates' + body = {'certificate.uuid': self.uuid} + if self.parameters.get('svm'): + body['svm.name'] = self.parameters['svm'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error adding security IPsec CA certificate %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_ipsec_ca_certificate(self): + """Delete IPSec CA certificate""" + api = 'security/ipsec/ca-certificates' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid) + if error: + self.module.fail_json(msg="Error deleting security IPsec CA certificate %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + current = self.get_ipsec_ca_certificate() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_ipsec_ca_certificate() + else: + self.delete_ipsec_ca_certificate() + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + ipsec_ca_obj = NetAppOntapSecurityCACertificate() + ipsec_ca_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_config.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_config.py new file mode 100644 index 000000000..ce2fde68f --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_config.py @@ -0,0 +1,127 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc. 
GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_security_ipsec_config +short_description: NetApp ONTAP module to configure IPsec config. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '22.1.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Enable or disable IPsec config. + - Configure replay window. +options: + state: + description: + - modify IPsec configuration, only present is supported. + choices: ['present'] + type: str + default: present + enabled: + description: + - Indicates whether or not IPsec is enabled. + type: bool + required: false + replay_window: + description: + - Replay window size in packets, where 0 indicates that the relay window is disabled. + type: str + required: false + choices: ['0', '64', '128', '256', '512', '1024'] + +notes: + - Supports check_mode. + - Only supported with REST and requires ONTAP 9.8 or later. +""" + +EXAMPLES = """ + - name: Enable IPsec config and set replay_window. + netapp.ontap.na_ontap_security_ipsec_config: + enabled: True + replay_window: 64 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Disable IPsec config. 
+ netapp.ontap.na_ontap_security_ipsec_config: + enabled: False + replay_window: 64 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapSecurityIPsecConfig: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present'], default='present'), + enabled=dict(required=False, type='bool'), + replay_window=dict(required=False, type='str', choices=['0', '64', '128', '256', '512', '1024']) + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.uuid = None + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_ipsec_config:', 9, 8) + + def get_security_ipsec_config(self): + """Get IPsec config details""" + record, error = rest_generic.get_one_record(self.rest_api, 'security/ipsec', None, 'enabled,replay_window') + if error: + self.module.fail_json(msg="Error fetching security IPsec config: %s" % to_native(error), exception=traceback.format_exc()) + if record: + return { + 'enabled': record.get('enabled'), + 'replay_window': record.get('replay_window') + } + return None + + def modify_security_ipsec_config(self, modify): + """ + Modify security ipsec config + """ + dummy, error = 
rest_generic.patch_async(self.rest_api, 'security/ipsec', None, modify) + if error: + self.module.fail_json(msg='Error modifying security IPsec config: %s.' % to_native(error), exception=traceback.format_exc()) + + def apply(self): + modify = self.na_helper.get_modified_attributes(self.get_security_ipsec_config(), self.parameters) + if self.na_helper.changed and not self.module.check_mode: + self.modify_security_ipsec_config(modify) + result = netapp_utils.generate_result(self.na_helper.changed, modify=modify) + self.module.exit_json(**result) + + +def main(): + ipsec_config = NetAppOntapSecurityIPsecConfig() + ipsec_config.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_policy.py new file mode 100644 index 000000000..e02e0df64 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ipsec_policy.py @@ -0,0 +1,458 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc. GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_security_ipsec_policy +short_description: NetApp ONTAP module to create, modify or delete security IPsec policy. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '22.1.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Create, modify or delete security IPsec policy. +options: + state: + description: + - Create or delete security IPsec policy. + choices: ['present', 'absent'] + type: str + default: present + name: + description: + - Name of the security IPsec policy + type: str + required: true + action: + description: + - Action for the IPsec policy. + - Cannot modify after create. 
+ type: str + choices: ['bypass', 'discard', 'esp_transport', 'esp_udp'] + authentication_method: + description: + - Authentication method for the IPsec policy. + - Supported from 9.10.1 or later. + - Cannot modify after create. + type: str + choices: ['none', 'psk', 'pki'] + certificate: + description: + - Certificate for the IPsec policy. + - Supported from 9.10.1 or later. + - Required when C(authentication_method) is 'pki' in create. + type: str + enabled: + description: + - Indicates whether or not the policy is enabled. + type: bool + ipspace: + description: + - IPspace name where C(svm) exist. + type: str + local_endpoint: + description: + - Local endpoint for the IPsec policy. + type: dict + suboptions: + address: + description: + - IPv4 or IPv6 address. + type: str + required: true + netmask: + description: + - Input as netmask length (16) or IPv4 mask (255.255.0.0). + - For IPv6, the default value is 64 with a valid range of 1 to 127. + type: str + required: true + port: + description: + - Application port to be covered by the IPsec policy, example 23. + type: str + remote_endpoint: + description: + - remote endpoint for the IPsec policy. + type: dict + suboptions: + address: + description: + - IPv4 or IPv6 address. + type: str + required: true + netmask: + description: + - Input as netmask length (16) or IPv4 mask (255.255.0.0). + - For IPv6, the default value is 64 with a valid range of 1 to 127. + type: str + required: true + port: + description: + - Application port to be covered by the IPsec policy, example 23 or 23-23. + type: str + local_identity: + description: + - local IKE endpoint's identity for authentication purpose. + type: str + remote_identity: + description: + - remote IKE endpoint's identity for authentication purpose. + type: str + protocol: + description: + - protocol to be protected by by this policy. + - example 'any' or '0', 'tcp', 'udp' or protocol number. 
+ type: str + secret_key: + description: + - Pre-shared key for IKE negotiation. + - Required when C(authentication_method) is 'psk' in create. + - Cannot modify after create. + type: str + svm: + description: + - The name of the SVM. + - Required when creating security IPsec policy. + type: str + +notes: + - Supports check_mode. + - Only supported with REST and requires ONTAP 9.8 or later. +""" + +EXAMPLES = """ + - name: Create security IPsec policy with pre-shared Keys. + netapp.ontap.na_ontap_security_ipsec_policy: + name: ipsec_policy_psk + ipspace: Default + svm: ansibleSVM + authentication_method: psk + secret_key: "{{ secret_key }}" + action: esp_transport + local_endpoint: + address: 10.23.43.23 + netmask: 24 + port: 201 + remote_endpoint: + address: 10.23.43.30 + netmask: 24 + port: 205 + protocol: tcp + enabled: true + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Create security IPsec policy with certificates. + netapp.ontap.na_ontap_security_ipsec_policy: + name: ipsec_policy_pki + ipspace: Default + svm: ansibleSVM + authentication_method: pki + certificate: "{{ cert_name }}" + action: esp_transport + local_endpoint: + address: 10.23.43.23 + netmask: 24 + port: 201 + remote_endpoint: + address: 10.23.43.30 + netmask: 24 + port: 205 + protocol: tcp + enabled: true + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Create security IPsec policy without psk or certificates. 
+ netapp.ontap.na_ontap_security_ipsec_policy: + name: ipsec_policy_none + ipspace: Default + svm: ansibleSVM + action: bypass + local_endpoint: + address: 10.23.43.23 + netmask: 24 + port: 201 + remote_endpoint: + address: 10.23.43.30 + netmask: 24 + port: 205 + protocol: tcp + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Modify security IPsec policy local, remote end_point. + netapp.ontap.na_ontap_security_ipsec_policy: + name: ipsec_policy_pki + ipspace: Default + svm: ansibleSVM + authentication_method: pki + certificate: "{{ cert_name }}" + action: esp_transport + local_endpoint: + address: 10.23.43.50 + netmask: 24 + port: 201 + remote_endpoint: + address: 10.23.43.60 + netmask: 24 + port: 205 + protocol: tcp + enabled: true + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Modify security IPsec protocol, enable options. + netapp.ontap.na_ontap_security_ipsec_policy: + name: ipsec_policy_pki + ipspace: Default + svm: ansibleSVM + authentication_method: pki + certificate: "{{ cert_name }}" + action: esp_transport + local_endpoint: + address: 10.23.43.50 + netmask: 24 + port: 201 + remote_endpoint: + address: 10.23.43.60 + netmask: 24 + port: 205 + protocol: udp + enabled: false + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + + - name: Delete security IPsec policy. 
+ netapp.ontap.na_ontap_security_ipsec_policy: + name: ipsec_policy_pki + svm: ansibleSVM + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, netapp_ipaddress + + +class NetAppOntapSecurityIPsecPolicy: + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + action=dict(required=False, type='str', choices=['bypass', 'discard', 'esp_transport', 'esp_udp']), + authentication_method=dict(required=False, type='str', choices=['none', 'psk', 'pki']), + certificate=dict(required=False, type='str'), + enabled=dict(required=False, type='bool'), + ipspace=dict(required=False, type='str'), + local_endpoint=dict(required=False, type='dict', options=dict( + address=dict(required=True, type='str'), + netmask=dict(required=True, type='str'), + port=dict(required=False, type='str') + )), + local_identity=dict(required=False, type='str'), + remote_identity=dict(required=False, type='str'), + protocol=dict(required=False, type='str'), + remote_endpoint=dict(required=False, type='dict', options=dict( + address=dict(required=True, type='str'), + netmask=dict(required=True, type='str'), + port=dict(required=False, type='str') + )), + secret_key=dict(required=False, type='str', no_log=True), + svm=dict(required=False, type='str') + )) + self.module = AnsibleModule( + 
argument_spec=self.argument_spec, + mutually_exclusive=[('secret_key', 'certificate')], + required_if=[ + ('authentication_method', 'psk', ['secret_key']), + ('authentication_method', 'pki', ['certificate']) + ], + supports_check_mode=True + ) + self.uuid = None + self.na_helper = NetAppModule(self.module) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_ipsec_policy', 9, 8) + partially_supported_rest_properties = [['authentication_method', (9, 10, 1)], ['certificate', (9, 10, 1)]] + self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties) + self.parameters = self.na_helper.filter_out_none_entries(self.parameters) + if self.parameters['state'] == 'present': + self.validate_ipsec() + + def validate_ipsec(self): + """ + validate ipsec options. + """ + for end_point in ['local_endpoint', 'remote_endpoint']: + if self.parameters.get(end_point): + self.parameters[end_point]['address'] = netapp_ipaddress.validate_and_compress_ip_address(self.parameters[end_point]['address'], self.module) + self.parameters[end_point]['netmask'] = str(netapp_ipaddress.netmask_to_netmask_length(self.parameters[end_point]['address'], + self.parameters[end_point]['netmask'], self.module)) + # ONTAP returns port in port ranges. 120 set is returned as 120-120 + if self.parameters[end_point].get('port') and '-' not in self.parameters[end_point]['port']: + self.parameters[end_point]['port'] = self.parameters[end_point]['port'] + '-' + self.parameters[end_point]['port'] + # if action is bypass/discard and auth_method is psk/pki then security_key/certificate will be ignored in REST. + # so delete the authentication_method, secret_key and certificate to avoid idempotent issue. 
+ if self.parameters.get('action') in ['bypass', 'discard'] and self.parameters.get('authentication_method') != 'none': + msg = "The IPsec action is %s, which does not provide packet protection. The authentication_method and " % self.parameters['action'] + self.parameters.pop('authentication_method', None) + if self.parameters.get('secret_key'): + del self.parameters['secret_key'] + self.module.warn(msg + 'secret_key options are ignored') + if self.parameters.get('certificate'): + del self.parameters['certificate'] + self.module.warn(msg + 'certificate options are ignored') + # mapping protocol number to protocol to avoid idempotency issue. + protocols_info = {'6': 'tcp', '17': 'udp', '0': 'any'} + if self.parameters.get('protocol') in protocols_info: + self.parameters['protocol'] = protocols_info[self.parameters['protocol']] + + def get_security_ipsec_policy(self): + """ + Get security ipsec policy. + """ + api = 'security/ipsec/policies' + query = { + 'name': self.parameters['name'], + 'fields': 'uuid,enabled,local_endpoint,local_identity,remote_identity,protocol,remote_endpoint,action' + } + if self.parameters.get('authentication_method'): + query['fields'] += ',authentication_method' + if self.parameters.get('certificate'): + query['fields'] += ',certificate' + if self.parameters.get('svm'): + query['svm.name'] = self.parameters['svm'] + else: + query['scope'] = 'cluster' + # Cannot get current IPsec policy with ipspace - burt1519419 + # if self.parameters.get('ipspace'): + # query['ipspace.name'] = self.parameters['ipspace'] + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg='Error fetching security ipsec policy %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if record: + self.uuid = record['uuid'] + return { + 'action': self.na_helper.safe_get(record, ['action']), + 'authentication_method': self.na_helper.safe_get(record, ['authentication_method']), + 
'certificate': self.na_helper.safe_get(record, ['certificate', 'name']), + 'enabled': self.na_helper.safe_get(record, ['enabled']), + 'local_endpoint': self.na_helper.safe_get(record, ['local_endpoint']), + 'local_identity': self.na_helper.safe_get(record, ['local_identity']), + 'protocol': self.na_helper.safe_get(record, ['protocol']), + 'remote_endpoint': self.na_helper.safe_get(record, ['remote_endpoint']), + 'remote_identity': self.na_helper.safe_get(record, ['remote_identity']) + } + return None + + def create_security_ipsec_policy(self): + """ + Create security ipsec policy + """ + api = 'security/ipsec/policies' + dummy, error = rest_generic.post_async(self.rest_api, api, self.form_create_modify_body()) + if error: + self.module.fail_json(msg='Error creating security ipsec policy %s: %s.' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_security_ipsec_policy(self, modify): + """ + Modify security ipsec policy. + """ + api = 'security/ipsec/policies' + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, self.form_create_modify_body(modify)) + if error: + self.module.fail_json(msg='Error modifying security ipsec policy %s: %s.' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_security_ipsec_policy(self): + """ + Delete security ipsec policy. + """ + api = 'security/ipsec/policies' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid) + if error: + self.module.fail_json(msg='Error deleting security ipsec policy %s: %s.' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def form_create_modify_body(self, params=None): + """ + Returns body for create or modify. 
+ """ + if params is None: + params = self.parameters + body = {} + keys = ['name', 'action', 'authentication_method', 'enabled', 'secret_key', + 'local_endpoint', 'local_identity', 'remote_identity', 'protocol', 'remote_endpoint'] + for key in keys: + if key in params: + body[key] = self.parameters[key] + if 'certificate' in params: + body['certificate.name'] = self.parameters['certificate'] + if 'ipspace' in params: + body['ipspace.name'] = self.parameters['ipspace'] + if 'svm' in params: + body['svm.name'] = self.parameters['svm'] + return body + + def apply(self): + current = self.get_security_ipsec_policy() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + error_keys = [key for key in modify if key in ['authentication_method', 'action']] + if error_keys: + plural = 's' if len(error_keys) > 1 else '' + self.module.fail_json(msg="Error: cannot modify option%s - %s." % (plural, ", ".join(error_keys))) + # Expected ONTAP to throw error but restarts instead, if try to set certificate where auth_method is none. 
+ if modify.get('certificate') and current['authentication_method'] == 'none': + self.module.fail_json(msg="Error: cannot set certificate for IPsec policy where authentication_method is none") + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_security_ipsec_policy() + elif cd_action == 'delete': + self.delete_security_ipsec_policy() + else: + self.modify_security_ipsec_policy(modify) + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + ipsec_obj = NetAppOntapSecurityIPsecPolicy() + ipsec_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py new file mode 100644 index 000000000..fcab16e40 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_key_manager.py @@ -0,0 +1,640 @@ +#!/usr/bin/python + +# (c) 2019-2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ + +module: na_ontap_security_key_manager + +short_description: NetApp ONTAP security key manager. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Add or delete or setup key management on NetApp ONTAP. + - With ZAPI, this module is limited to adding or removing external key servers. It does not manage certificates. + - With REST, this module can create an external key manager and certificates are required for creation. + - With REST, onboard key manager is supported. + +options: + + state: + description: + - Whether the specified key manager should exist or not. 
+ choices: ['present', 'absent'] + type: str + default: 'present' + + ip_address: + description: + - The IP address of the external key management server. + - Mutually exclusive with external and onboard options. + - Required with ZAPI. + required: false + type: str + + tcp_port: + description: + - The TCP port on which the key management server listens for incoming connections. + default: 5696 + type: int + + node: + description: + - The node which key management server runs on. + - Ignored, a warning is raised if present. + - Deprecated as of 21.22.0, as it was never used. + type: str + + external: + description: + - Configures external key manager. + - Not supported with ZAPI. + - Mutually exclusive with ip_address and onboard. + type: dict + suboptions: + client_certificate: + description: + - Client certificate name (already installed in the cluster or SVM). + - Required when creating an external key manager. + type: str + server_ca_certificates: + description: + - List of server CA certificate names (already installed in the cluster or SVM). + - Required when creating an external key manager. + type: list + elements: str + servers: + description: + - List of external key servers for key management. + - Format - ip_address:port or FQDN:port. port defaults to the value of C(tcp_port) when not provided. + - The order in the list is not preserved if the key-manager already exists. + type: list + elements: str + version_added: 21.23.0 + + onboard: + description: + - Configures onboard key management. + - Not supported with ZAPI. + - Mutually exclusive with ip_address and external . + type: dict + suboptions: + from_passphrase: + description: + - The cluster-wide passphrase. + - Ignored if the onboard key manager does not already exists. + - Required to change the passphrase. + type: str + passphrase: + description: + - The cluster-wide passphrase. + type: str + synchronize: + description: + - Synchronizes missing onboard keys on any node in the cluster. 
+ type: bool + default: false + version_added: 21.23.0 + + vserver: + description: + - SVM name when using an external key manager. + - Not supported for onboard key manager. + - Not supported with ZAPI. + type: str + version_added: 21.23.0 + +notes: + - Though C(node) is accepted as a parameter, it is not used in the module. + - Supports check_mode. + - Only supported at cluster level with ZAPI, or for onboard. + - ZAPI supports relies on deprecated APIs since ONTAP 9.6. +""" + +EXAMPLES = """ + # Assuming module_defaults are used to set up hostname, username, password, https, validate_certs + + - name: Delete Key Manager + tags: + - delete + netapp.ontap.na_ontap_security_key_manager: + state: absent + + - name: Add Key Manager - ZAPI + tags: + - add + netapp.ontap.na_ontap_security_key_manager: + ip_address: 0.0.0.0 + + - name: Add/Modify external Key Manager - REST + netapp.ontap.na_ontap_security_key_manager: + state: present + external: + servers: 10.10.10.10:5696 + client_certificate: kmip_client + server_ca_certificates: kmip_ca + vserver: "{{ vserver | default(omit) }}" + + - name: Add/Modify external Key Manager - REST + netapp.ontap.na_ontap_security_key_manager: + state: present + external: + servers: 10.10.10.10:5696,10.10.10.10:5697,10.10.10.11:5696 + client_certificate: kmip_client + server_ca_certificates: kmip_ca + vserver: "{{ vserver | default(omit) }}" + + - name: Add onboard Key Manager + netapp.ontap.na_ontap_security_key_manager: + state: present + onboard: + passphrase: "hello, le soleil brille, brille, brille!" + + - name: Change passphrase for onboard Key Manager + netapp.ontap.na_ontap_security_key_manager: + state: present + onboard: + from_passphrase: "hello, le soleil brille, brille, brille!" + passphrase: "hello, le soleil brille, brille, brille! 
- 2" + synchronize: true +""" + +RETURN = """ +""" + +import time +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapSecurityKeyManager: + """class with key manager operations""" + + def __init__(self): + """Initialize module parameters""" + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + ip_address=dict(required=False, type='str'), + node=dict(required=False, type='str'), + tcp_port=dict(required=False, type='int', default=5696), + external=dict(type='dict', options=dict( + client_certificate=dict(type='str'), + server_ca_certificates=dict(type='list', elements='str'), + servers=dict(type='list', elements='str'), + )), + onboard=dict(type='dict', options=dict( + from_passphrase=dict(type='str', no_log=True), + passphrase=dict(type='str', no_log=True), + synchronize=dict(type='bool', default=False), + )), + vserver=dict(type='str'), + ) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + mutually_exclusive=[ + ('external', 'onboard'), + ('ip_address', 'onboard'), + ('ip_address', 'external'), + ('onboard', 'vserver'), + ], + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + if 'node' in self.parameters: + self.module.warn('The option "node" is deprecated and should not be used.') + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if self.use_rest: + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_key_manager', 9, 
7) + self.uuid = None + self.scope, self.resource = self.set_scope(self.parameters.get('vserver')) + # expand parameters to match REST returned info + self.update_parameters_rest() + else: + rest_only = [x for x in ('external', 'onboard', 'vserver') if x in self.parameters] + if rest_only: + self.module.fail_json(msg='Error: REST is required for %s option%s.' + % (', '.join(rest_only), 's' if len(rest_only) > 1 else '')) + if 'ip_address' not in self.parameters: + self.module.fail_json(msg='missing required arguments: ip_address') + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def add_port(self, server): + """ ONTAP automatically adds :5696 when the port is not present + We need to add it to make the module idempotent + """ + return server if ':' in server else '%s:%s' % (server, self.parameters['tcp_port']) + + def update_parameters_rest(self): + """ expand parameters to match REST returned info + transform legacy input + """ + if self.scope == 'svm': + self.parameters['svm'] = {'name': self.parameters.pop('vserver')} + servers = self.na_helper.safe_get(self.parameters, ['external', 'servers']) + if servers: + # eliminate any empty entry and add port when needed + self.parameters['external']['servers'] = [{'server': self.add_port(server)} for server in servers if server] + + ip_address = self.parameters.pop('ip_address', None) + if ip_address: + ip_address += ':%s' % self.parameters.pop('tcp_port') + self.parameters['external'] = {'servers': [{'server': ip_address}]} + + @staticmethod + def set_scope(vserver): + """ define the scope, and a user friendly resource name""" + return ( + 'cluster' if vserver is None else 'svm', + 'cluster' if vserver is None else 'vserver: %s' % vserver + ) + + def get_key_manager(self): + """ + get key manager by ip address. 
+ :return: a dict of key manager + """ + if self.use_rest: + return self.get_key_manager_rest() + key_manager_info = netapp_utils.zapi.NaElement('security-key-manager-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'key-manager-info', **{'key-manager-ip-address': self.parameters['ip_address']}) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + key_manager_info.add_child_elem(query) + + try: + result = self.cluster.invoke_successfully(key_manager_info, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching key manager: %s' % to_native(error), + exception=traceback.format_exc()) + + return_value = None + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + key_manager = result.get_child_by_name('attributes-list').get_child_by_name('key-manager-info') + return_value = {} + if key_manager.get_child_by_name('key-manager-ip-address'): + return_value['ip_address'] = key_manager.get_child_content('key-manager-ip-address') + if key_manager.get_child_by_name('key-manager-server-status'): + return_value['server_status'] = key_manager.get_child_content('key-manager-server-status') + if key_manager.get_child_by_name('key-manager-tcp-port'): + return_value['tcp_port'] = int(key_manager.get_child_content('key-manager-tcp-port')) + + return return_value + + def key_manager_setup(self): + """ + set up external key manager. + deprecated as of ONTAP 9.6. + """ + key_manager_setup = netapp_utils.zapi.NaElement('security-key-manager-setup') + # if specify on-boarding passphrase, it is on-boarding key management. + # it not, then it's external key management. 
+ try: + self.cluster.invoke_successfully(key_manager_setup, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error setting up key manager: %s' % to_native(error), + exception=traceback.format_exc()) + + def create_key_manager(self): + """ + add key manager. + """ + if self.use_rest: + return self.create_key_manager_rest() + key_manager_create = netapp_utils.zapi.NaElement('security-key-manager-add') + key_manager_create.add_new_child('key-manager-ip-address', self.parameters['ip_address']) + if self.parameters.get('tcp_port'): + key_manager_create.add_new_child('key-manager-tcp-port', str(self.parameters['tcp_port'])) + try: + self.cluster.invoke_successfully(key_manager_create, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating key manager: %s' % to_native(error), + exception=traceback.format_exc()) + + def delete_key_manager(self): + """ + delete key manager. + """ + if self.use_rest: + return self.delete_key_manager_rest() + key_manager_delete = netapp_utils.zapi.NaElement('security-key-manager-delete') + key_manager_delete.add_new_child('key-manager-ip-address', self.parameters['ip_address']) + try: + self.cluster.invoke_successfully(key_manager_delete, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting key manager: %s' % to_native(error), + exception=traceback.format_exc()) + + def _get_security_certificate_uuid_rest_any(self, query, fields): + api = 'security/certificates' + query['scope'] = self.scope + if self.scope == 'svm': + # try first at SVM level + query['svm.name'] = self.parameters['svm']['name'] + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if record and error is None: + return record, error + # retry at cluster scope + del query['svm.name'] + query['scope'] = 'cluster' + return rest_generic.get_one_record(self.rest_api, api, query, fields) + + def 
get_security_certificate_uuid_rest_97(self, name, type): + query = {'common_name': name, 'type': type} + fields = 'uuid,common_name,type' + return self._get_security_certificate_uuid_rest_any(query, fields) + + def get_security_certificate_uuid_rest_98(self, name): + query = {'name': name} + fields = 'uuid,name,common_name,type' + return self._get_security_certificate_uuid_rest_any(query, fields) + + def get_security_certificate_uuid_rest(self, name, type): + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8): + record, error = self.get_security_certificate_uuid_rest_98(name) + message = 'certificate %s not found, retrying with common_name and type %s.'\ + % (name, type) + else: + record, error = None, None + message = 'name is not supported in 9.6 or 9.7, using common_name %s and type %s.'\ + % (name, type) + if not error and not record: + self.module.warn(message) + record, error = self.get_security_certificate_uuid_rest_97(name, type) + if not error and not record: + error = 'not found' + if error: + self.module.fail_json(msg='Error fetching security certificate info for %s of type: %s on %s: %s.' 
% (name, type, self.resource, error)) + return record['uuid'] + + def get_key_manager_rest(self): + api = 'security/key-managers' + query = {'scope': self.scope} + fields = 'status,external,uuid,onboard' + if self.scope == 'svm': + query['svm.name'] = self.parameters['svm']['name'] + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + if self.scope == 'svm' and 'SVM "%s" does not exist' % self.parameters['svm']['name'] in error: + return None + self.module.fail_json(msg='Error fetching key manager info for %s: %s' % (self.resource, error)) + if record: + self.uuid = record['uuid'] + if 'external' in record and (self.na_helper.safe_get(record, ['onboard', 'enabled']) is False): + del record['onboard'] + if 'external' in record and 'servers' in record['external']: + # remove extra fields that are readonly and not relevant for modify + record['external']['servers'] = [{'server': server['server']} for server in record['external']['servers']] + self.na_helper.remove_hal_links(record) + + return record + + def create_body(self, params): + if 'external' in params: + body = {'external': self.na_helper.filter_out_none_entries(params['external'])} + elif 'onboard' in params: + body = {'onboard': self.na_helper.filter_out_none_entries(params['onboard'])} + body['onboard'].pop('from_passphrase', None) + else: + return + if 'svm' in self.parameters: + body['svm'] = self.na_helper.filter_out_none_entries(self.parameters['svm']) + return body + + def create_key_manager_rest(self, retrying=None): + api = 'security/key-managers' + body = self.create_body(self.parameters) + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + # ONTAP returns no record if external manager is configured but no server is present + if not retrying and ('already has external key management configured' in error + or 'External key management already configured' in error): + self.module.warn("deleting and recreating key manager as no key 
server is configured.") + self.delete_key_manager_rest() + time.sleep(5) + return self.create_key_manager_rest('retrying') + resource = 'cluster' if self.parameters.get('vserver') is None else self.parameters['vserver'] + self.module.fail_json(msg='Error creating key manager for %s: %s' % (resource, error)) + + def modify_key_manager_rest(self, modify, current=None, return_error=False): + # external key servers cannot be updated in PATCH, they are handled later + key_servers = self.na_helper.safe_get(modify, ['external', 'servers']) + if key_servers: + del modify['external']['servers'] + if not modify['external']: + del modify['external'] + if modify: + api = 'security/key-managers' + body = self.create_body(modify) + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body) + if error: + if return_error: + return error + resource = 'cluster' if self.parameters.get('vserver') is None else self.parameters['vserver'] + self.module.fail_json(msg='Error modifying key manager for %s: %s' % (resource, error)) + if key_servers: + self.update_key_server_list(current) + return None + + def check_passphrase_rest(self, passphrase): + """ API does not return the passphrase + In order to check for idempotency, check if the desired passphrase is already active""" + params = { + 'onboard': { + 'existing_passphrase': passphrase, + 'passphrase': passphrase, + } + } + error = self.modify_key_manager_rest(params, return_error=True) + if not error: + return 'unexpected_success in check_passphrase_rest', error + if 'Cluster-wide passphrase is incorrect.' in error: + return 'incorrect_passphrase', error + if 'New passphrase cannot be same as the old passphrase.' 
in error: + return 'current_passphrase', error + self.module.warn('Unexpected response in check_passphrase_rest: %s' % error) + return 'unexpected_error in check_passphrase_rest', error + + def delete_key_manager_rest(self): + api = 'security/key-managers' + if self.uuid is None: + # ONTAP does not return a record when an external key manager is configured without any external server + query = {'scope': self.scope} + if self.scope == 'svm': + query['svm.name'] = self.parameters['svm']['name'] + else: + query = None + dummy, error = rest_generic.delete_async(self.rest_api, api, self.uuid, query) + if error: + resource = 'cluster' if self.parameters.get('vserver') is None else self.parameters['vserver'] + self.module.fail_json(msg='Error deleting key manager for %s: %s' % (resource, error)) + + def validate_delete_action(self, current): + return + + def validate_modify(self, current, modify): + error = None if self.use_rest else 'modify is not supported with ZAPI, new values: %s, current values: %s' % (modify, current) + if error: + self.module.fail_json(msg='Error, cannot modify existing configuraton: %s' % error) + + def substitute_certificate_uuids(self, params): + if 'external' not in params: + return + certificate = self.na_helper.safe_get(params, ['external', 'client_certificate']) + if certificate: + params['external']['client_certificate'] = {'uuid': self.get_security_certificate_uuid_rest(certificate, 'client')} + certificates = self.na_helper.safe_get(params, ['external', 'server_ca_certificates']) + if certificates: + params['external']['server_ca_certificates'] = [{'uuid': self.get_security_certificate_uuid_rest(certificate, 'server_ca')} + for certificate in certificates] + + def is_passphrase_update_required(self, passphrase, from_passphrase): + check_new, __ = self.check_passphrase_rest(passphrase) + if check_new == 'current_passphrase': + return False + check_old, error = self.check_passphrase_rest(from_passphrase) + if check_old == 
'incorrect_passphrase' and check_new == 'incorrect_passphrase': + self.module.fail_json(msg='Error: neither from_passphrase nor passphrase match installed passphrase: %s' % error) + # if check_old is current, we're good to change the passphrase. For other errors, we'll just try again, we already warned. + return True + + def force_onboard_actions(self): + """ synchronize and passphrase are not returned in GET so we need to be creative """ + if 'onboard' not in self.parameters: + return None, None + passphrase = self.na_helper.safe_get(self.parameters, ['onboard', 'passphrase']) + # do we need to synchronize + modify_sync = None + if self.na_helper.safe_get(self.parameters, ['onboard', 'synchronize']): + if passphrase is None: + self.module.fail_json(msg='Error: passphrase is required for synchronize.') + modify_sync = {'onboard': { + 'synchronize': True, + 'existing_passphrase': passphrase + }} + # do we need to update the passphrase + modify_passphrase = None + from_passphrase = self.na_helper.safe_get(self.parameters, ['onboard', 'from_passphrase']) + if passphrase and not from_passphrase: + self.module.warn('passphrase is ignored') + if not passphrase and from_passphrase and not modify_sync: + self.module.warn('from_passphrase is ignored') + if passphrase and from_passphrase and self.is_passphrase_update_required(passphrase, from_passphrase): + modify_passphrase = {'onboard': { + 'passphrase': passphrase, + 'existing_passphrase': from_passphrase + }} + # wrapping up + if modify_passphrase or modify_sync: + self.na_helper.changed = True + return modify_passphrase, modify_sync + + def validate_type_change(self, current): + """present moving from onboard to external and reciprocally""" + error = None + if 'onboard' in current and 'external' in self.parameters: + error = 'onboard key-manager is already installed, it needs to be deleted first.' 
+ if 'external' in current and 'onboard' in self.parameters: + error = 'external key-manager is already installed, it needs to be deleted first.' + if error: + self.module.fail_json(msg='Error, cannot modify existing configuraton: %s' % error) + + def local_get_modified_attributes(self, current): + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if current and 'external' in self.parameters and not self.na_helper.safe_get(modify, ['external', 'servers']): + current_servers = self.na_helper.safe_get(current, ['external', 'servers']) + desired_servers = self.na_helper.safe_get(self.parameters, ['external', 'servers']) + # order matters for key servers + if current_servers != desired_servers: + if 'external' not in modify: + modify['external'] = {} + modify['external']['servers'] = desired_servers + self.na_helper.changed = True + return modify + + def add_external_server_rest(self, server): + api = 'security/key-managers/%s/key-servers' % self.uuid + body = { + 'server': server + } + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error adding external key server %s: %s' % (server, error)) + + def remove_external_server_rest(self, server): + api = 'security/key-managers/%s/key-servers' % self.uuid + dummy, error = rest_generic.delete_async(self.rest_api, api, server) + if error: + self.module.fail_json(msg='Error removing external key server %s: %s' % (server, error)) + + def update_key_server_list(self, current): + desired_servers = self.na_helper.safe_get(self.parameters, ['external', 'servers']) + if desired_servers is None: + return + desired_servers = [server['server'] for server in desired_servers] + current_servers = self.na_helper.safe_get(current, ['external', 'servers']) or [] + current_servers = [server['server'] for server in current_servers] + for server in current_servers: + if server not in desired_servers: + self.remove_external_server_rest(server) + for server in 
desired_servers: + if server not in current_servers: + self.add_external_server_rest(server) + + def apply(self): + if not self.use_rest: + self.key_manager_setup() + current = self.get_key_manager() + if current: + self.validate_type_change(current) + if self.parameters['state'] == 'present': + self.substitute_certificate_uuids(self.parameters) + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.local_get_modified_attributes(current) if cd_action is None else None + # with onboard, changing a passphrase or synchronizing cannot be done in the same PATCH request + modify_passphrase, modify_sync = self.force_onboard_actions() if cd_action is None and current else (None, None) + if cd_action == 'delete' and self.use_rest: + self.validate_delete_action(current) + if modify: + self.validate_modify(current, modify) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_key_manager() + elif cd_action == 'delete': + self.delete_key_manager() + elif modify: + self.modify_key_manager_rest(modify, current) + elif modify_passphrase: + self.modify_key_manager_rest(modify_passphrase) + elif modify_sync: + self.modify_key_manager_rest(modify_sync) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """Apply volume operations from playbook""" + obj = NetAppOntapSecurityKeyManager() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ssh.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ssh.py new file mode 100644 index 000000000..e1f21246c --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_security_ssh.py @@ -0,0 +1,197 @@ +#!/usr/bin/python + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import 
#!/usr/bin/python

# (c) 2022, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type

'''
na_ontap_security_ssh
'''


DOCUMENTATION = '''
module: na_ontap_security_ssh
short_description: NetApp ONTAP security ssh
extends_documentation_fragment:
    - netapp.ontap.netapp.na_ontap
version_added: 21.24.0
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
  - Modify SSH server configuration of SVM on ONTAP
options:
  state:
    description:
      - SSH service is always enabled.
    choices: ['present']
    type: str
    default: present
  vserver:
    description:
      - Name of the vserver to use for vserver scope.
      - If absent or null, cluster scope is assumed.
    type: str
  ciphers:
    description:
      - Ciphers for encrypting the data.
      - Example list [ aes256_ctr, aes192_ctr, aes128_ctr, aes256_cbc, aes192_cbc ]
    type: list
    elements: str
  key_exchange_algorithms:
    description:
      - Key exchange algorithms.
      - Example list [ diffie_hellman_group_exchange_sha256, diffie_hellman_group14_sha1 ]
    type: list
    elements: str
  mac_algorithms:
    description:
      - MAC algorithms.
      - Example list [ hmac_sha1, hmac_sha2_512_etm ]
    type: list
    elements: str
  max_authentication_retry_count:
    description:
      - Maximum authentication retries allowed before closing the connection.
      - Minimum value is 2 and maximum is 6.
      - Default value is 2.
    type: int

notes:
  - Removing all SSH key exchange algorithms is not supported. SSH login would fail.
  - This module is only for REST.
'''

EXAMPLES = """
    - name: Modify SSH algorithms
      netapp.ontap.na_ontap_security_ssh:
        vserver: vserverName
        ciphers: ["aes256_ctr", "aes192_ctr"]
        key_exchange_algorithms: ["diffie_hellman_group_exchange_sha256"]
        mac_algorithms: ["hmac_sha1"]
        max_authentication_retry_count: 6
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"

    - name: Modify SSH algorithms at cluster level
      netapp.ontap.na_ontap_security_ssh:
        vserver:
        ciphers: ["aes256_ctr", "aes192_ctr"]
        key_exchange_algorithms: ["diffie_hellman_group_exchange_sha256"]
        mac_algorithms: ["hmac_sha1"]
        max_authentication_retry_count: 6
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"

    - name: Modify SSH algorithms at cluster level
      netapp.ontap.na_ontap_security_ssh:
        ciphers: ["aes256_ctr", "aes192_ctr"]
        key_exchange_algorithms: ["diffie_hellman_group_exchange_sha256"]
        mac_algorithms: ["hmac_sha1"]
        max_authentication_retry_count: 6
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"
"""

RETURN = """
"""

from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic


class NetAppOntapSecuritySSH:
    """ object initialize and class methods

    Modifies the SSH server configuration at cluster or SVM scope.  REST only,
    requires ONTAP 9.10.1 or later.
    """
    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present'], default='present'),
            vserver=dict(required=False, type='str'),
            ciphers=dict(required=False, type='list', elements='str'),
            key_exchange_algorithms=dict(required=False, type='list', elements='str', no_log=False),
            mac_algorithms=dict(required=False, type='list', elements='str'),
            max_authentication_retry_count=dict(required=False, type='int'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule(self)
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # svm_uuid is resolved in apply() when vserver scope is used
        self.svm_uuid = None
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_security_ssh', 9, 10, 1)
        self.safe_strip()

    def safe_strip(self):
        """ strip the left and right spaces of string and also removes an empty string"""
        for option in ('ciphers', 'key_exchange_algorithms', 'mac_algorithms'):
            if option in self.parameters:
                self.parameters[option] = [item.strip() for item in self.parameters[option] if len(item.strip())]
                # Validation of input parameters: an empty list would disable the option on ONTAP
                if self.parameters[option] == []:
                    self.module.fail_json(msg="Removing all SSH %s is not supported. SSH login would fail. "
                                              "There must be at least one %s associated with the SSH configuration." % (option, option))
        return

    def get_security_ssh_rest(self):
        '''
        Retrieves the SSH server configuration for the SVM or cluster.
        '''
        fields = ['key_exchange_algorithms', 'ciphers', 'mac_algorithms', 'max_authentication_retry_count']
        query = {}
        if self.parameters.get('vserver'):
            api = 'security/ssh/svms'
            query['svm.name'] = self.parameters['vserver']
            # svm.uuid is needed to PATCH at SVM scope
            fields.append('svm.uuid')
        else:
            api = 'security/ssh'
        query['fields'] = ','.join(fields)
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg=error)
        return record

    def modify_security_ssh_rest(self, modify):
        '''
        Updates the SSH server configuration for the specified SVM.
        '''
        if self.parameters.get('vserver'):
            if self.svm_uuid is None:
                self.module.fail_json(msg="Error: no uuid found for the SVM")
            api = 'security/ssh/svms'
        else:
            api = 'security/ssh'
        body = {}
        for option in ('ciphers', 'key_exchange_algorithms', 'mac_algorithms', 'max_authentication_retry_count'):
            if option in modify:
                body[option] = modify[option]
        if body:
            dummy, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body)
            if error:
                self.module.fail_json(msg=error)

    def apply(self):
        """Fetch current configuration, compute the diff, and PATCH if needed (honoring check_mode)."""
        current = self.get_security_ssh_rest()
        self.svm_uuid = self.na_helper.safe_get(current, ['svm', 'uuid']) if current and self.parameters.get('vserver') else None
        modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            self.modify_security_ssh_rest(modify)
        result = netapp_utils.generate_result(self.na_helper.changed, modify=modify)
        self.module.exit_json(**result)


def main():
    """ Create object and call apply """
    ssh_obj = NetAppOntapSecuritySSH()
    ssh_obj.apply()


if __name__ == '__main__':
    main()
#!/usr/bin/python

# (c) 2021-2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

'''
na_ontap_service_policy
'''

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''

module: na_ontap_service_policy

short_description: NetApp ONTAP service policy configuration
extends_documentation_fragment:
    - netapp.ontap.netapp.na_ontap
version_added: 21.7.0
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>

description:
  - Add, modify, or remove service policies.
  - This module requires ONTAP 9.8 or later, and only supports REST.

options:
  state:
    description:
      - Whether the specified service policy should exist or not.
    choices: ['present', 'absent']
    type: str
    default: 'present'
  name:
    description:
      - The name of the service policy.
    required: true
    type: str
  ipspace:
    description:
      - Name of the ipspace.
      - Required for cluster-scoped service policies.
      - Optional for SVM-scoped service policies.
    type: str
  services:
    description:
      - List of services to associate to this service policy.
      - To remove all services, use "no_service".  No other value is allowed when no_service is present.
      - Note - not all versions of ONTAP support all values, and new ones may be added.
      - See C(known_services) and C(additional_services) to address unknown service errors.
    type: list
    elements: str
  vserver:
    description:
      - The name of the vserver to use.
      - Omit this option for cluster scoped user accounts.
    type: str
  scope:
    description:
      - Set to "svm" for interfaces owned by an SVM. Otherwise, set to "cluster".
      - svm is assumed if vserver is set.
      - cluster is assumed if vserver is not set.
    type: str
    choices: ['cluster', 'svm']
  known_services:
    description:
      - List of known services in 9.11.1
      - An error is raised if any service in C(services) is not in this list or C(new_services).
      - Modify this list to restrict the services you want to support if needed.
    default: [cluster_core, intercluster_core, management_core, management_autosupport, management_bgp, management_ems, management_https, management_http,
              management_ssh, management_portmap, data_core, data_nfs, data_cifs, data_flexcache, data_iscsi, data_s3_server, data_dns_server,
              data_fpolicy_client, management_ntp_client, management_dns_client, management_ad_client, management_ldap_client, management_nis_client,
              management_snmp_server, management_rsh_server, management_telnet_server, management_ntp_server, data_nvme_tcp, backup_ndmp_control]
    type: list
    elements: str
    version_added: 22.0.0
  additional_services:
    description:
      - As an alternative to updating the C(known_services), new services can be specified here.
    type: list
    elements: str
    version_added: 22.0.0

notes:
  - This module supports check_mode.
  - This module does not support 'allowed-addresses' as REST does not support it.  It defaults to 0.0.0.0/0.
'''

EXAMPLES = """

    - name: Create service policy
      netapp.ontap.na_ontap_service_policy:
        state: present
        name: "{{ service_policy_name }}"
        services:
          - data_core
          - data_nfs
        vserver: ansibleVServer
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"

    - name: Delete single service policy
      netapp.ontap.na_ontap_service_policy:
        state: absent
        name: "{{ service_policy_name }}"
        vserver: ansibleVServer
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"

    - name: Modify single service policy
      netapp.ontap.na_ontap_service_policy:
        state: present
        name: "{{ service_policy_name }}"
        services:
          - data_core
          - data_nfs
          - data_cifs
        vserver: ansibleVServer
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"

    - name: Modify service policy, remove services
      netapp.ontap.na_ontap_service_policy:
        state: present
        name: "{{ service_policy_name }}"
        services:
          - no_service
        vserver: "{{ vserver }}"

    - name: Modify service policy at cluster level
      netapp.ontap.na_ontap_service_policy:
        state: present
        name: "{{ service_policy_name }}"
        ipspace: ansibleIpspace
        scope: cluster
        services:
          - management_core
          - management_autosupport
          - management_ems
"""

RETURN = """
cd_action:
  description: whether the service policy is created or deleted.
  returned: success
  type: str

modify:
  description: attributes that were modified if the service policy already exists.
  returned: success
  type: dict
"""

from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic

HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()


class NetAppOntapServicePolicy:
    """
    Common operations to manage network IP service policies.
    """

    def __init__(self):
        """Set up module arguments, validate REST support and input consistency."""
        self.use_rest = False
        argument_spec = netapp_utils.na_ontap_host_argument_spec()
        argument_spec.update(dict(
            state=dict(type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            ipspace=dict(type='str'),
            scope=dict(type='str', choices=['cluster', 'svm']),
            services=dict(type='list', elements='str'),
            vserver=dict(type='str'),
            known_services=dict(type='list', elements='str',
                                default=['cluster_core', 'intercluster_core', 'management_core', 'management_autosupport', 'management_bgp', 'management_ems',
                                         'management_https', 'management_http', 'management_ssh', 'management_portmap', 'data_core', 'data_nfs', 'data_cifs',
                                         'data_flexcache', 'data_iscsi', 'data_s3_server', 'data_dns_server', 'data_fpolicy_client', 'management_ntp_client',
                                         'management_dns_client', 'management_ad_client', 'management_ldap_client', 'management_nis_client',
                                         'management_snmp_server', 'management_rsh_server', 'management_telnet_server', 'management_ntp_server',
                                         'data_nvme_tcp', 'backup_ndmp_control']),
            additional_services=dict(type='list', elements='str')
        ))

        self.module = AnsibleModule(
            argument_spec=argument_spec,
            required_if=[
                ('scope', 'cluster', ['ipspace']),
                ('scope', 'svm', ['vserver']),
                ('vserver', None, ['ipspace']),
            ],
            required_one_of=[
                ('ipspace', 'vserver')
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # REST API is required
        self.rest_api = OntapRestAPI(self.module)
        # check version
        self.rest_api.fail_if_not_rest_minimum_version('na_ontap_service_policy', 9, 8)
        self.validate_inputs()

    def validate_inputs(self):
        """Validate services against known/additional services and reconcile scope with vserver.

        Fails the module on contradictory input; normalizes 'no_service' to an empty list
        and defaults scope from the presence of vserver.
        """
        services = self.parameters.get('services')
        if services and 'no_service' in services:
            if len(services) > 1:
                self.module.fail_json(msg='Error: no other service can be present when no_service is specified.  Got: %s' % services)
            self.parameters['services'] = []
        known_services = self.parameters.get('known_services', []) + self.parameters.get('additional_services', [])
        unknown_services = [service for service in self.parameters.get('services', []) if service not in known_services]
        if unknown_services:
            # pluralize based on the number of unknown services, not the number of requested services
            plural = 's' if len(unknown_services) > 1 else ''
            self.module.fail_json(msg='Error: unknown service%s: %s.  New services may need to be added to "additional_services".'
                                      % (plural, ','.join(unknown_services)))

        scope = self.parameters.get('scope')
        if scope is None:
            self.parameters['scope'] = 'cluster' if self.parameters.get('vserver') is None else 'svm'
        elif scope == 'cluster' and self.parameters.get('vserver') is not None:
            self.module.fail_json(msg='Error: vserver cannot be set when "scope: cluster" is specified.  Got: %s' % self.parameters.get('vserver'))
        elif scope == 'svm' and self.parameters.get('vserver') is None:
            self.module.fail_json(msg='Error: vserver cannot be None when "scope: svm" is specified.')

    def get_service_policy(self):
        """Return the current service policy as a flat dict (with uuid), or None if absent."""
        api = 'network/ip/service-policies'
        query = {
            'name': self.parameters['name'],
            'fields': 'name,uuid,ipspace,services,svm,scope'
        }
        if self.parameters.get('vserver') is None:
            # vserver is empty for cluster
            query['scope'] = 'cluster'
        else:
            query['svm.name'] = self.parameters['vserver']

        if self.parameters.get('ipspace') is not None:
            query['ipspace.name'] = self.parameters['ipspace']
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            msg = "Error in get_service_policy: %s" % error
            self.module.fail_json(msg=msg)
        if record:
            return {
                'uuid': record['uuid'],
                'name': record['name'],
                'ipspace': record['ipspace']['name'],
                'scope': record['scope'],
                'vserver': self.na_helper.safe_get(record, ['svm', 'name']),
                'services': record['services']
            }
        return None

    def create_service_policy(self):
        """POST a new service policy with the requested ipspace, scope and services."""
        api = 'network/ip/service-policies'
        body = {
            'name': self.parameters['name']
        }
        if self.parameters.get('vserver') is not None:
            body['svm.name'] = self.parameters['vserver']

        for attr in ('ipspace', 'scope', 'services'):
            value = self.parameters.get(attr)
            if value is not None:
                body[attr] = value

        dummy, error = rest_generic.post_async(self.rest_api, api, body)
        if error:
            msg = "Error in create_service_policy: %s" % error
            self.module.fail_json(msg=msg)

    def modify_service_policy(self, current, modify):
        """PATCH the service policy; only the 'services' attribute is modifiable."""
        # sourcery skip: dict-comprehension
        api = 'network/ip/service-policies/%s' % current['uuid']
        modify_copy = dict(modify)
        body = {}
        for key in modify:
            if key in ('services',):
                body[key] = modify_copy.pop(key)
        if modify_copy:
            msg = 'Error: attributes not supported in modify: %s' % modify_copy
            self.module.fail_json(msg=msg)
        if not body:
            msg = 'Error: nothing to change - modify called with: %s' % modify
            self.module.fail_json(msg=msg)

        dummy, error = rest_generic.patch_async(self.rest_api, api, None, body)
        if error:
            msg = "Error in modify_service_policy: %s" % error
            self.module.fail_json(msg=msg)

    def delete_service_policy(self, current):
        """DELETE the service policy identified by its uuid."""
        api = 'network/ip/service-policies/%s' % current['uuid']

        dummy, error = rest_generic.delete_async(self.rest_api, api, None, None)
        if error:
            msg = "Error in delete_service_policy: %s" % error
            self.module.fail_json(msg=msg)

    def get_actions(self):
        """Determines whether a create, delete, modify action is required
        """
        cd_action, modify, current = None, None, None
        current = self.get_service_policy()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        return cd_action, modify, current

    def apply(self):
        """Execute the required action (honoring check_mode) and exit with the result."""
        cd_action, modify, current = self.get_actions()

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_service_policy()
            elif cd_action == 'delete':
                self.delete_service_policy(current)
            elif modify:
                self.modify_service_policy(current, modify)
        # only expose the effective scope - never self.module.params, which contains credentials
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, extra_responses={'scope': self.parameters['scope']})
        self.module.exit_json(**result)


def main():
    """Create object and call apply"""
    obj = NetAppOntapServicePolicy()
    obj.apply()


if __name__ == '__main__':
    main()
+ type: bool + node: + description: + - The node where the service processor network should be enabled + required: true + type: str + dhcp: + description: + - Specify dhcp type. + - Setting C(dhcp=none) requires all of C(ip_address), C(netmask), C(gateway_ip_address) and at least one of its value different from current. + type: str + choices: ['v4', 'none'] + gateway_ip_address: + description: + - Specify the gateway ip. + type: str + ip_address: + description: + - Specify the service processor ip address. + type: str + netmask: + description: + - Specify the service processor netmask. + type: str + prefix_length: + description: + - Specify the service processor prefix_length. + type: int + wait_for_completion: + description: + - Set this parameter to 'true' for synchronous execution (wait until SP status is successfully updated) + - Set this parameter to 'false' for asynchronous execution + - For asynchronous, execution exits as soon as the request is sent, without checking SP status + type: bool + default: false + version_added: 2.8.0 +''' + +EXAMPLES = """ + - name: Modify Service Processor Network, enable dhcp. 
+ netapp.ontap.na_ontap_service_processor_network: + state: present + address_type: ipv4 + is_enabled: true + dhcp: v4 + node: "{{ netapp_node }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" +""" + +RETURN = """ +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +import time + + +class NetAppOntapServiceProcessorNetwork: + """ + Modify a Service Processor Network + """ + + def __init__(self): + """ + Initialize the NetAppOntapServiceProcessorNetwork class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present'], default='present'), + address_type=dict(required=True, type='str', choices=['ipv4', 'ipv6']), + is_enabled=dict(required=False, type='bool'), + node=dict(required=True, type='str'), + dhcp=dict(required=False, type='str', choices=['v4', 'none']), + gateway_ip_address=dict(required=False, type='str'), + ip_address=dict(required=False, type='str'), + netmask=dict(required=False, type='str'), + prefix_length=dict(required=False, type='int'), + wait_for_completion=dict(required=False, type='bool', default=False) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[('netmask', 'prefix_length')] + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # Set up Rest API + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.uuid, self.ipv4_or_ipv6 = None, None + dhcp_mutual_options = 
['ip_address', 'gateway_ip_address', 'netmask'] + if self.parameters.get('dhcp') == 'v4': + # error if dhcp is set to v4 and address_type is ipv6. + if self.parameters['address_type'] == 'ipv6': + self.module.fail_json(msg="Error: dhcp cannot be set for address_type: ipv6.") + # error if dhcp is set to v4 and manual interface options are present. + if any(x in self.parameters for x in dhcp_mutual_options): + self.module.fail_json(msg="Error: set dhcp v4 or all of 'ip_address, gateway_ip_address, netmask'.") + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + if 'is_enabled' not in self.parameters: + self.module.fail_json(msg='missing required arguments: is_enabled in ZAPI') + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=None) + self.set_playbook_zapi_key_map() + + def set_playbook_zapi_key_map(self): + self.na_helper.zapi_string_keys = { + 'address_type': 'address-type', + 'node': 'node', + 'dhcp': 'dhcp', + 'gateway_ip_address': 'gateway-ip-address', + 'ip_address': 'ip-address', + 'netmask': 'netmask' + } + self.na_helper.zapi_int_keys = { + 'prefix_length': 'prefix-length' + } + self.na_helper.zapi_bool_keys = { + 'is_enabled': 'is-enabled', + } + self.na_helper.zapi_required = { + 'address_type': 'address-type', + 'node': 'node', + 'is_enabled': 'is-enabled' + } + + def get_sp_network_status(self): + """ + Return status of service processor network + :param: + name : name of the node + :return: Status of the service processor network + :rtype: dict + """ + spn_get_iter = netapp_utils.zapi.NaElement('service-processor-network-get-iter') + query_info = { + 'query': { + 'service-processor-network-info': { + 'node': self.parameters['node'], + 'address-type': self.parameters['address_type'] + } + } + } + spn_get_iter.translate_struct(query_info) + try: + result = self.server.invoke_successfully(spn_get_iter, True) + except netapp_utils.zapi.NaApiError as 
error: + self.module.fail_json(msg='Error fetching service processor network status for %s: %s' % + (self.parameters['node'], to_native(error)), exception=traceback.format_exc()) + if int(result['num-records']) >= 1: + sp_attr_info = result['attributes-list']['service-processor-network-info'] + return sp_attr_info.get_child_content('setup-status') + return None + + def get_service_processor_network(self): + """ + Return details about service processor network + :param: + name : name of the node + :return: Details about service processor network. None if not found. + :rtype: dict + """ + if self.use_rest: + return self.get_service_processor_network_rest() + spn_get_iter = netapp_utils.zapi.NaElement('service-processor-network-get-iter') + query_info = { + 'query': { + 'service-processor-network-info': { + 'node': self.parameters['node'] + } + } + } + spn_get_iter.translate_struct(query_info) + try: + result = self.server.invoke_successfully(spn_get_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching service processor network info for %s: %s' % + (self.parameters['node'], to_native(error)), exception=traceback.format_exc()) + sp_details = None + # check if job exists + if int(result['num-records']) >= 1: + sp_details = dict() + sp_attr_info = result['attributes-list']['service-processor-network-info'] + for item_key, zapi_key in self.na_helper.zapi_string_keys.items(): + sp_details[item_key] = sp_attr_info.get_child_content(zapi_key) + # set dhcp: 'none' if current dhcp set as None to avoid idempotent issue. 
+ if item_key == 'dhcp' and sp_details[item_key] is None: + sp_details[item_key] = 'none' + for item_key, zapi_key in self.na_helper.zapi_bool_keys.items(): + sp_details[item_key] = self.na_helper.get_value_for_bool(from_zapi=True, + value=sp_attr_info.get_child_content(zapi_key)) + for item_key, zapi_key in self.na_helper.zapi_int_keys.items(): + sp_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True, + value=sp_attr_info.get_child_content(zapi_key)) + return sp_details + + def modify_service_processor_network(self, params=None): + """ + Modify a service processor network. + :param params: A dict of modified options. + When dhcp is not set to v4, ip_address, netmask, and gateway_ip_address must be specified even if remains the same. + """ + if self.use_rest: + return self.modify_service_processor_network_rest(params) + + sp_modify = netapp_utils.zapi.NaElement('service-processor-network-modify') + sp_attributes = dict() + for item_key in self.parameters: + if item_key in self.na_helper.zapi_string_keys: + zapi_key = self.na_helper.zapi_string_keys.get(item_key) + sp_attributes[zapi_key] = self.parameters[item_key] + elif item_key in self.na_helper.zapi_bool_keys: + zapi_key = self.na_helper.zapi_bool_keys.get(item_key) + sp_attributes[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False, value=self.parameters[item_key]) + elif item_key in self.na_helper.zapi_int_keys: + zapi_key = self.na_helper.zapi_int_keys.get(item_key) + sp_attributes[zapi_key] = self.na_helper.get_value_for_int(from_zapi=False, value=self.parameters[item_key]) + sp_modify.translate_struct(sp_attributes) + try: + self.server.invoke_successfully(sp_modify, enable_tunneling=True) + if self.parameters.get('wait_for_completion'): + retries = 25 + # when try to enable and set dhcp:v4 or manual ip, the status will be 'not_setup' before changes to complete. 
+ status_key = 'not_setup' if params.get('is_enabled') else 'in_progress' + while self.get_sp_network_status() == status_key and retries > 0: + time.sleep(15) + retries -= 1 + # In ZAPI, once the status is 'succeeded', it takes few more seconds for ip details take effect.. + time.sleep(10) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying service processor network: %s' % (to_native(error)), + exception=traceback.format_exc()) + + def get_service_processor_network_rest(self): + api = 'cluster/nodes' + fields = 'uuid,service_processor,service_processor.dhcp_enabled' + query = {'name': self.parameters['node']} + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg='Error fetching service processor network info for %s: %s' % + (self.parameters['node'], error)) + current = None + if record: + self.uuid = record['uuid'] + # if the desired address_type already configured in current, interface details will be returned. + # if the desired address_type not configured in current, None will be set in network interface options + # and setting either dhcp(for v4) or (ip_address, gateway_ip_address, netmask) will enable and configure the interface. 
+ self.ipv4_or_ipv6 = 'ipv4_interface' if self.parameters['address_type'] == 'ipv4' else 'ipv6_interface' + netmask_or_prefix = 'netmask' if self.ipv4_or_ipv6 == 'ipv4_interface' else 'prefix_length' + current = { + 'dhcp': 'v4' if self.na_helper.safe_get(record, ['service_processor', 'dhcp_enabled']) else 'none', + 'gateway_ip_address': self.na_helper.safe_get(record, ['service_processor', self.ipv4_or_ipv6, 'gateway']), + 'ip_address': self.na_helper.safe_get(record, ['service_processor', self.ipv4_or_ipv6, 'address']), + 'is_enabled': True if self.na_helper.safe_get(record, ['service_processor', self.ipv4_or_ipv6]) else False, + netmask_or_prefix: self.na_helper.safe_get(record, ['service_processor', self.ipv4_or_ipv6, 'netmask']) + } + return current + + def modify_service_processor_network_rest(self, modify): + api = 'cluster/nodes' + body = {'service_processor': {}} + ipv4_or_ipv6_body = {} + if self.parameters.get('gateway_ip_address'): + ipv4_or_ipv6_body['gateway'] = self.parameters['gateway_ip_address'] + if self.parameters.get('netmask'): + ipv4_or_ipv6_body['netmask'] = self.parameters['netmask'] + if self.parameters.get('prefix_length'): + ipv4_or_ipv6_body['netmask'] = self.parameters['prefix_length'] + if self.parameters.get('ip_address'): + ipv4_or_ipv6_body['address'] = self.parameters['ip_address'] + if ipv4_or_ipv6_body: + body['service_processor'][self.ipv4_or_ipv6] = ipv4_or_ipv6_body + if 'dhcp' in self.parameters: + body['service_processor']['dhcp_enabled'] = True if self.parameters['dhcp'] == 'v4' else False + # if dhcp is enabled in REST, setting ip_address details manually requires dhcp: 'none' in params. + # if dhcp: 'none' is not in params set it False to disable dhcp and assign manual ip address. 
+ elif ipv4_or_ipv6_body.get('gateway') and ipv4_or_ipv6_body.get('address') and ipv4_or_ipv6_body.get('netmask'): + body['service_processor']['dhcp_enabled'] = False + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body) + if error: + self.module.fail_json(msg='Error modifying service processor network: %s' % error) + if self.parameters.get('wait_for_completion'): + retries = 25 + while self.is_sp_modified_rest(modify) is False and retries > 0: + time.sleep(15) + retries -= 1 + + def is_sp_modified_rest(self, modify): + current = self.get_service_processor_network_rest() + if current is None: + return False + for sp_option in modify: + if modify[sp_option] != current[sp_option]: + return False + return True + + def validate_rest(self, modify): + # error if try to disable service processor network status in REST. + if modify.get('is_enabled') is False: + error = "Error: disable service processor network status not allowed in REST" + self.module.fail_json(msg=error) + # error if try to enable and modify not have either dhcp or (ip_address, netamsk, gateway) + if modify.get('is_enabled') and len(modify) == 1: + error = "Error: enable service processor network requires dhcp or ip_address,netmask,gateway details in REST." 
+ self.module.fail_json(msg=error) + + def validate_zapi(self, modify): + if self.parameters['is_enabled'] is False: + if len(modify) > 1 and 'is_enabled' in modify: + self.module.fail_json(msg='Error: Cannot modify any other parameter for a service processor network if option "is_enabled" is set to false.') + elif modify and 'is_enabled' not in modify: + self.module.fail_json(msg='Error: Cannot modify a service processor network if it is disabled in ZAPI.') + + def apply(self): + """ + Run Module based on play book + """ + current = self.get_service_processor_network() + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if not current: + self.module.fail_json(msg='Error No Service Processor for node: %s' % self.parameters['node']) + if modify: + # disable dhcp requires configuring one of ip-address, netmask and gateway different from current. + if modify.get('dhcp') == 'none' and not any(x in modify for x in ['ip_address', 'gateway_ip_address', 'netmask']): + error = "Error: To disable dhcp, configure ip-address, netmask and gateway details manually." 
+ self.module.fail_json(msg=error) + self.validate_rest(modify) if self.use_rest else self.validate_zapi(modify) + if self.na_helper.changed and not self.module.check_mode: + self.modify_service_processor_network(modify) + result = netapp_utils.generate_result(self.na_helper.changed, modify=modify) + self.module.exit_json(**result) + + +def main(): + """ + Create the NetApp Ontap Service Processor Network Object and modify it + """ + + obj = NetAppOntapServiceProcessorNetwork() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snaplock_clock.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snaplock_clock.py new file mode 100644 index 000000000..d7f33dbea --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snaplock_clock.py @@ -0,0 +1,177 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_snaplock_clock + +short_description: NetApp ONTAP Sets the snaplock compliance clock. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.4.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Sets the Snaplock compliance clock on NetApp ONTAP. + +options: + node: + description: + - Name of the node to set compliance clock on. 
+ type: str + required: true + +''' + +EXAMPLES = """ + - name: Set node compliance clock + netapp.ontap.na_ontap_snaplock_clock: + node: cluster1-01 + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + +""" + +RETURN = """ +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +class NetAppOntapSnaplockClock: + '''Class with SnapLock clock operations''' + + def __init__(self): + '''Initialize module parameters''' + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + node=dict(required=True, type='str'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def get_snaplock_node_compliance_clock(self): + + if self.use_rest: + """ + Return snaplock-node-compliance-clock query results + :return: dict of clock info + """ + api = "private/cli/snaplock/compliance-clock" + query = { + 'fields': 'node,time', + 'node': self.parameters['node'], + } + message, error = self.rest_api.get(api, query) + records, error = rrh.check_for_0_or_1_records(api, message, error) + + if error is None and records is not None: + return_value = { + 
'node': message['records'][0]['node'], + 'compliance_clock_time': message['records'][0]['time'] + } + + if error: + self.module.fail_json(msg=error) + + if not records: + error = "REST API did not return snaplock compliance clock for node %s" % (self.parameters['node']) + self.module.fail_json(msg=error) + + else: + """ + Return snaplock-node-compliance-clock query results + :param node_name: name of the cluster node + :return: NaElement + """ + + node_snaplock_clock = netapp_utils.zapi.NaElement('snaplock-get-node-compliance-clock') + node_snaplock_clock.add_new_child('node', self.parameters['node']) + + try: + result = self.server.invoke_successfully(node_snaplock_clock, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching snaplock compliance clock for node %s : %s' + % (self.parameters['node'], to_native(error)), + exception=traceback.format_exc()) + + return_value = None + + if result.get_child_by_name('snaplock-node-compliance-clock'): + node_snaplock_clock_attributes = result['snaplock-node-compliance-clock']['compliance-clock-info'] + return_value = { + 'compliance_clock_time': node_snaplock_clock_attributes['formatted-snaplock-compliance-clock'], + } + return return_value + + def set_snaplock_node_compliance_clock(self): + '''Set ONTAP snaplock compliance clock for each node''' + if self.use_rest: + api = "private/cli/snaplock/compliance-clock/initialize" + query = { + "node": self.parameters['node'] + } + + body = {} + dummy, error = self.rest_api.patch(api, body, query) + if error: + self.module.fail_json(msg=error) + else: + node_snaplock_clock_obj = netapp_utils.zapi.NaElement('snaplock-set-node-compliance-clock') + node_snaplock_clock_obj.add_new_child('node', self.parameters['node']) + + try: + result = self.server.invoke_successfully(node_snaplock_clock_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error setting snaplock compliance clock for node %s : %s' + % 
(self.parameters['node'], to_native(error)), + exception=traceback.format_exc()) + return result + + def apply(self): + current = self.get_snaplock_node_compliance_clock() + + if current['compliance_clock_time'] == "ComplianceClock is not configured.": + self.na_helper.changed = True + + if self.na_helper.changed and not self.module.check_mode: + self.set_snaplock_node_compliance_clock() + + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + '''Set snaplock compliance clock''' + obj = NetAppOntapSnaplockClock() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py new file mode 100644 index 000000000..26254e03b --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror.py @@ -0,0 +1,1749 @@ +#!/usr/bin/python + +''' +na_ontap_snapmirror +''' + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete/Update/Initialize/Break/Resync/Resume SnapMirror volume/vserver relationships for ONTAP/ONTAP + - This includes SVM replication, aka vserver DR + - Create/Delete/Update/Initialize SnapMirror volume relationship between ElementSW and ONTAP + - Modify schedule for a SnapMirror relationship for ONTAP/ONTAP and ElementSW/ONTAP + - Pre-requisite for ElementSW to ONTAP relationship or vice-versa is an established SnapMirror endpoint for ONTAP cluster with ElementSW UI + - Pre-requisite for ElementSW to ONTAP relationship or vice-versa is to have SnapMirror enabled in the ElementSW volume + - For creating a SnapMirror ElementSW/ONTAP relationship, an existing ONTAP/ElementSW relationship should be present + - Performs 
resync if the C(relationship_state=active) and the current mirror state of the snapmirror relationship is broken-off + - Performs resume if the C(relationship_state=active), the current snapmirror relationship status is quiesced and mirror state is snapmirrored + - Performs restore if the C(relationship_type=restore) and all other operations will not be performed during this task +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap + - netapp.ontap.netapp.na_ontap_peer +module: na_ontap_snapmirror +options: + state: + choices: ['present', 'absent'] + description: + - Whether the specified relationship should exist or not. + default: present + type: str + source_volume: + description: + - Specifies the name of the source volume for the SnapMirror. + - Deprecated as of 21.2.0, use source_endpoint and path. + type: str + destination_volume: + description: + - Specifies the name of the destination volume for the SnapMirror. + - Deprecated as of 21.2.0, use source_endpoint and path. + type: str + source_vserver: + description: + - Name of the source vserver for the SnapMirror. + - Deprecated as of 21.2.0, use source_endpoint and path, or svm. + type: str + destination_vserver: + description: + - Name of the destination vserver for the SnapMirror. + - Deprecated as of 21.2.0, use destination_endpoint and path, or svm. + type: str + source_path: + description: + - Specifies the source endpoint of the SnapMirror relationship. + - If the source is an ONTAP volume, format should be <[vserver:][volume]> or <[[cluster:]//vserver/]volume> + - If the source is an ElementSW volume, format should be <[Element_SVIP]:/lun/[Element_VOLUME_ID]> + - If the source is an ElementSW volume, the volume should have SnapMirror enabled. + - Deprecated as of 21.2.0, use source_endpoint and path. + type: str + destination_path: + description: + - Specifies the destination endpoint of the SnapMirror relationship. + - Deprecated as of 21.2.0, use destination_endpoint and path. 
+ type: str + relationship_type: + choices: ['data_protection', 'load_sharing', 'vault', 'restore', 'transition_data_protection', + 'extended_data_protection'] + type: str + description: + - Specify the type of SnapMirror relationship. + - for 'restore' unless 'source_snapshot' is specified the most recent Snapshot copy on the source volume is restored. + - restore SnapMirror is not idempotent. + - With REST, only 'extended_data_protection' and 'restore' are supported. + schedule: + description: + - Specify the name of the current schedule, which is used to update the SnapMirror relationship. + - Optional for create, modifiable. + - With REST, this option requires ONTAP 9.11.1 or later. + type: str + aliases: ['transfer_schedule'] + version_added: 22.2.0 + policy: + description: + - Specify the name of the SnapMirror policy that applies to this relationship. + version_added: 2.8.0 + type: str + source_hostname: + description: + - DEPRECATED - please use C(peer_options). + - Source hostname or management IP address for ONTAP or ElementSW cluster. + - If present, when state is absent, the relationship is released at the source before being deleted at destination. + - It is recommended to always release before deleting, so make sure this parameter is present if the source hostname is known. + type: str + source_username: + description: + - DEPRECATED - please use C(peer_options). + - Source username for ONTAP or ElementSW cluster. + - Optional if this is same as destination username. + type: str + source_password: + description: + - DEPRECATED - please use C(peer_options). + - Source password for ONTAP or ElementSW cluster. + - Optional if this is same as destination password. + type: str + connection_type: + description: + - Type of SnapMirror relationship. + - Pre-requisite for either elementsw_ontap or ontap_elementsw the ElementSW volume should have enableSnapmirror option set to true. 
+ - For using ontap_elementsw, elementsw_ontap snapmirror relationship should exist. + choices: ['ontap_ontap', 'elementsw_ontap', 'ontap_elementsw'] + default: ontap_ontap + type: str + version_added: 2.9.0 + max_transfer_rate: + description: + - Specifies the upper bound, in kilobytes per second, at which data is transferred. + - Default is unlimited, it can be explicitly set to 0 as unlimited. + type: int + version_added: 2.9.0 + initialize: + description: + - Specifies whether to initialize SnapMirror relation. + - Default is True, it can be explicitly set to False to avoid initializing SnapMirror relation. + default: true + type: bool + version_added: '19.11.0' + update: + description: + - Specifies whether to update the destination endpoint of the SnapMirror relationship only if the relationship is already present and active. + - Default is True. + default: true + type: bool + version_added: '20.2.0' + relationship_info_only: + description: + - If relationship-info-only is set to true then only relationship information is removed. + default: false + type: bool + version_added: '20.4.0' + relationship_state: + description: + - Specifies whether to break SnapMirror relation or establish a SnapMirror relationship. + - state must be present to use this option. + default: active + choices: ['active', 'broken'] + type: str + version_added: '20.2.0' + source_snapshot: + description: + - Specifies the Snapshot from the source to be restored. + type: str + version_added: '20.6.0' + identity_preserve: + description: + - Specifies whether or not the identity of the source Vserver is replicated to the destination Vserver. + - If this parameter is set to true, the source Vserver's configuration will additionally be replicated to the destination. + - If the parameter is set to false, then only the source Vserver's volumes and RBAC configuration are replicated to the destination. 
+ type: bool + version_added: 2.9.0 + create_destination: + description: + - Requires ONTAP 9.7 or later. + - Creates the destination volume if enabled and destination_volume is present or destination_path includes a volume name. + - Creates and peers the destination vserver for SVM DR. + type: dict + version_added: 21.1.0 + suboptions: + enabled: + description: + - Whether to create the destination volume or vserver. + - This is automatically enabled if any other suboption is present. + type: bool + default: true + storage_service: + description: storage service associated with the destination endpoint. + type: dict + suboptions: + enabled: + description: whether to create the destination endpoint using storage service. + type: bool + enforce_performance: + description: whether to enforce storage service performance on the destination endpoint. + type: bool + name: + description: the performance service level (PSL) for this volume endpoint. + type: str + choices: ['value', 'performance', 'extreme'] + tiering: + description: + - Cloud tiering policy. + type: dict + suboptions: + policy: + description: + - Cloud tiering policy. + choices: ['all', 'auto', 'none', 'snapshot-only'] + type: str + supported: + description: + - enable provisioning of the destination endpoint volumes on FabricPool aggregates. + - only supported for FlexVol volume, FlexGroup volume, and Consistency Group endpoints. + type: bool + destination_cluster: + description: + - Requires ONTAP 9.7 or higher. + - Required to create the destination vserver for SVM DR or the destination volume. + - Deprecated as of 21.2.0, use destination_endpoint and cluster. + type: str + version_added: 21.1.0 + source_cluster: + description: + - Requires ONTAP 9.7 or higher. + - Required to create the peering relationship between source and destination SVMs. + - Deprecated as of 21.2.0, use source_endpoint and cluster. 
+ type: str + version_added: 21.1.0 + source_endpoint: + description: + - source endpoint of a SnapMirror relationship. + type: dict + version_added: 21.2.0 + suboptions: + cluster: + description: + - Requires ONTAP 9.7 or higher. + - Required to create the peering relationship between source and destination SVMs. + type: str + consistency_group_volumes: + description: + - Requires ONTAP 9.8 or higher. + - Mandatory property for a Consistency Group endpoint. Specifies the list of FlexVol volumes for a Consistency Group. + type: list + elements: str + ipspace: + description: + - Requires ONTAP 9.8 or higher. + - Optional property to specify the IPSpace of the SVM. + type: str + path: + description: + - The source endpoint for the relationship. + - If the source is an ONTAP volume (FlexVol or FlexGroup), format should be + - For SVM DR, format should be + - For a consistency group, format should be + - If the source is an ElementSW volume, format should be + - If the source is an ElementSW volume, the volume should have SnapMirror enabled. + type: str + required: true + svm: + description: + - The name of the SVM. Not sure when this is needed. + type: str + destination_endpoint: + description: + - destination endpoint of a SnapMirror relationship. + type: dict + version_added: 21.2.0 + suboptions: + cluster: + description: + - Requires ONTAP 9.7 or higher. + - Required to create the destination vserver for SVM DR or the destination volume. + type: str + consistency_group_volumes: + description: + - Requires ONTAP 9.8 or higher. + - Mandatory property for a Consistency Group endpoint. Specifies the list of FlexVol volumes for a Consistency Group. + type: list + elements: str + ipspace: + description: + - Requires ONTAP 9.8 or higher. + - Optional property to specify the IPSpace of the SVM. + type: str + path: + description: + - The destination endpoint for the relationship. + - format is , , + type: str + required: true + svm: + description: + - The name of the SVM. 
Not sure when this is needed. + type: str + transferring_time_out: + description: + - How long to wait when a transfer is in progress (after initializing for instance). Unit is seconds. + default: 300 + type: int + version_added: 21.20.0 + clean_up_failure: + description: + - An optional parameter to recover from an aborted or failed restore operation. + - Any temporary RST relationship is removed from the destination Vserver. + - Only supported with ZAPI. + default: False + type: bool + version_added: 21.20.0 + validate_source_path: + description: + - The relationship is found based on the destination as it is unique. + - By default, the source information is verified and an error is reported if there is a mismatch. + This would mean the destination is already used by another relationship. + - The check accounts for a local vserver name that may be different from the remote vserver name. + - This may be disabled in case the check is too strict, to unconditionally delete a relationship for instance. + default: True + type: bool + version_added: 21.21.0 + identity_preservation: + description: + - Specifies which configuration of the source SVM is replicated to the destination SVM. + - This property is applicable only for SVM data protection with "async" policy type. + - Only supported with REST and requires ONTAP 9.11.1 or later. + type: str + choices: ['full', 'exclude_network_config', 'exclude_network_and_protocol_config'] + version_added: '22.4.0' + +short_description: "NetApp ONTAP or ElementSW Manage SnapMirror" +version_added: 2.7.0 +notes: + - supports REST and ZAPI. + - supports check_mode. + - restore is not idempotent. + - snapmirror runs on the destination for most operations, peer_options identify the source cluster. + - ONTAP supports either username/password or a SSL certificate for authentication. + - ElementSW only supports username/password for authentication. 
+''' + +EXAMPLES = """ + + # creates and initializes the snapmirror + - name: Create ONTAP/ONTAP SnapMirror + netapp.ontap.na_ontap_snapmirror: + state: present + source_volume: test_src + destination_volume: test_dest + source_vserver: ansible_src + destination_vserver: ansible_dest + schedule: hourly + policy: MirrorAllSnapshots + max_transfer_rate: 1000 + initialize: False + hostname: "{{ destination_cluster_hostname }}" + username: "{{ destination_cluster_username }}" + password: "{{ destination_cluster_password }}" + + # creates and initializes the snapmirror between vservers + - name: Create ONTAP/ONTAP vserver SnapMirror + netapp.ontap.na_ontap_snapmirror: + state: present + source_vserver: ansible_src + destination_vserver: ansible_dest + identity_preserve: true + hostname: "{{ destination_cluster_hostname }}" + username: "{{ destination_cluster_username }}" + password: "{{ destination_cluster_password }}" + + # existing snapmirror relation with status 'snapmirrored' will be initialized + - name: Initialize ONTAP/ONTAP SnapMirror + netapp.ontap.na_ontap_snapmirror: + state: present + source_path: 'ansible:test' + destination_path: 'ansible:dest' + relationship_state: active + hostname: "{{ destination_cluster_hostname }}" + username: "{{ destination_cluster_username }}" + password: "{{ destination_cluster_password }}" + + - name: Delete SnapMirror + netapp.ontap.na_ontap_snapmirror: + state: absent + destination_path: + relationship_info_only: True + source_hostname: "{{ source_hostname }}" + hostname: "{{ destination_cluster_hostname }}" + username: "{{ destination_cluster_username }}" + password: "{{ destination_cluster_password }}" + + - name: Break SnapMirror + netapp.ontap.na_ontap_snapmirror: + state: present + relationship_state: broken + destination_path: + source_hostname: "{{ source_hostname }}" + hostname: "{{ destination_cluster_hostname }}" + username: "{{ destination_cluster_username }}" + password: "{{ destination_cluster_password }}" + + - 
name: Restore SnapMirror volume using location (Idempotency) + netapp.ontap.na_ontap_snapmirror: + state: present + source_path: + destination_path: + relationship_type: restore + source_snapshot: "{{ snapshot }}" + hostname: "{{ destination_cluster_hostname }}" + username: "{{ destination_cluster_username }}" + password: "{{ destination_cluster_password }}" + + - name: Set schedule to NULL + netapp.ontap.na_ontap_snapmirror: + state: present + destination_path: + schedule: "" + hostname: "{{ destination_cluster_hostname }}" + username: "{{ destination_cluster_username }}" + password: "{{ destination_cluster_password }}" + + - name: Create SnapMirror from ElementSW to ONTAP + netapp.ontap.na_ontap_snapmirror: + state: present + connection_type: elementsw_ontap + source_path: '10.10.10.10:/lun/300' + destination_path: 'ansible_test:ansible_dest_vol' + schedule: hourly + policy: MirrorLatest + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + source_hostname: " {{ Element_cluster_mvip }}" + source_username: "{{ Element_cluster_username }}" + source_password: "{{ Element_cluster_password }}" + + - name: Create SnapMirror from ONTAP to ElementSW + netapp.ontap.na_ontap_snapmirror: + state: present + connection_type: ontap_elementsw + destination_path: '10.10.10.10:/lun/300' + source_path: 'ansible_test:ansible_dest_vol' + policy: MirrorLatest + hostname: "{{ Element_cluster_mvip }}" + username: "{{ Element_cluster_username }}" + password: "{{ Element_cluster_password }}" + source_hostname: " {{ netapp_hostname }}" + source_username: "{{ netapp_username }}" + source_password: "{{ netapp_password }}" + + - name: Create SnapMirror relationship (create destination volume) + tags: create + netapp.ontap.na_ontap_snapmirror: + state: present + source_endpoint: + cluster: "{{ _source_cluster }}" + path: "{{ source_vserver + ':' + source_volume }}" + destination_endpoint: + cluster: "{{ _destination_cluster }}" + path: 
"{{ destination_vserver_VOLDP + ':' + destination_volume }}" + create_destination: + enabled: true + hostname: "{{ destination_hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Create SnapMirror relationship - SVM DR (creating and peering destination svm) + tags: create_svmdr + netapp.ontap.na_ontap_snapmirror: + state: present + source_endpoint: + cluster: "{{ _source_cluster }}" + path: "{{ source_vserver + ':' }}" + destination_endpoint: + cluster: "{{ _destination_cluster }}" + path: "{{ destination_vserver_SVMDR + ':' }}" + create_destination: + enabled: true + hostname: "{{ destination_hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false +""" + +RETURN = """ +""" + +import re +import time +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_elementsw_module import NaElementSWModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + +HAS_SF_SDK = netapp_utils.has_sf_sdk() +try: + import solidfire.common +except ImportError: + HAS_SF_SDK = False + + +class NetAppONTAPSnapmirror(object): + """ + Class with SnapMirror methods + """ + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + destination_endpoint=dict(type='dict', options=dict( + cluster=dict(type='str'), + consistency_group_volumes=dict(type='list', elements='str'), + ipspace=dict(type='str'), + path=dict(required=True, type='str'), + svm=dict(type='str'), + )), + 
source_endpoint=dict(type='dict', options=dict( + cluster=dict(type='str'), + consistency_group_volumes=dict(type='list', elements='str'), + ipspace=dict(type='str'), + path=dict(required=True, type='str'), + svm=dict(type='str'), + )), + source_vserver=dict(required=False, type='str'), + destination_vserver=dict(required=False, type='str'), + source_volume=dict(required=False, type='str'), + destination_volume=dict(required=False, type='str'), + source_path=dict(required=False, type='str'), + destination_path=dict(required=False, type='str'), + schedule=dict(required=False, type='str', aliases=['transfer_schedule']), + policy=dict(required=False, type='str'), + relationship_type=dict(required=False, type='str', + choices=['data_protection', 'load_sharing', + 'vault', 'restore', + 'transition_data_protection', + 'extended_data_protection'] + ), + connection_type=dict(required=False, type='str', + choices=['ontap_ontap', 'elementsw_ontap', 'ontap_elementsw'], + default='ontap_ontap'), + peer_options=dict(type='dict', options=netapp_utils.na_ontap_host_argument_spec_peer()), + source_hostname=dict(required=False, type='str'), + source_username=dict(required=False, type='str'), + source_password=dict(required=False, type='str', no_log=True), + max_transfer_rate=dict(required=False, type='int'), + initialize=dict(required=False, type='bool', default=True), + update=dict(required=False, type='bool', default=True), + identity_preserve=dict(required=False, type='bool'), + identity_preservation=dict(required=False, type="str", choices=['full', 'exclude_network_config', 'exclude_network_and_protocol_config']), + relationship_state=dict(required=False, type='str', choices=['active', 'broken'], default='active'), + relationship_info_only=dict(required=False, type='bool', default=False), + source_snapshot=dict(required=False, type='str'), + create_destination=dict(required=False, type='dict', options=dict( + enabled=dict(type='bool', default=True), + 
storage_service=dict(type='dict', options=dict( + enabled=dict(type='bool'), + enforce_performance=dict(type='bool'), + name=dict(type='str', choices=['value', 'performance', 'extreme']), + )), + tiering=dict(type='dict', options=dict( + policy=dict(type='str', choices=['all', 'auto', 'none', 'snapshot-only']), + supported=dict(type='bool') + )), + )), + source_cluster=dict(required=False, type='str'), + destination_cluster=dict(required=False, type='str'), + transferring_time_out=dict(required=False, type='int', default=300), + clean_up_failure=dict(required=False, type='bool', default=False), + validate_source_path=dict(required=False, type='bool', default=True) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + mutually_exclusive=[ + ('source_endpoint', 'source_cluster'), + ('source_endpoint', 'source_path'), + ('source_endpoint', 'source_volume'), + ('source_endpoint', 'source_vserver'), + ('destination_endpoint', 'destination_cluster'), + ('destination_endpoint', 'destination_path'), + ('destination_endpoint', 'destination_volume'), + ('destination_endpoint', 'destination_vserver'), + ('peer_options', 'source_hostname'), + ('peer_options', 'source_username'), + ('peer_options', 'source_password'), + ('identity_preserve', 'identity_preservation') + ], + required_together=(['source_volume', 'destination_volume'], + ['source_vserver', 'destination_vserver'], + ['source_endpoint', 'destination_endpoint'], + ), + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.new_style = False + # when deleting, ignore previous errors, but report them if delete fails + self.previous_errors = [] + # setup later if required + self.source_server = None + # only for ElementSW -> ONTAP snapmirroring, validate if ElementSW SDK is available + if self.parameters.get('connection_type') in ['elementsw_ontap', 'ontap_elementsw'] and HAS_SF_SDK is False: + 
self.module.fail_json(msg="Unable to import the SolidFire Python SDK") + + self.src_rest_api = None + self.src_use_rest = None + self.set_source_peer() + self.rest_api, self.use_rest = self.setup_rest() + if not self.use_rest: + self.server = self.setup_zapi() + + def set_source_peer(self): + if self.parameters.get('source_hostname') is None and self.parameters.get('peer_options') is None: + if self.parameters.get('connection_type') == 'ontap_elementsw': + return self.module.fail_json(msg='Error: peer_options are required to identify ONTAP cluster with connection_type: ontap_elementsw') + if self.parameters.get('connection_type') == 'elementsw_ontap': + return self.module.fail_json(msg='Error: peer_options are required to identify SolidFire cluster with connection_type: elementsw_ontap') + if self.parameters.get('source_hostname') is not None: + # if source_hostname is present, peer_options is absent + self.parameters['peer_options'] = dict( + hostname=self.parameters.get('source_hostname'), + username=self.parameters.get('source_username'), + password=self.parameters.get('source_password'), + ) + elif self.na_helper.safe_get(self.parameters, ['peer_options', 'hostname']): + self.parameters['source_hostname'] = self.parameters['peer_options']['hostname'] + if 'peer_options' in self.parameters: + netapp_utils.setup_host_options_from_module_params( + self.parameters['peer_options'], self.module, + netapp_utils.na_ontap_host_argument_spec_peer().keys()) + + def setup_rest(self): + unsupported_rest_properties = ['identity_preserve', 'max_transfer_rate'] + host_options = self.parameters['peer_options'] if self.parameters.get('connection_type') == 'ontap_elementsw' else None + rest_api = netapp_utils.OntapRestAPI(self.module, host_options=host_options) + rtype = self.parameters.get('relationship_type') + if rtype not in (None, 'extended_data_protection', 'restore'): + unsupported_rest_properties.append('relationship_type') + used_unsupported_rest_properties = [x for x in 
unsupported_rest_properties if x in self.parameters] + ontap_97_options = ['create_destination', 'source_cluster', 'destination_cluster'] + partially_supported_rest_properties = [(property, (9, 7)) for property in ontap_97_options] + partially_supported_rest_properties.extend([('schedule', (9, 11, 1)), ('identity_preservation', (9, 11, 1))]) + use_rest, error = rest_api.is_rest_supported_properties( + self.parameters, used_unsupported_rest_properties, partially_supported_rest_properties, report_error=True) + if error is not None: + if 'relationship_type' in error: + error = error.replace('relationship_type', 'relationship_type: %s' % rtype) + if 'schedule' in error: + error += ' - With REST use the policy option to define a schedule.' + self.module.fail_json(msg=error) + + if not use_rest and any(x in self.parameters for x in ontap_97_options): + self.module.fail_json(msg='Error: %s' % rest_api.options_require_ontap_version(ontap_97_options, version='9.7', use_rest=use_rest)) + return rest_api, use_rest + + def setup_zapi(self): + if self.parameters.get('identity_preservation'): + self.module.fail_json(msg="Error: The option identity_preservation is supported only with REST.") + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + host_options = self.parameters['peer_options'] if self.parameters.get('connection_type') == 'ontap_elementsw' else None + return netapp_utils.setup_na_ontap_zapi(module=self.module, host_options=host_options) + + def set_element_connection(self, kind): + if kind == 'source': + elem = netapp_utils.create_sf_connection(module=self.module, host_options=self.parameters['peer_options']) + elif kind == 'destination': + elem = netapp_utils.create_sf_connection(module=self.module, host_options=self.parameters) + elementsw_helper = NaElementSWModule(elem) + return elementsw_helper, elem + + def snapmirror_get_iter(self, destination=None): + """ + Compose NaElement object to query current 
SnapMirror relations using destination-path + SnapMirror relation for a destination path is unique + :return: NaElement object for SnapMirror-get-iter + """ + snapmirror_get_iter = netapp_utils.zapi.NaElement('snapmirror-get-iter') + query = netapp_utils.zapi.NaElement('query') + snapmirror_info = netapp_utils.zapi.NaElement('snapmirror-info') + if destination is None: + destination = self.parameters['destination_path'] + snapmirror_info.add_new_child('destination-location', destination) + query.add_child_elem(snapmirror_info) + snapmirror_get_iter.add_child_elem(query) + return snapmirror_get_iter + + def snapmirror_get(self, destination=None): + """ + Get current SnapMirror relations + :return: Dictionary of current SnapMirror details if query successful, else None + """ + if self.use_rest: + return self.snapmirror_get_rest(destination) + + snapmirror_get_iter = self.snapmirror_get_iter(destination) + try: + result = self.server.invoke_successfully(snapmirror_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching snapmirror info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) > 0: + snapmirror_info = result.get_child_by_name('attributes-list').get_child_by_name( + 'snapmirror-info') + snap_info = {} + snap_info['mirror_state'] = snapmirror_info.get_child_content('mirror-state') + snap_info['status'] = snapmirror_info.get_child_content('relationship-status') + snap_info['schedule'] = snapmirror_info.get_child_content('schedule') + snap_info['policy'] = snapmirror_info.get_child_content('policy') + snap_info['relationship_type'] = snapmirror_info.get_child_content('relationship-type') + snap_info['current_transfer_type'] = snapmirror_info.get_child_content('current-transfer-type') + snap_info['source_path'] = snapmirror_info.get_child_content('source-location') + if 
snapmirror_info.get_child_by_name('max-transfer-rate'): + snap_info['max_transfer_rate'] = int(snapmirror_info.get_child_content('max-transfer-rate')) + if snapmirror_info.get_child_by_name('last-transfer-error'): + snap_info['last_transfer_error'] = snapmirror_info.get_child_content('last-transfer-error') + if snapmirror_info.get_child_by_name('is-healthy') is not None: + snap_info['is_healthy'] = self.na_helper.get_value_for_bool(True, snapmirror_info.get_child_content('is-healthy')) + if snapmirror_info.get_child_by_name('unhealthy-reason'): + snap_info['unhealthy_reason'] = snapmirror_info.get_child_content('unhealthy-reason') + if snap_info['schedule'] is None: + snap_info['schedule'] = "" + return snap_info + return None + + def wait_for_idle_status(self): + # sleep for a maximum of X seconds (with a default of 5 minutes), in 30 seconds increments + transferring_time_out = self.parameters['transferring_time_out'] + increment = 30 + if transferring_time_out <= 0: + return self.snapmirror_get() + for __ in range(0, transferring_time_out, increment): + time.sleep(increment) + current = self.snapmirror_get() + if current and current['status'] != 'transferring': + return current + self.module.warn('SnapMirror relationship is still transferring after %d seconds.' 
% transferring_time_out) + return current + + def wait_for_quiesced_status(self): + # sleep for a maximum of 25 seconds, in 5 seconds increments + for __ in range(5): + time.sleep(5) + sm_info = self.snapmirror_get() + if sm_info['status'] == 'quiesced' or sm_info['mirror_state'] == 'paused': + return + self.module.fail_json(msg='Taking a long time to quiesce SnapMirror relationship, try again later') + + def check_if_remote_volume_exists(self): + """ + Validate existence of source volume + :return: True if volume exists, False otherwise + """ + self.set_source_cluster_connection() + + if self.src_use_rest: + return self.check_if_remote_volume_exists_rest() + + # do a get volume to check if volume exists or not + volume_info = netapp_utils.zapi.NaElement('volume-get-iter') + volume_attributes = netapp_utils.zapi.NaElement('volume-attributes') + volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes') + volume_id_attributes.add_new_child('name', self.parameters['source_volume']) + # if source_volume is present, then source_vserver is also guaranteed to be present + volume_id_attributes.add_new_child('vserver-name', self.parameters['source_vserver']) + volume_attributes.add_child_elem(volume_id_attributes) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(volume_attributes) + volume_info.add_child_elem(query) + try: + result = self.source_server.invoke_successfully(volume_info, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching source volume details %s: %s' + % (self.parameters['source_volume'], to_native(error)), + exception=traceback.format_exc()) + return bool(result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0) + + def get_svm_from_destination_vserver_or_path(self): + svm_name = self.parameters.get('destination_vserver') + if svm_name is None: + path = self.parameters.get('destination_path') + if path is not None: + # if there is no ':' in 
path, it returns path + svm_name = path.split(':', 1)[0] + return svm_name + + def set_initialization_state(self): + """ + return: + 'snapmirrored' for relationships with a policy of type 'async' + 'in_sync' for relationships with a policy of type 'sync' + """ + policy_type = 'async' # REST defaults to Asynchronous + if self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'consistency_group_volumes']) is not None: + # except for consistency groups + policy_type = 'sync' + if self.parameters.get('policy') is not None: + svm_name = self.get_svm_from_destination_vserver_or_path() + policy_type, error = self.snapmirror_policy_rest_get(self.parameters['policy'], svm_name) + if error: + error = 'Error fetching SnapMirror policy: %s' % error + elif policy_type is None: + error = 'Error: cannot find policy %s for vserver %s' % (self.parameters['policy'], svm_name) + elif policy_type not in ('async', 'sync'): + error = 'Error: unexpected type: %s for policy %s for vserver %s' % (policy_type, self.parameters['policy'], svm_name) + if error: + self.module.fail_json(msg=error) + return 'snapmirrored' if policy_type == 'async' else 'in_sync' + + @staticmethod + def string_or_none(value): + """ REST expect null for "" """ + return value or None + + def get_create_body(self): + """ + It gathers the required information for snapmirror create + """ + initialized = False + body = { + "source": self.na_helper.filter_out_none_entries(self.parameters['source_endpoint']), + "destination": self.na_helper.filter_out_none_entries(self.parameters['destination_endpoint']) + } + if self.na_helper.safe_get(self.parameters, ['create_destination', 'enabled']): # testing for True + body['create_destination'] = self.na_helper.filter_out_none_entries(self.parameters['create_destination']) + if self.parameters['initialize']: + body['state'] = self.set_initialization_state() + initialized = True + if self.na_helper.safe_get(self.parameters, ['policy']) is not None: + body['policy'] = 
{'name': self.parameters['policy']} + if self.na_helper.safe_get(self.parameters, ['schedule']) is not None: + body['transfer_schedule'] = {'name': self.string_or_none(self.parameters['schedule'])} + if self.parameters.get('identity_preservation'): + body['identity_preservation'] = self.parameters['identity_preservation'] + return body, initialized + + def snapmirror_create(self): + """ + Create a SnapMirror relationship + """ + if self.parameters.get('peer_options') and self.parameters.get('source_volume') and not self.check_if_remote_volume_exists(): + self.module.fail_json(msg='Source volume does not exist. Please specify a volume that exists') + if self.use_rest: + return self.snapmirror_rest_create() + + options = {'source-location': self.parameters['source_path'], + 'destination-location': self.parameters['destination_path']} + snapmirror_create = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-create', **options) + if self.parameters.get('relationship_type'): + snapmirror_create.add_new_child('relationship-type', self.parameters['relationship_type']) + if self.parameters.get('schedule'): + snapmirror_create.add_new_child('schedule', self.parameters['schedule']) + if self.parameters.get('policy'): + snapmirror_create.add_new_child('policy', self.parameters['policy']) + if self.parameters.get('max_transfer_rate'): + snapmirror_create.add_new_child('max-transfer-rate', str(self.parameters['max_transfer_rate'])) + if self.parameters.get('identity_preserve'): + snapmirror_create.add_new_child('identity-preserve', self.na_helper.get_value_for_bool(False, self.parameters['identity_preserve'])) + try: + self.server.invoke_successfully(snapmirror_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating SnapMirror %s' % to_native(error), + exception=traceback.format_exc()) + if self.parameters['initialize']: + self.snapmirror_initialize() + + def set_source_cluster_connection(self): + 
""" + Setup ontap ZAPI or REST server connection for source hostname + :return: None + """ + self.src_rest_api = netapp_utils.OntapRestAPI(self.module, host_options=self.parameters['peer_options']) + unsupported_rest_properties = ['identity_preserve', 'max_transfer_rate', 'schedule'] + rtype = self.parameters.get('relationship_type') + if rtype not in (None, 'extended_data_protection', 'restore'): + unsupported_rest_properties.append('relationship_type') + used_unsupported_rest_properties = [x for x in unsupported_rest_properties if x in self.parameters] + self.src_use_rest, error = self.src_rest_api.is_rest(used_unsupported_rest_properties) + if error is not None: + if 'relationship_type' in error: + error = error.replace('relationship_type', 'relationship_type: %s' % rtype) + self.module.fail_json(msg=error) + if not self.src_use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.source_server = netapp_utils.setup_na_ontap_zapi(module=self.module, host_options=self.parameters['peer_options']) + + def delete_snapmirror(self, relationship_type, mirror_state): + """ + Delete a SnapMirror relationship + #1. Quiesce the SnapMirror relationship at destination + #2. Break the SnapMirror relationship at the destination + #3. Release the SnapMirror at source + #4. 
Delete SnapMirror at destination + """ + # Quiesce and Break at destination + if relationship_type not in ['load_sharing', 'vault'] and mirror_state not in ['uninitialized', 'broken-off', 'broken_off']: + self.snapmirror_break(before_delete=True) + # if source is ONTAP, release the destination at source cluster + # if the source_hostname is unknown, do not run snapmirror_release + if self.parameters.get('peer_options') is not None and self.parameters.get('connection_type') != 'elementsw_ontap' and not self.use_rest: + self.set_source_cluster_connection() + if self.get_destination(): + # Release at source + # Note: REST remove the source from destination, so not required to release from source for REST + self.snapmirror_release() + # Delete at destination + self.snapmirror_delete() + + def snapmirror_quiesce(self): + """ + Quiesce SnapMirror relationship - disable all future transfers to this destination + """ + if self.use_rest: + return self.snapmirror_quiesce_rest() + + options = {'destination-location': self.parameters['destination_path']} + + snapmirror_quiesce = netapp_utils.zapi.NaElement.create_node_with_children( + 'snapmirror-quiesce', **options) + try: + self.server.invoke_successfully(snapmirror_quiesce, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error quiescing SnapMirror: %s' + % (to_native(error)), exception=traceback.format_exc()) + # checking if quiesce was passed successfully + self.wait_for_quiesced_status() + + def snapmirror_delete(self): + """ + Delete SnapMirror relationship at destination cluster + """ + if self.use_rest: + return self.snapmirror_delete_rest() + options = {'destination-location': self.parameters['destination_path']} + + snapmirror_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'snapmirror-destroy', **options) + try: + self.server.invoke_successfully(snapmirror_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + msg = 
'Error deleting SnapMirror: %s' % to_native(error) + if self.previous_errors: + msg += '. Previous error(s): %s' % ' -- '.join(self.previous_errors) + self.module.fail_json(msg=msg, exception=traceback.format_exc()) + + def snapmirror_break(self, destination=None, before_delete=False): + """ + Break SnapMirror relationship at destination cluster + #1. Quiesce the SnapMirror relationship at destination + #2. Break the SnapMirror relationship at the destination + """ + self.snapmirror_quiesce() + + if self.use_rest: + if self.parameters['current_mirror_state'] == 'broken_off' or self.parameters['current_transfer_status'] == 'transferring': + self.na_helper.changed = False + self.module.fail_json(msg="snapmirror data are transferring") + return self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="broken_off", before_delete=before_delete) + if destination is None: + destination = self.parameters['destination_path'] + options = {'destination-location': destination} + snapmirror_break = netapp_utils.zapi.NaElement.create_node_with_children( + 'snapmirror-break', **options) + try: + self.server.invoke_successfully(snapmirror_break, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + msg = 'Error breaking SnapMirror relationship: %s' % to_native(error) + if before_delete: + # record error but proceed with deletion + self.previous_errors.append(msg) + else: + self.module.fail_json(msg=msg, exception=traceback.format_exc()) + + def snapmirror_release(self): + """ + Release SnapMirror relationship from source cluster + """ + # if it's REST call, then not required to run release + if self.use_rest: + return + options = {'destination-location': self.parameters['destination_path'], + 'relationship-info-only': self.na_helper.get_value_for_bool(False, self.parameters['relationship_info_only'])} + snapmirror_release = netapp_utils.zapi.NaElement.create_node_with_children( + 'snapmirror-release', **options) + try: + 
self.source_server.invoke_successfully(snapmirror_release, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error releasing SnapMirror relationship: %s' + % (to_native(error)), + exception=traceback.format_exc()) + + def snapmirror_abort(self): + """ + Abort a SnapMirror relationship in progress + """ + if self.use_rest: + return self.snapmirror_abort_rest() + + options = {'destination-location': self.parameters['destination_path']} + snapmirror_abort = netapp_utils.zapi.NaElement.create_node_with_children( + 'snapmirror-abort', **options) + try: + self.server.invoke_successfully(snapmirror_abort, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error aborting SnapMirror relationship: %s' + % (to_native(error)), + exception=traceback.format_exc()) + + def snapmirror_initialize(self, current=None): + """ + Initialize SnapMirror based on relationship state + """ + if current and current['status'] == 'transferring' or self.parameters.get('current_transfer_status') == 'transferring': + # Operation already in progress, let's wait for it to end + current = self.wait_for_idle_status() + if not current: + current = self.snapmirror_get() + if self.use_rest: + if current['mirror_state'] == 'uninitialized' and current['status'] != 'transferring': + self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="snapmirrored") + self.wait_for_idle_status() + return + if current['mirror_state'] != 'snapmirrored': + initialize_zapi = 'snapmirror-initialize' + if self.parameters.get('relationship_type') and self.parameters['relationship_type'] == 'load_sharing': + initialize_zapi = 'snapmirror-initialize-ls-set' + options = {'source-location': self.parameters['source_path']} + else: + options = {'destination-location': self.parameters['destination_path']} + snapmirror_init = netapp_utils.zapi.NaElement.create_node_with_children( + initialize_zapi, **options) + try: + 
self.server.invoke_successfully(snapmirror_init, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error initializing SnapMirror: %s' + % (to_native(error)), + exception=traceback.format_exc()) + self.wait_for_idle_status() + + def snapmirror_resync(self): + """ + resync SnapMirror based on relationship state + """ + if self.use_rest: + self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="snapmirrored") + else: + options = {'destination-location': self.parameters['destination_path']} + snapmirror_resync = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-resync', **options) + try: + self.server.invoke_successfully(snapmirror_resync, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error resyncing SnapMirror relationship: %s' % (to_native(error)), + exception=traceback.format_exc()) + self.wait_for_idle_status() + + def snapmirror_resume(self): + """ + resume SnapMirror based on relationship state + """ + if self.use_rest: + return self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="snapmirrored") + + options = {'destination-location': self.parameters['destination_path']} + snapmirror_resume = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-resume', **options) + try: + self.server.invoke_successfully(snapmirror_resume, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error resuming SnapMirror relationship: %s' % (to_native(error)), exception=traceback.format_exc()) + + def snapmirror_restore(self): + """ + restore SnapMirror based on relationship state + """ + if self.use_rest: + return self.snapmirror_restore_rest() + + options = {'destination-location': self.parameters['destination_path'], + 'source-location': self.parameters['source_path']} + if self.parameters.get('source_snapshot'): + options['source-snapshot'] = self.parameters['source_snapshot'] + if 
self.parameters.get('clean_up_failure'): + # only send it when True + options['clean-up-failure'] = self.na_helper.get_value_for_bool(False, self.parameters['clean_up_failure']) + snapmirror_restore = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-restore', **options) + try: + self.server.invoke_successfully(snapmirror_restore, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error restoring SnapMirror relationship: %s' % (to_native(error)), exception=traceback.format_exc()) + + def snapmirror_modify(self, modify): + """ + Modify SnapMirror schedule or policy + """ + if self.use_rest: + return self.snapmirror_mod_init_resync_break_quiesce_resume_rest(modify=modify) + + options = {'destination-location': self.parameters['destination_path']} + snapmirror_modify = netapp_utils.zapi.NaElement.create_node_with_children( + 'snapmirror-modify', **options) + param_to_zapi = { + 'schedule': 'schedule', + 'policy': 'policy', + 'max_transfer_rate': 'max-transfer-rate' + } + for param_key, value in modify.items(): + snapmirror_modify.add_new_child(param_to_zapi[param_key], str(value)) + try: + self.server.invoke_successfully(snapmirror_modify, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying SnapMirror schedule or policy: %s' + % (to_native(error)), + exception=traceback.format_exc()) + + def snapmirror_update(self, relationship_type): + """ + Update data in destination endpoint + """ + if self.use_rest: + return self.snapmirror_update_rest() + + zapi = 'snapmirror-update' + options = {'destination-location': self.parameters['destination_path']} + if relationship_type == 'load_sharing': + zapi = 'snapmirror-update-ls-set' + options = {'source-location': self.parameters['source_path']} + + snapmirror_update = netapp_utils.zapi.NaElement.create_node_with_children( + zapi, **options) + try: + self.server.invoke_successfully(snapmirror_update, 
enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error updating SnapMirror: %s' + % (to_native(error)), + exception=traceback.format_exc()) + + @staticmethod + def new_option(option, prefix): + new_option_name = option[len(prefix):] + if new_option_name == 'vserver': + new_option_name = 'path (or svm)' + elif new_option_name == 'volume': + new_option_name = 'path' + return '%sendpoint:%s' % (prefix, new_option_name) + + def too_old(self, minimum_generation, minimum_major): + return not self.rest_api.meets_rest_minimum_version(self.use_rest, minimum_generation, minimum_major, 0) + + def set_new_style(self): + # if source_endpoint or destination_endpoint is present, both are required + # then sanitize inputs to support new style + if not self.parameters.get('destination_endpoint') or not self.parameters.get('source_endpoint'): + self.module.fail_json(msg='Missing parameters: Source endpoint or Destination endpoint') + # sanitize inputs + self.parameters['source_endpoint'] = self.na_helper.filter_out_none_entries(self.parameters['source_endpoint']) + self.parameters['destination_endpoint'] = self.na_helper.filter_out_none_entries(self.parameters['destination_endpoint']) + # options requiring 9.7 or better, and REST + ontap_97_options = ['cluster', 'ipspace'] + if self.too_old(9, 7) and any(x in self.parameters['source_endpoint'] for x in ontap_97_options): + self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_97_options, version='9.7', use_rest=self.use_rest)) + if self.too_old(9, 7) and any(x in self.parameters['destination_endpoint'] for x in ontap_97_options): + self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_97_options, version='9.7', use_rest=self.use_rest)) + # options requiring 9.8 or better, and REST + ontap_98_options = ['consistency_group_volumes'] + if self.too_old(9, 8) and any(x in self.parameters['source_endpoint'] for x in 
ontap_98_options): + self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_98_options, version='9.8', use_rest=self.use_rest)) + if self.too_old(9, 8) and any(x in self.parameters['destination_endpoint'] for x in ontap_98_options): + self.module.fail_json(msg='Error: %s' % self.rest_api.options_require_ontap_version(ontap_98_options, version='9.8', use_rest=self.use_rest)) + # fill in old style parameters + self.parameters['source_cluster'] = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'cluster']) + self.parameters['source_path'] = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'path']) + self.parameters['source_vserver'] = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'svm']) + self.parameters['destination_cluster'] = self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'cluster']) + self.parameters['destination_path'] = self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'path']) + self.parameters['destination_vserver'] = self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'svm']) + self.new_style = True + + def set_endpoints(self): + # use new structures for source and destination endpoints + for location in ('source', 'destination'): + endpoint = '%s_endpoint' % location + self.parameters[endpoint] = {} + # skipping svm for now, as it is not accepted and not needed with path + # for old, new in (('path', 'path'), ('vserver', 'svm'), ('cluster', 'cluster')): + for old, new in (('path', 'path'), ('cluster', 'cluster')): + value = self.parameters.get('%s_%s' % (location, old)) + if value is not None: + self.parameters[endpoint][new] = value + + def check_parameters(self): + """ + Validate parameters and fail if one or more required params are missing + Update source and destination path from vserver and volume parameters + """ + for option in ['source_cluster', 'source_path', 'source_volume', 'source_vserver']: + if option in self.parameters: 
+ self.module.warn('option: %s is deprecated, please use %s' % (option, self.new_option(option, 'source_'))) + for option in ['destination_cluster', 'destination_path', 'destination_volume', 'destination_vserver']: + if option in self.parameters: + self.module.warn('option: %s is deprecated, please use %s' % (option, self.new_option(option, 'destination_'))) + + if self.parameters.get('source_endpoint') or self.parameters.get('destination_endpoint'): + self.set_new_style() + if self.parameters.get('source_path') or self.parameters.get('destination_path'): + if (not self.parameters.get('destination_path') or not self.parameters.get('source_path'))\ + and (self.parameters['state'] == 'present' or (self.parameters['state'] == 'absent' and not self.parameters.get('destination_path'))): + self.module.fail_json(msg='Missing parameters: Source path or Destination path') + elif self.parameters.get('source_volume'): + if not self.parameters.get('source_vserver') or not self.parameters.get('destination_vserver'): + self.module.fail_json(msg='Missing parameters: source vserver or destination vserver or both') + self.parameters['source_path'] = self.parameters['source_vserver'] + ":" + self.parameters['source_volume'] + self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":" +\ + self.parameters['destination_volume'] + elif self.parameters.get('source_vserver') and self.parameters.get('source_endpoint') is None: + self.parameters['source_path'] = self.parameters['source_vserver'] + ":" + self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":" + + if self.use_rest and not self.new_style: + self.set_endpoints() + + def get_destination(self): + """ + get the destination info + # Note: REST module to get_destination is not required as it's used in only ZAPI. 
+ """ + result = None + get_dest_iter = netapp_utils.zapi.NaElement('snapmirror-get-destination-iter') + query = netapp_utils.zapi.NaElement('query') + snapmirror_dest_info = netapp_utils.zapi.NaElement('snapmirror-destination-info') + snapmirror_dest_info.add_new_child('destination-location', self.parameters['destination_path']) + query.add_child_elem(snapmirror_dest_info) + get_dest_iter.add_child_elem(query) + try: + result = self.source_server.invoke_successfully(get_dest_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching snapmirror destinations info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) > 0: + return True + return None + + @staticmethod + def element_source_path_format_matches(value): + return re.match(pattern=r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\/lun\/[0-9]+", + string=value) + + def check_elementsw_parameters(self, kind='source'): + """ + Validate all ElementSW cluster parameters required for managing the SnapMirror relationship + Validate if both source and destination paths are present + Validate if source_path follows the required format + Validate SVIP + Validate if ElementSW volume exists + :return: None + """ + path = None + if kind == 'destination': + path = self.parameters.get('destination_path') + elif kind == 'source': + path = self.parameters.get('source_path') + if path is None: + self.module.fail_json(msg="Error: Missing required parameter %s_path for " + "connection_type %s" % (kind, self.parameters['connection_type'])) + if NetAppONTAPSnapmirror.element_source_path_format_matches(path) is None: + self.module.fail_json(msg="Error: invalid %s_path %s. 
" + "If the path is a ElementSW cluster, the value should be of the format" + " :/lun/" % (kind, path)) + # validate source_path + elementsw_helper, elem = self.set_element_connection(kind) + self.validate_elementsw_svip(path, elem) + self.check_if_elementsw_volume_exists(path, elementsw_helper) + + def validate_elementsw_svip(self, path, elem): + """ + Validate ElementSW cluster SVIP + :return: None + """ + result = None + try: + result = elem.get_cluster_info() + except solidfire.common.ApiServerError as err: + self.module.fail_json(msg="Error fetching SVIP", exception=to_native(err)) + if result and result.cluster_info.svip: + cluster_svip = result.cluster_info.svip + svip = path.split(':')[0] # split IP address from source_path + if svip != cluster_svip: + self.module.fail_json(msg="Error: Invalid SVIP") + + def check_if_elementsw_volume_exists(self, path, elementsw_helper): + """ + Check if remote ElementSW volume exists + :return: None + """ + volume_id, vol_id = None, path.split('/')[-1] + try: + volume_id = elementsw_helper.volume_id_exists(int(vol_id)) + except solidfire.common.ApiServerError as err: + self.module.fail_json(msg="Error fetching Volume details", exception=to_native(err)) + + if volume_id is None: + self.module.fail_json(msg="Error: Source volume does not exist in the ElementSW cluster") + + def check_health(self): + """ + Checking the health of the snapmirror + """ + if self.parameters.get('connection_type') == 'ontap_elementsw': + return + current = self.snapmirror_get() + if current is not None and not current.get('is_healthy', True): + msg = ['SnapMirror relationship exists but is not healthy.'] + if 'unhealthy_reason' in current: + msg.append('Unhealthy reason: %s' % current['unhealthy_reason']) + if 'last_transfer_error' in current: + msg.append('Last transfer error: %s' % current['last_transfer_error']) + self.module.warn(' '.join(msg)) + + def check_if_remote_volume_exists_rest(self): + """ + Check the remote volume exists using REST 
+ """ + if self.src_use_rest: + if self.parameters.get('source_volume') is not None and self.parameters.get('source_vserver') is not None: + volume_name = self.parameters['source_volume'] + svm_name = self.parameters['source_vserver'] + options = {'name': volume_name, 'svm.name': svm_name, 'fields': 'name,svm.name'} + api = 'storage/volumes' + record, error = rest_generic.get_one_record(self.src_rest_api, api, options) + if error: + self.module.fail_json(msg='Error fetching source volume: %s' % error) + return record is not None + return False + self.module.fail_json(msg='REST is not supported on Source') + + def snapmirror_restore_rest(self): + ''' snapmirror restore using rest ''' + # Use the POST /api/snapmirror/relationships REST API call with the property "restore=true" to create the SnapMirror restore relationship + # Use the POST /api/snapmirror/relationships/{relationship.uuid}/transfers REST API call to start the restore transfer on the SnapMirror relationship + # run this API calls on Source cluster + # if the source_hostname is unknown, do not run snapmirror_restore + body = {'destination.path': self.parameters['destination_path'], 'source.path': self.parameters['source_path'], 'restore': 'true'} + api = 'snapmirror/relationships' + dummy, error = rest_generic.post_async(self.rest_api, api, body, timeout=120) + if error: + self.module.fail_json(msg='Error restoring SnapMirror: %s' % to_native(error), exception=traceback.format_exc()) + relationship_uuid = self.get_relationship_uuid() + # REST API call to start the restore transfer on the SnapMirror relationship + if relationship_uuid is None: + self.module.fail_json(msg="Error restoring SnapMirror: unable to get UUID for the SnapMirror relationship.") + + body = {'source_snapshot': self.parameters['source_snapshot']} if self.parameters.get('source_snapshot') else {} + api = 'snapmirror/relationships/%s/transfers' % relationship_uuid + dummy, error = rest_generic.post_async(self.rest_api, api, body, 
timeout=60, job_timeout=120) + if error: + self.module.fail_json(msg='Error restoring SnapMirror Transfer: %s' % to_native(error), exception=traceback.format_exc()) + + def get_relationship_uuid(self, after_create=True): + # this may be called after a create including restore, so we may need to fetch the data + if after_create and self.parameters.get('uuid') is None: + self.snapmirror_get() + return self.parameters.get('uuid') + + def snapmirror_mod_init_resync_break_quiesce_resume_rest(self, state=None, modify=None, before_delete=False): + """ + To perform SnapMirror modify, init, resume, resync and break. + 1. Modify only updates the SnapMirror policy, which is passed in the body. + 2. To perform SnapMirror init - state=snapmirrored and mirror_state=uninitialized. + 3. To perform SnapMirror resync - state=snapmirrored and mirror_state=broken_off. + 4. To perform SnapMirror break - state=broken_off and transfer_state not transferring. + 5. To perform SnapMirror quiesce - state=pause and mirror_state not broken_off. + 6. To perform SnapMirror resume - state=snapmirrored. 
+ """ + uuid = self.get_relationship_uuid() + if uuid is None: + self.module.fail_json(msg="Error in updating SnapMirror relationship: unable to get UUID for the SnapMirror relationship.") + + body = {} + if state is not None: + body["state"] = state + elif modify: + for key in modify: + if key == 'policy': + body[key] = {"name": modify[key]} + elif key == 'schedule': + body['transfer_schedule'] = {"name": self.string_or_none(modify[key])} + else: + self.module.warn(msg="Unexpected key in modify: %s, value: %s" % (key, modify[key])) + else: + self.na_helper.changed = False + return + api = 'snapmirror/relationships' + dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body) + if error: + msg = 'Error patching SnapMirror: %s: %s' % (body, to_native(error)) + if before_delete: + self.previous_errors.append(msg) + else: + self.module.fail_json(msg=msg, exception=traceback.format_exc()) + + def snapmirror_update_rest(self): + """ + Perform an update on the relationship using POST on /snapmirror/relationships/{relationship.uuid}/transfers + """ + uuid = self.get_relationship_uuid() + if uuid is None: + self.module.fail_json(msg="Error in updating SnapMirror relationship: unable to get UUID for the SnapMirror relationship.") + api = 'snapmirror/relationships/%s/transfers' % uuid + body = {} + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error updating SnapMirror relationship: %s:' % to_native(error), exception=traceback.format_exc()) + + def snapmirror_abort_rest(self): + """ + Abort a SnapMirror relationship in progress using REST + """ + uuid = self.get_relationship_uuid(after_create=False) + transfer_uuid = self.parameters.get('transfer_uuid') + if uuid is None or transfer_uuid is None: + self.module.fail_json(msg="Error in aborting SnapMirror: unable to get either uuid: %s or transfer_uuid: %s." 
% (uuid, transfer_uuid)) + api = 'snapmirror/relationships/%s/transfers' % uuid + body = {"state": "aborted"} + dummy, error = rest_generic.patch_async(self.rest_api, api, transfer_uuid, body) + if error: + self.module.fail_json(msg='Error aborting SnapMirror: %s' % to_native(error), exception=traceback.format_exc()) + + def snapmirror_quiesce_rest(self): + """ + SnapMirror quiesce using REST + """ + if (self.parameters['current_mirror_state'] == 'paused' + or self.parameters['current_mirror_state'] == 'broken_off' + or self.parameters['current_transfer_status'] == 'transferring'): + return + self.snapmirror_mod_init_resync_break_quiesce_resume_rest(state="paused") + self.wait_for_quiesced_status() + + def snapmirror_delete_rest(self): + """ + Delete SnapMirror relationship at destination cluster using REST + """ + uuid = self.get_relationship_uuid(after_create=False) + if uuid is None: + self.module.fail_json(msg='Error in deleting SnapMirror: %s, unable to get UUID for the SnapMirror relationship.' % uuid) + api = 'snapmirror/relationships' + dummy, error = rest_generic.delete_async(self.rest_api, api, uuid) + if error: + msg = 'Error deleting SnapMirror: %s' % to_native(error) + if self.previous_errors: + msg += '. 
Previous error(s): %s' % ' -- '.join(self.previous_errors) + self.module.fail_json(msg=msg, exception=traceback.format_exc()) + + def snapmirror_rest_create(self): + """ + Create a SnapMirror relationship using REST + """ + body, initialized = self.get_create_body() + api = 'snapmirror/relationships' + dummy, error = rest_generic.post_async(self.rest_api, api, body, timeout=120) + if error: + self.module.fail_json(msg='Error creating SnapMirror: %s' % to_native(error), exception=traceback.format_exc()) + if self.parameters['initialize']: + if initialized: + self.wait_for_idle_status() + else: + self.snapmirror_initialize() + + def snapmirror_get_rest(self, destination=None): + """ Get the current snapmirror info """ + if destination is None and "destination_path" in self.parameters: + # check_param get the value if it's given in other format like destination_endpoint etc.. + destination = self.parameters['destination_path'] + + api = 'snapmirror/relationships' + fields = 'uuid,state,transfer.state,transfer.uuid,policy.name,unhealthy_reason.message,healthy,source' + if 'schedule' in self.parameters: + fields += ',transfer_schedule' + options = {'destination.path': destination, 'fields': fields} + record, error = rest_generic.get_one_record(self.rest_api, api, options) + if error: + self.module.fail_json(msg="Error getting SnapMirror %s: %s" % (destination, to_native(error)), + exception=traceback.format_exc()) + if record is not None: + snap_info = {} + self.parameters['uuid'] = self.na_helper.safe_get(record, ['uuid']) + self.parameters['transfer_uuid'] = self.na_helper.safe_get(record, ['transfer', 'uuid']) + self.parameters['current_mirror_state'] = self.na_helper.safe_get(record, ['state']) + snap_info['mirror_state'] = self.na_helper.safe_get(record, ['state']) + snap_info['status'] = self.na_helper.safe_get(record, ['transfer', 'state']) + self.parameters['current_transfer_status'] = self.na_helper.safe_get(record, ['transfer', 'state']) + snap_info['policy'] 
= self.na_helper.safe_get(record, ['policy', 'name']) + # REST API supports only Extended Data Protection (XDP) SnapMirror relationship + snap_info['relationship_type'] = 'extended_data_protection' + # initialized to avoid a name KeyError + snap_info['current_transfer_type'] = "" + snap_info['max_transfer_rate'] = "" + if 'unhealthy_reason' in record: + snap_info['last_transfer_error'] = self.na_helper.safe_get(record, ['unhealthy_reason']) + snap_info['unhealthy_reason'] = self.na_helper.safe_get(record, ['unhealthy_reason']) + snap_info['is_healthy'] = self.na_helper.safe_get(record, ['healthy']) + snap_info['source_path'] = self.na_helper.safe_get(record, ['source', 'path']) + # if the field is absent, assume "" + snap_info['schedule'] = self.na_helper.safe_get(record, ['transfer_schedule', 'name']) or "" + return snap_info + return None + + def snapmirror_policy_rest_get(self, policy_name, svm_name): + """ + get policy type + There is a set of system level policies, and users can create their own for a SVM + REST does not return a svm entry for system policies + svm_name may not exist yet as it can be created when creating the snapmirror relationship + """ + policy_type = None + system_policy_type = None # policies not associated to a SVM + api = 'snapmirror/policies' + query = { + "name": policy_name, + "fields": "svm.name,type" + } + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query) + if error is None and records is not None: + for record in records: + if 'svm' in record: + if record['svm']['name'] == svm_name: + policy_type = record['type'] + break + else: + system_policy_type = record['type'] + if policy_type is None: + policy_type = system_policy_type + return policy_type, error + + def add_break_action(self, actions, current): + # If current is not None, it means the state is present otherwise we would take a delete action + if current and self.parameters['relationship_state'] == 'broken': + if current['mirror_state'] == 
'uninitialized': + self.module.fail_json(msg='SnapMirror relationship cannot be broken if mirror state is uninitialized') + elif current['relationship_type'] in ['load_sharing', 'vault']: + self.module.fail_json(msg='SnapMirror break is not allowed in a load_sharing or vault relationship') + elif current['mirror_state'] not in ['broken-off', 'broken_off']: + actions.append('break') + self.na_helper.changed = True + + def add_active_actions(self, actions, current): + # add initialize or resume action as needed + # add resync or check_for_update action as needed + # If current is not None, it means the state is present otherwise we would take a delete action + if current and self.parameters['relationship_state'] == 'active': + # check for initialize + if self.parameters['initialize'] and current['mirror_state'] == 'uninitialized' and current['current_transfer_type'] != 'initialize': + actions.append('initialize') + # set changed explicitly for initialize + self.na_helper.changed = True + # resume when state is quiesced + if current['status'] == 'quiesced' or current['mirror_state'] == 'paused': + actions.append('resume') + # set changed explicitly for resume + self.na_helper.changed = True + # resync when state is broken-off + if current['mirror_state'] in ['broken-off', 'broken_off']: + actions.append('resync') + # set changed explicitly for resync + self.na_helper.changed = True + # Update when create is called again, or modify is being called + elif self.parameters['update']: + actions.append('check_for_update') + + def get_svm_peer(self, source_svm, destination_svm): + if self.use_rest: + api = 'svm/peers' + query = {'name': source_svm, 'svm.name': destination_svm} + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields='peer') + if error: + self.module.fail_json(msg='Error retrieving SVM peer: %s' % error) + if record: + return self.na_helper.safe_get(record, ['peer', 'svm', 'name']), self.na_helper.safe_get(record, ['peer', 'cluster', 
'name']) + else: + query = { + 'query': { + 'vserver-peer-info': { + 'peer-vserver': source_svm, + 'vserver': destination_svm + } + } + } + get_request = netapp_utils.zapi.NaElement('vserver-peer-get-iter') + get_request.translate_struct(query) + try: + result = self.server.invoke_successfully(get_request, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching vserver peer info: %s' % to_native(error), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + info = result.get_child_by_name('attributes-list').get_child_by_name('vserver-peer-info') + return info['remote-vserver-name'], info['peer-cluster'] + + return None, None + + def validate_source_path(self, current):
 + """ There can only be one destination, so we use it as the key + But we want to make sure another relationship is not already using the destination + It's a bit complicated as the source SVM name can be aliased to a local name if there are conflicts + So the source can be ansibleSVM: and show locally as ansibleSVM: if there is no conflict or ansibleSVM.1: + or any alias the user likes. + And in the input parameters, it may use the remote name or local alias. 
+ """ + if not current: + return + source_path = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'path']) or self.parameters.get('source_path') + destination_path = self.na_helper.safe_get(self.parameters, ['destination_endpoint', 'path']) or self.parameters.get('destination_path') + source_cluster = self.na_helper.safe_get(self.parameters, ['source_endpoint', 'cluster']) or self.parameters.get('source_cluster') + current_source_path = current.pop('source_path', None) + if source_path and current_source_path and self.parameters.get('validate_source_path'): + if self.parameters['connection_type'] != 'ontap_ontap': + # take things at face value + if current_source_path != source_path: + self.module.fail_json(msg='Error: another relationship is present for the same destination with source_path:' + ' "%s". Desired: %s on %s' + % (current_source_path, source_path, source_cluster)) + return + # with ONTAP -> ONTAP, vserver names can be aliased + current_source_svm, dummy, dummy = current_source_path.rpartition(':') + if not current_source_svm: + self.module.warn('Unexpected source path: %s, skipping validation.' % current_source_path) + destination_svm, dummy, dummy = destination_path.rpartition(':') + if not destination_svm: + self.module.warn('Unexpected destination path: %s, skipping validation.' % destination_path) + if not current_source_svm or not destination_svm: + return + peer_svm, peer_cluster = self.get_svm_peer(current_source_svm, destination_svm) + if peer_svm is not None: + real_source_path = current_source_path.replace(current_source_svm, peer_svm, 1) + # match either the local name or the remote name + if (real_source_path != source_path and current_source_path != source_path)\ + or (peer_cluster is not None and source_cluster is not None and source_cluster != peer_cluster): + self.module.fail_json(msg='Error: another relationship is present for the same destination with source_path:' + ' "%s" (%s on cluster %s). 
    def get_actions(self):
        """Compute the ordered list of actions needed to reach the desired state.

        Returns a tuple (actions, current, modify):
          - actions: operation names consumed in order by take_actions()
          - current: the existing relationship info (or None)
          - modify: attributes that need changing (or None)
        """
        # 'restore' relationships are one-shot: no current-state lookup is done.
        restore = self.parameters.get('relationship_type', '') == 'restore'
        current = None if restore else self.snapmirror_get()
        self.validate_source_path(current)
        # ONTAP automatically converts DP to XDP, so treat a desired
        # 'data_protection' as matching an existing 'extended_data_protection'.
        if current and current['relationship_type'] == 'extended_data_protection' and self.parameters.get('relationship_type') == 'data_protection':
            self.parameters['relationship_type'] = 'extended_data_protection'
        cd_action = None if restore else self.na_helper.get_cd_action(current, self.parameters)
        modify = None
        if cd_action is None and self.parameters['state'] == 'present' and not restore:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
            # relationship_type cannot be changed on an existing relationship
            if modify and 'relationship_type' in modify:
                self.module.fail_json(msg='Error: cannot modify relationship_type from %s to %s.' %
                                          (current['relationship_type'], modify['relationship_type']))
        actions = []
        if self.parameters['state'] == 'present' and restore:
            actions.append('restore')
            self.na_helper.changed = True
        elif cd_action == 'create':
            actions.append('create')
        elif cd_action == 'delete':
            # an in-flight transfer must be aborted before the relationship can be deleted
            if current['status'] == 'transferring' or self.parameters.get('current_transfer_status') == 'transferring':
                actions.append('abort')
            actions.append('delete')
        else:
            if modify:
                actions.append('modify')
            # If current is not None, it means the state is present otherwise we would take a delete action
            self.add_break_action(actions, current)
            self.add_active_actions(actions, current)
        return actions, current, modify

    def take_actions(self, actions, current, modify):
        """Execute, in order, the actions computed by get_actions()."""
        if 'restore' in actions:
            self.snapmirror_restore()
        if 'create' in actions:
            self.snapmirror_create()
        if 'abort' in actions:
            self.snapmirror_abort()
            # deletion requires the relationship to be idle after the abort
            self.wait_for_idle_status()
        if 'delete' in actions:
            self.delete_snapmirror(current['relationship_type'], current['mirror_state'])
        if 'modify' in actions:
            self.snapmirror_modify(modify)
        if 'break' in actions:
            self.snapmirror_break()
        if 'initialize' in actions:
            self.snapmirror_initialize(current)
        if 'resume' in actions:
            self.snapmirror_resume()
        if 'resync' in actions:
            self.snapmirror_resync()

    def apply(self):
        """
        Apply action to SnapMirror
        """
        # parameter validation depends on which side is ElementSW (if any)
        # source is ElementSW
        if self.parameters['state'] == 'present' and self.parameters.get('connection_type') == 'elementsw_ontap':
            self.check_elementsw_parameters()
        elif self.parameters.get('connection_type') == 'ontap_elementsw':
            self.check_elementsw_parameters('destination')
        else:
            self.check_parameters()
        if self.parameters['state'] == 'present' and self.parameters.get('connection_type') == 'ontap_elementsw':
            # an ONTAP -> ElementSW relationship can only be created on top of an
            # already established ElementSW -> ONTAP relationship
            current_elementsw_ontap = self.snapmirror_get(self.parameters['source_path'])
            if current_elementsw_ontap is None:
                self.module.fail_json(msg='Error: creating an ONTAP to ElementSW snapmirror relationship requires an '
                                          'established SnapMirror relation from ElementSW to ONTAP cluster')

        actions, current, modify = self.get_actions()
        if self.na_helper.changed and not self.module.check_mode:
            self.take_actions(actions, current, modify)
        if 'check_for_update' in actions:
            # re-read state: an update is only possible once the mirror is established
            current = self.snapmirror_get()
            if current['mirror_state'] == 'snapmirrored':
                actions.append('update')
                if not self.module.check_mode:
                    self.snapmirror_update(current['relationship_type'])
                self.na_helper.changed = True

        self.check_health()
        # best-effort errors collected earlier are surfaced as warnings, not failures
        if self.previous_errors:
            self.module.warn('Ignored error(s): %s' % ' -- '.join(self.previous_errors))

        results = dict(changed=self.na_helper.changed)
        if actions:
            results['actions'] = actions
        self.module.exit_json(**results)


def main():
    """Module entry point: instantiate and apply."""
    community_obj = NetAppONTAPSnapmirror()
    community_obj.apply()


if __name__ == '__main__':
    main()
a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py new file mode 100644 index 000000000..9c9f371e3 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapmirror_policy.py @@ -0,0 +1,1038 @@ +#!/usr/bin/python + +# (c) 2019-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_snapmirror_policy +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_snapmirror_policy +short_description: NetApp ONTAP create, delete or modify SnapMirror policies +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '20.3.0' +author: NetApp Ansible Team (@carchi8py) +description: + - NetApp ONTAP create, modify, or destroy the SnapMirror policy + - Add, modify and remove SnapMirror policy rules + - Following parameters are not supported in REST; 'owner', 'restart', 'transfer_priority', 'tries', 'ignore_atime', 'common_snapshot_schedule' +options: + state: + description: + - Whether the specified SnapMirror policy should exist or not. + choices: ['present', 'absent'] + default: present + type: str + vserver: + description: + - Specifies the vserver for the SnapMirror policy. + - Required with ZAPI. + - Name of a data vserver with REST. + - With current versions of ONTAP, when using REST, this must be set to the cluster name for cluster scoped policies (9.12.1 and older). + - Current versions of ONTAP fail with "svm.uuid" is required when the vserver field is not set. + - With newer versions of ONTAP, omit the value, or omit this option for a cluster scoped policy with REST. + type: str + policy_name: + description: + - Specifies the SnapMirror policy name. + - C(name) added as an alias in 22.0.0. 
+ required: true + type: str + aliases: ['name'] + version_added: '22.0.0' + policy_type: + description: + - Specifies the SnapMirror policy type. Modifying the type of an existing SnapMirror policy is not supported. + - The Policy types 'sync' and 'async' are only supported in REST. + choices: ['vault', 'async_mirror', 'mirror_vault', 'strict_sync_mirror', 'sync_mirror', 'sync', 'async'] + type: str + comment: + description: + - Specifies the SnapMirror policy comment. + type: str + tries: + description: + - Specifies the number of tries. + - Not supported with REST. + type: str + transfer_priority: + description: + - Specifies the priority at which a SnapMirror transfer runs. + - Not supported with REST. + choices: ['low', 'normal'] + type: str + transfer_schedule: + description: + - Specifies the name of the schedule used to update asynchronous SnapMirror relationships. + - Not supported with ZAPI. + type: str + version_added: '22.2.0' + common_snapshot_schedule: + description: + - Specifies the common Snapshot copy schedule associated with the policy, only required for strict_sync_mirror and sync_mirror. + - Not supported with REST. + type: str + owner: + description: + - Specifies the owner of the SnapMirror policy. + - Not supported with REST. + choices: ['cluster_admin', 'vserver_admin'] + type: str + is_network_compression_enabled: + description: + - Specifies whether network compression is enabled for transfers. + type: bool + ignore_atime: + description: + - Specifies whether incremental transfers will ignore files which have only their access time changed. Applies to SnapMirror vault relationships only. + - Not supported with REST. + type: bool + restart: + description: + - Defines the behavior of SnapMirror if an interrupted transfer exists, applies to data protection only. + - Not supported with REST. + choices: ['always', 'never', 'default'] + type: str + snapmirror_label: + description: + - SnapMirror policy rule label. 
+ - Required when defining policy rules. + - Use an empty list to remove all user-defined rules. + type: list + elements: str + version_added: '20.7.0' + keep: + description: + - SnapMirror policy rule retention count for snapshots created. + - Required when defining policy rules. + type: list + elements: int + version_added: '20.7.0' + prefix: + description: + - SnapMirror policy rule prefix. + - Optional when defining policy rules. + - Set to '' to not set or remove an existing custom prefix. + - Prefix name should be unique within the policy. + - When specifying a custom prefix, schedule must also be specified. + type: list + elements: str + version_added: '20.7.0' + schedule: + description: + - SnapMirror policy rule schedule. + - Optional when defining policy rules. + - Set to '' to not set or remove a schedule. + - When specifying a schedule a custom prefix can be set otherwise the prefix will be set to snapmirror_label. + type: list + elements: str + version_added: '20.7.0' + identity_preservation: + description: + - Specifies which configuration of the source SVM is replicated to the destination SVM. + - This property is applicable only for SVM data protection with "async" policy type. + - Only supported with REST. + type: str + choices: ['full', 'exclude_network_config', 'exclude_network_and_protocol_config'] + version_added: '22.0.0' + copy_all_source_snapshots: + description: + - Specifies whether all source Snapshot copies should be copied to the destination on a transfer rather than specifying specific retentions. + - This property is applicable only to async policies. + - Property can only be set to 'true'. + - Only supported with REST and requires ONTAP 9.10.1 or later. + type: bool + version_added: '22.1.0' + copy_latest_source_snapshot: + description: + - Specifies that the latest source Snapshot copy (created by SnapMirror before the transfer begins) should be copied to the destination on a transfer. 
+ - Retention properties cannot be specified along with this property. + - Property can only be set to 'true'. + - Only supported with REST and requires ONTAP 9.11.1 or later. + type: bool + version_added: '22.2.0' + create_snapshot_on_source: + description: + - Specifies whether a new Snapshot copy should be created on the source at the beginning of an update or resync operation. + - This property is applicable only to async policies. + - Property can only be set to 'false'. + - Only supported with REST and requires ONTAP 9.11.1 or later. + type: bool + version_added: '22.2.0' + sync_type: + description: + - This property is only applicable to sync policy types. + - If the "sync_type" is "sync" then a write success is returned to the client + after writing the data to the primary endpoint and before writing the data to the secondary endpoint. + - If the "sync_type" is "strict_sync" then a write success is returned to the client after writing the data to the both primary and secondary endpoints. + - The "sync_type" of "automated_failover" can be associated with a SnapMirror relationship that has Consistency Group as the endpoint and + it requires ONTAP 9.7 or later. + - Only supported with REST. + type: str + choices: ['sync', 'strict_sync', 'automated_failover'] + version_added: '22.2.0' + +notes: + - In REST, policy types 'mirror_vault', 'vault' and 'async_mirror' are mapped to 'async' policy_type. + - In REST, policy types 'sync_mirror' and 'strict_sync_mirror' are mapped to 'sync' policy_type. + - In REST, use policy_type 'async' to configure 'mirror-vault' in CLI. + - In REST, use policy_type 'async' with 'copy_all_source_snapshots' to configure 'async-mirror' with + 'all_source_snapshots' in CLI. + - In REST, use policy_type 'async' with 'copy_latest_source_snapshot' to configure 'async-mirror' without + 'all_source_snapshots' in CLI. + - In REST, use policy_type 'async' with 'create_snapshot_on_source' to configure 'vault' in CLI. 
+ - In REST, use policy_type 'sync' with sync_type 'sync' to configure 'sync-mirror' in CLI. + - In REST, use policy_type 'sync' with sync_type 'strict_sync' to configure 'strict-sync-mirror' in CLI. + - In REST, use policy_type 'sync' with sync_type 'automated_failover' to configure 'automated-failover' in CLI. +""" + +EXAMPLES = """ + - name: Create SnapMirror policy + na_ontap_snapmirror_policy: + state: present + vserver: "SVM1" + policy_name: "ansible_policy" + policy_type: "mirror_vault" + comment: "created by ansible" + transfer_schedule: "daily" # when using REST + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Modify SnapMirror policy + na_ontap_snapmirror_policy: + state: present + vserver: "SVM1" + policy_name: "ansible_policy" + policy_type: "async_mirror" + transfer_priority: "low" + transfer_schedule: "weekly" # when using REST + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Create SnapMirror policy with basic rules + na_ontap_snapmirror_policy: + state: present + vserver: "SVM1" + policy_name: "ansible_policy" + policy_type: "async_mirror" + snapmirror_label: ['daily', 'weekly', 'monthly'] + keep: [7, 5, 12] + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Create SnapMirror policy with rules and schedules (no schedule for daily rule) + na_ontap_snapmirror_policy: + state: present + vserver: "SVM1" + policy_name: "ansible_policy" + policy_type: "mirror_vault" + snapmirror_label: ['daily', 'weekly', 'monthly'] + keep: [7, 5, 12] + schedule: ['','weekly','monthly'] + prefix: ['','','monthly_mv'] + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Modify SnapMirror policy with rules, remove existing schedules 
    def __init__(self):
        """
        Initialize the Ontap SnapMirror policy class.

        Builds the argument spec, selects REST vs ZAPI, and rejects options
        that are not supported by the selected transport.
        """

        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            vserver=dict(required=False, type='str'),
            policy_name=dict(required=True, type='str', aliases=['name']),
            comment=dict(required=False, type='str'),
            policy_type=dict(required=False, type='str',
                             choices=['vault', 'async_mirror', 'mirror_vault', 'strict_sync_mirror', 'sync_mirror', 'sync', 'async']),
            tries=dict(required=False, type='str'),
            transfer_priority=dict(required=False, type='str', choices=['low', 'normal']),
            transfer_schedule=dict(required=False, type='str'),
            common_snapshot_schedule=dict(required=False, type='str'),
            ignore_atime=dict(required=False, type='bool'),
            is_network_compression_enabled=dict(required=False, type='bool'),
            owner=dict(required=False, type='str', choices=['cluster_admin', 'vserver_admin']),
            restart=dict(required=False, type='str', choices=['always', 'never', 'default']),
            snapmirror_label=dict(required=False, type="list", elements="str"),
            keep=dict(required=False, type="list", elements="int"),
            prefix=dict(required=False, type="list", elements="str"),
            schedule=dict(required=False, type="list", elements="str"),
            identity_preservation=dict(required=False, type="str", choices=['full', 'exclude_network_config', 'exclude_network_and_protocol_config']),
            copy_all_source_snapshots=dict(required=False, type='bool'),
            copy_latest_source_snapshot=dict(required=False, type='bool'),
            create_snapshot_on_source=dict(required=False, type='bool'),
            sync_type=dict(required=False, type="str", choices=['sync', 'strict_sync', 'automated_failover']),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
            # these four options select mutually incompatible policy flavors
            mutually_exclusive=[('copy_all_source_snapshots', 'copy_latest_source_snapshot', 'create_snapshot_on_source', 'sync_type')]
        )

        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # API should be used for ONTAP 9.6 or higher, Zapi for lower version
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        # some attributes are not supported in earlier REST implementation
        unsupported_rest_properties = ['owner', 'restart', 'transfer_priority', 'tries', 'ignore_atime',
                                       'common_snapshot_schedule']
        # these options require a minimum ONTAP version when using REST
        partially_supported_rest_properties = [['copy_all_source_snapshots', (9, 10, 1)], ['copy_latest_source_snapshot', (9, 11, 1)],
                                               ['create_snapshot_on_source', (9, 11, 1)]]
        self.unsupported_zapi_properties = ['identity_preservation', 'copy_all_source_snapshots', 'copy_latest_source_snapshot', 'sync_type',
                                            'transfer_schedule']
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties)
        self.validate_policy_type()
        if self.use_rest:
            self.scope = self.set_scope()
        else:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            # reject REST-only options up front rather than failing later in ZAPI calls
            for unsupported_zapi_property in self.unsupported_zapi_properties:
                if self.parameters.get(unsupported_zapi_property) is not None:
                    msg = "Error: %s option is not supported with ZAPI. It can only be used with REST." % unsupported_zapi_property
                    self.module.fail_json(msg=msg)
            if 'vserver' not in self.parameters:
                self.module.fail_json(msg="Error: vserver is a required parameter when using ZAPI.")
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def set_scope(self):
        """Return 'svm' or 'cluster' scope for the policy (REST only).

        A vserver name that matches a data vserver gives SVM scope; a missing
        or non-matching name falls back to cluster scope.
        """
        if self.parameters.get('vserver') is None:
            # no vserver option: cluster scoped policy
            return 'cluster'
        record, error = rest_vserver.get_vserver(self.rest_api, self.parameters['vserver'])
        if error:
            self.module.fail_json(msg='Error getting vserver %s info: %s' % (self.parameters['vserver'], error))
        if record:
            return 'svm'
        # name did not match a data vserver (e.g. cluster name was given)
        self.module.warn("vserver %s is not a data vserver, assuming cluster scope" % self.parameters['vserver'])
        return 'cluster'
Reason - 13001:' in to_native(error): + # policy does not exist + return None + self.module.fail_json(msg='Error getting snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)), + exception=traceback.format_exc()) + + return_value = None + if result and result.get_child_by_name('attributes-list'): + snapmirror_policy_attributes = result['attributes-list']['snapmirror-policy-info'] + + return_value = { + 'policy_name': snapmirror_policy_attributes['policy-name'], + 'tries': snapmirror_policy_attributes['tries'], + 'transfer_priority': snapmirror_policy_attributes['transfer-priority'], + 'is_network_compression_enabled': self.na_helper.get_value_for_bool(True, + snapmirror_policy_attributes['is-network-compression-enabled']), + 'restart': snapmirror_policy_attributes['restart'], + 'ignore_atime': self.na_helper.get_value_for_bool(True, snapmirror_policy_attributes['ignore-atime']), + 'vserver': snapmirror_policy_attributes['vserver-name'], + 'comment': '', + 'snapmirror_label': [], + 'keep': [], + 'prefix': [], + 'schedule': [], + } + if snapmirror_policy_attributes.get_child_content('comment') is not None: + return_value['comment'] = snapmirror_policy_attributes['comment'] + + if snapmirror_policy_attributes.get_child_content('type') is not None: + return_value['policy_type'] = snapmirror_policy_attributes['type'] + + if snapmirror_policy_attributes.get_child_content('common-snapshot-schedule') is not None: + return_value['common_snapshot_schedule'] = snapmirror_policy_attributes['common-snapshot-schedule'] + + if snapmirror_policy_attributes.get_child_by_name('snapmirror-policy-rules'): + for rule in snapmirror_policy_attributes['snapmirror-policy-rules'].get_children(): + # Ignore builtin rules + if rule.get_child_content('snapmirror-label') in ["sm_created", "all_source_snapshots"]: + continue + + return_value['snapmirror_label'].append(rule.get_child_content('snapmirror-label')) + 
return_value['keep'].append(int(rule.get_child_content('keep'))) + + prefix = rule.get_child_content('prefix') + if prefix is None or prefix == '-': + prefix = '' + return_value['prefix'].append(prefix) + + schedule = rule.get_child_content('schedule') + if schedule is None or schedule == '-': + schedule = '' + return_value['schedule'].append(schedule) + + return return_value + + def get_snapmirror_policy_rest(self): + query = {'fields': 'uuid,name,svm.name,comment,network_compression_enabled,type,retention,identity_preservation,sync_type,transfer_schedule,', + 'name': self.parameters['policy_name'], + 'scope': self.scope} + if self.scope == 'svm': + query['svm.name'] = self.parameters['vserver'] + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + query['fields'] += 'copy_all_source_snapshots,' + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 1): + query['fields'] += 'copy_latest_source_snapshot,create_snapshot_on_source' + api = "snapmirror/policies" + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg='Error getting snapmirror policy: %s' % error) + return self.format_record(record) if record else None + + def format_record(self, record): + return_value = { + 'uuid': record['uuid'], + 'vserver': self.na_helper.safe_get(record, ['svm', 'name']), + 'policy_name': record['name'], + 'comment': '', + 'is_network_compression_enabled': False, + 'snapmirror_label': [], + 'keep': [], + 'prefix': [], + 'schedule': [], + 'identity_preservation': '', + 'copy_all_source_snapshots': False, + 'copy_latest_source_snapshot': False, + 'transfer_schedule': '', + } + if 'type' in record: + return_value['policy_type'] = record['type'] + if 'network_compression_enabled' in record: + return_value['is_network_compression_enabled'] = record['network_compression_enabled'] + if 'comment' in record: + return_value['comment'] = record['comment'] + if 'retention' in record: + for rule in 
record['retention']: + return_value['snapmirror_label'].append(rule['label']) + return_value['keep'].append(int(rule['count'])) + if 'prefix' in rule and rule['prefix'] != '-': + return_value['prefix'].append(rule['prefix']) + else: + return_value['prefix'].append('') + if 'creation_schedule' in rule and rule['creation_schedule']['name'] != '-': + return_value['schedule'].append(rule['creation_schedule']['name']) + else: + return_value['schedule'].append('') + if 'identity_preservation' in record: + return_value['identity_preservation'] = record['identity_preservation'] + if 'sync_type' in record: + return_value['sync_type'] = record['sync_type'] + if 'copy_all_source_snapshots' in record: + return_value['copy_all_source_snapshots'] = record['copy_all_source_snapshots'] + if 'copy_latest_source_snapshot' in record: + return_value['copy_latest_source_snapshot'] = record['copy_latest_source_snapshot'] + if 'create_snapshot_on_source' in record: + return_value['create_snapshot_on_source'] = record['create_snapshot_on_source'] + if 'transfer_schedule' in record: + return_value['transfer_schedule'] = record['transfer_schedule']['name'] + return return_value + + def validate_parameters(self): + """ + Validate snapmirror policy rules + :return: None + """ + + # For snapmirror policy rules, 'snapmirror_label' is required. + if 'snapmirror_label' in self.parameters: + + # Check size of 'snapmirror_label' list is 0-10. Can have zero rules. + # Take builtin 'sm_created' rule into account for 'mirror_vault'. 
+ if (('policy_type' in self.parameters and self.parameters['policy_type'] == 'mirror_vault' and len(self.parameters['snapmirror_label']) > 9) + or len(self.parameters['snapmirror_label']) > 10): + self.module.fail_json(msg="Error: A SnapMirror Policy can have up to a maximum of " + "10 rules (including builtin rules), with a 'keep' value " + "representing the maximum number of Snapshot copies for each rule") + + # 'keep' must be supplied as long as there is at least one snapmirror_label + if len(self.parameters['snapmirror_label']) > 0 and 'keep' not in self.parameters: + self.module.fail_json(msg="Error: Missing 'keep' parameter. When specifying the " + "'snapmirror_label' parameter, the 'keep' parameter must " + "also be supplied") + + # Make sure other rule values match same number of 'snapmirror_label' values. + for rule_parameter in ['keep', 'prefix', 'schedule']: + if rule_parameter in self.parameters: + if len(self.parameters['snapmirror_label']) > len(self.parameters[rule_parameter]): + self.module.fail_json(msg="Error: Each 'snapmirror_label' value must have " + "an accompanying '%s' value" % rule_parameter) + if len(self.parameters[rule_parameter]) > len(self.parameters['snapmirror_label']): + self.module.fail_json(msg="Error: Each '%s' value must have an accompanying " + "'snapmirror_label' value" % rule_parameter) + else: + # 'snapmirror_label' not supplied. + # Bail out if other rule parameters have been supplied. + for rule_parameter in ['keep', 'prefix', 'schedule']: + if rule_parameter in self.parameters: + self.module.fail_json(msg="Error: Missing 'snapmirror_label' parameter. When " + "specifying the '%s' parameter, the 'snapmirror_label' " + "parameter must also be supplied" % rule_parameter) + + # Schedule must be supplied if prefix is supplied. + if 'prefix' in self.parameters and 'schedule' not in self.parameters: + self.module.fail_json(msg="Error: Missing 'schedule' parameter. 
When " + "specifying the 'prefix' parameter, the 'schedule' " + "parameter must also be supplied") + + def create_snapmirror_policy(self, body=None): + """ + Creates a new storage efficiency policy + """ + if self.use_rest: + api = "snapmirror/policies" + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating snapmirror policy: %s' % error) + else: + snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-create") + snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name']) + if 'policy_type' in self.parameters.keys(): + snapmirror_policy_obj.add_new_child("type", self.parameters['policy_type']) + snapmirror_policy_obj = self.create_snapmirror_policy_obj(snapmirror_policy_obj) + + try: + self.server.invoke_successfully(snapmirror_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)), + exception=traceback.format_exc()) + + def create_snapmirror_policy_obj(self, snapmirror_policy_obj): + if 'comment' in self.parameters.keys(): + snapmirror_policy_obj.add_new_child("comment", self.parameters['comment']) + if 'common_snapshot_schedule' in self.parameters.keys() and self.parameters['policy_type'] in ('sync_mirror', 'strict_sync_mirror'): + snapmirror_policy_obj.add_new_child("common-snapshot-schedule", self.parameters['common_snapshot_schedule']) + if 'ignore_atime' in self.parameters.keys(): + snapmirror_policy_obj.add_new_child("ignore-atime", self.na_helper.get_value_for_bool(False, self.parameters['ignore_atime'])) + if 'is_network_compression_enabled' in self.parameters.keys(): + snapmirror_policy_obj.add_new_child("is-network-compression-enabled", + self.na_helper.get_value_for_bool(False, self.parameters['is_network_compression_enabled'])) + if 'owner' in self.parameters.keys(): + snapmirror_policy_obj.add_new_child("owner", 
self.parameters['owner']) + if 'restart' in self.parameters.keys(): + snapmirror_policy_obj.add_new_child("restart", self.parameters['restart']) + if 'transfer_priority' in self.parameters.keys(): + snapmirror_policy_obj.add_new_child("transfer-priority", self.parameters['transfer_priority']) + if 'tries' in self.parameters.keys(): + snapmirror_policy_obj.add_new_child("tries", self.parameters['tries']) + return snapmirror_policy_obj + + def build_body_for_create(self): + + body = {'name': self.parameters['policy_name']} + if self.parameters.get('vserver') is not None: + body['svm'] = {'name': self.parameters['vserver']} + # if policy type is omitted, REST assumes async + policy_type = 'async' + if 'policy_type' in self.parameters: + if 'async' in self.parameters['policy_type']: + policy_type = 'async' + elif 'sync' in self.parameters['policy_type']: + policy_type = 'sync' + body['sync_type'] = 'sync' + if 'sync_type' in self.parameters: + body['sync_type'] = self.parameters['sync_type'] + body['type'] = policy_type + if 'copy_all_source_snapshots' in self.parameters: + body["copy_all_source_snapshots"] = self.parameters['copy_all_source_snapshots'] + if 'copy_latest_source_snapshot' in self.parameters: + body["copy_latest_source_snapshot"] = self.parameters['copy_latest_source_snapshot'] + if 'create_snapshot_on_source' in self.parameters: + # To set 'create_snapshot_on_source' as 'False' requires retention objects label(snapmirror_label) and count(keep) + snapmirror_policy_retention_objs = [] + for index, rule in enumerate(self.parameters['snapmirror_label']): + retention = {'label': rule, 'count': str(self.parameters['keep'][index])} + if 'prefix' in self.parameters and self.parameters['prefix'] != '': + retention['prefix'] = self.parameters['prefix'][index] + if 'schedule' in self.parameters and self.parameters['schedule'] != '': + retention['creation_schedule'] = {'name': self.parameters['schedule'][index]} + snapmirror_policy_retention_objs.append(retention) 
+ body['retention'] = snapmirror_policy_retention_objs + body["create_snapshot_on_source"] = self.parameters['create_snapshot_on_source'] + + return self.build_body_for_create_or_modify(policy_type, body) + + def build_body_for_create_or_modify(self, policy_type, body=None): + + if body is None: + body = {} + if 'comment' in self.parameters.keys(): + body["comment"] = self.parameters['comment'] + if 'is_network_compression_enabled' in self.parameters: + if policy_type == 'sync': + self.module.fail_json(msg="Error: input parameter network_compression_enabled is not valid for SnapMirror policy type sync") + body["network_compression_enabled"] = self.parameters['is_network_compression_enabled'] + for option in ('identity_preservation', 'transfer_schedule'): + if option in self.parameters: + if policy_type == 'sync': + self.module.fail_json(msg='Error: %s is only supported with async (async) policy_type, got: %s' + % (option, self.parameters['policy_type'])) + body[option] = self.parameters[option] + return body + + def create_snapmirror_policy_retention_obj_for_rest(self, rules=None): + """ + Create SnapMirror policy retention REST object. + :param list rules: e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': 'daily', 'schedule': 'daily'}, ... ] + :return: List of retention REST objects. + e.g. [{'label': 'daily', 'count': 7, 'prefix': 'daily', 'creation_schedule': {'name': 'daily'}}, ... 
] + """ + snapmirror_policy_retention_objs = [] + if rules is not None: + for rule in rules: + retention = {'label': rule['snapmirror_label'], 'count': str(rule['keep'])} + if 'prefix' in rule and rule['prefix'] != '': + retention['prefix'] = rule['prefix'] + if 'schedule' in rule and rule['schedule'] != '': + retention['creation_schedule'] = {'name': rule['schedule']} + snapmirror_policy_retention_objs.append(retention) + return snapmirror_policy_retention_objs + + def delete_snapmirror_policy(self, uuid=None): + """ + Deletes a snapmirror policy + """ + if self.use_rest: + api = "snapmirror/policies" + dummy, error = rest_generic.delete_async(self.rest_api, api, uuid) + if error: + self.module.fail_json(msg='Error deleting snapmirror policy: %s' % error) + else: + snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-delete") + snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name']) + + try: + self.server.invoke_successfully(snapmirror_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_snapmirror_policy(self, uuid=None, body=None): + """ + Modifies a snapmirror policy + """ + if self.use_rest: + if not body: + return + api = "snapmirror/policies" + dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body) + if error: + self.module.fail_json(msg='Error modifying snapmirror policy: %s' % error) + else: + snapmirror_policy_obj = netapp_utils.zapi.NaElement("snapmirror-policy-modify") + snapmirror_policy_obj = self.create_snapmirror_policy_obj(snapmirror_policy_obj) + # Only modify snapmirror policy if a specific snapmirror policy attribute needs + # modifying. It may be that only snapmirror policy rules are being modified. 
+ if snapmirror_policy_obj.get_children(): + snapmirror_policy_obj.add_new_child("policy-name", self.parameters['policy_name']) + + try: + self.server.invoke_successfully(snapmirror_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying snapmirror policy %s: %s' % (self.parameters['policy_name'], to_native(error)), + exception=traceback.format_exc()) + + def identify_new_snapmirror_policy_rules(self, current=None): + """ + Identify new rules that should be added. + :return: List of new rules to be added + e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ] + """ + new_rules = [] + if 'snapmirror_label' in self.parameters: + for snapmirror_label in self.parameters['snapmirror_label']: + snapmirror_label = snapmirror_label.strip() + + # Construct new rule. prefix and schedule are optional. + snapmirror_label_index = self.parameters['snapmirror_label'].index(snapmirror_label) + rule = dict({ + 'snapmirror_label': snapmirror_label, + 'keep': self.parameters['keep'][snapmirror_label_index] + }) + if 'prefix' in self.parameters: + rule['prefix'] = self.parameters['prefix'][snapmirror_label_index] + else: + rule['prefix'] = '' + if 'schedule' in self.parameters: + rule['schedule'] = self.parameters['schedule'][snapmirror_label_index] + else: + rule['schedule'] = '' + + if current is None or 'snapmirror_label' not in current or snapmirror_label not in current['snapmirror_label']: + # Rule doesn't exist. Add new rule. + new_rules.append(rule) + return new_rules + + def identify_obsolete_snapmirror_policy_rules(self, current=None): + """ + Identify existing rules that should be deleted + :return: List of rules to be deleted + e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ] + """ + obsolete_rules = [] + if 'snapmirror_label' in self.parameters and current is not None and 'snapmirror_label' in current: + # Iterate existing rules. 
+ for snapmirror_label in current['snapmirror_label']: + snapmirror_label = snapmirror_label.strip() + if snapmirror_label not in [item.strip() for item in self.parameters['snapmirror_label']]: + # Existing rule isn't in parameters. Delete existing rule. + current_snapmirror_label_index = current['snapmirror_label'].index(snapmirror_label) + rule = dict({ + 'snapmirror_label': snapmirror_label, + 'keep': current['keep'][current_snapmirror_label_index], + 'prefix': current['prefix'][current_snapmirror_label_index], + 'schedule': current['schedule'][current_snapmirror_label_index] + }) + obsolete_rules.append(rule) + return obsolete_rules + + def set_rule(self, rule, key, current, snapmirror_label_index, current_snapmirror_label_index): + if key not in self.parameters or self.parameters[key][snapmirror_label_index] == current[key][current_snapmirror_label_index]: + modified = False + rule[key] = current[key][current_snapmirror_label_index] + else: + modified = True + rule[key] = self.parameters[key][snapmirror_label_index] + return modified + + def identify_modified_snapmirror_policy_rules(self, current=None): + """ + Identify self.parameters rules that will be modified or not. + :return: List of 'modified' rules and a list of 'unmodified' rules + e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, ... ] + """ + modified_rules = [] + unmodified_rules = [] + if 'snapmirror_label' in self.parameters: + for snapmirror_label in self.parameters['snapmirror_label']: + snapmirror_label = snapmirror_label.strip() + if current is not None and 'snapmirror_label' in current and snapmirror_label in current['snapmirror_label']: + # Rule exists. Identify whether it requires modification or not. + modified = False + rule = {'snapmirror_label': snapmirror_label} + # Get indexes of current and supplied rule. 
+ current_snapmirror_label_index = current['snapmirror_label'].index(snapmirror_label) + snapmirror_label_index = self.parameters['snapmirror_label'].index(snapmirror_label) + + # Check if keep modified + if self.set_rule(rule, 'keep', current, snapmirror_label_index, current_snapmirror_label_index): + modified = True + + # Check if prefix modified + if self.set_rule(rule, 'prefix', current, snapmirror_label_index, current_snapmirror_label_index): + modified = True + + # Check if schedule modified + if self.set_rule(rule, 'schedule', current, snapmirror_label_index, current_snapmirror_label_index): + modified = True + + if modified: + modified_rules.append(rule) + else: + unmodified_rules.append(rule) + return modified_rules, unmodified_rules + + def identify_snapmirror_policy_rules_with_schedule(self, rules=None): + """ + Identify rules that are using a schedule or not. At least one + non-schedule rule must be added to a policy before schedule rules + are added. + :return: List of rules with schedules and a list of rules without schedules + e.g. [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': 'daily', 'schedule': 'daily'}, ... ], + [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': '', 'schedule': ''}, ... 
] + """ + schedule_rules = [] + non_schedule_rules = [] + if rules is not None: + for rule in rules: + if 'schedule' in rule: + schedule_rules.append(rule) + else: + non_schedule_rules.append(rule) + return schedule_rules, non_schedule_rules + + def modify_snapmirror_policy_rules(self, current=None, uuid=None): + """ + Modify existing rules in snapmirror policy + :return: None + """ + # Need 'snapmirror_label' to add/modify/delete rules + if 'snapmirror_label' not in self.parameters: + return + + obsolete_rules = self.identify_obsolete_snapmirror_policy_rules(current) + new_rules = self.identify_new_snapmirror_policy_rules(current) + modified_rules, unmodified_rules = self.identify_modified_snapmirror_policy_rules(current) + self.rest_api.log_debug('OBS', obsolete_rules) + self.rest_api.log_debug('NEW', new_rules) + self.rest_api.log_debug('MOD', modified_rules) + self.rest_api.log_debug('UNM', unmodified_rules) + + if self.use_rest: + return self.modify_snapmirror_policy_rules_rest(uuid, obsolete_rules, unmodified_rules, modified_rules, new_rules) + + delete_rules = obsolete_rules + modified_rules + add_schedule_rules, add_non_schedule_rules = self.identify_snapmirror_policy_rules_with_schedule(new_rules + modified_rules) + # Delete rules no longer required or modified rules that will be re-added. + for rule in delete_rules: + options = {'policy-name': self.parameters['policy_name'], + 'snapmirror-label': rule['snapmirror_label']} + self.modify_snapmirror_policy_rule(options, 'snapmirror-policy-remove-rule') + + # Add rules. At least one non-schedule rule must exist before + # a rule with a schedule can be added, otherwise zapi will complain. 
+ for rule in add_non_schedule_rules + add_schedule_rules: + options = {'policy-name': self.parameters['policy_name'], + 'snapmirror-label': rule['snapmirror_label'], + 'keep': str(rule['keep'])} + if 'prefix' in rule and rule['prefix'] != '': + options['prefix'] = rule['prefix'] + if 'schedule' in rule and rule['schedule'] != '': + options['schedule'] = rule['schedule'] + self.modify_snapmirror_policy_rule(options, 'snapmirror-policy-add-rule') + + def modify_snapmirror_policy_rules_rest(self, uuid, obsolete_rules, unmodified_rules, modified_rules, new_rules): + api = "snapmirror/policies" + if not modified_rules and not new_rules and not obsolete_rules: + return + rules = unmodified_rules + modified_rules + new_rules + # This will also delete all existing rules if everything is now obsolete + body = {'retention': self.create_snapmirror_policy_retention_obj_for_rest(rules)} + dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body) + if error: + self.module.fail_json(msg='Error modifying snapmirror policy rules: %s' % error) + + def modify_snapmirror_policy_rule(self, options, zapi): + """ + Add, modify or remove a rule to/from a snapmirror policy + """ + snapmirror_obj = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options) + try: + self.server.invoke_successfully(snapmirror_obj, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying snapmirror policy rule %s: %s' % + (self.parameters['policy_name'], to_native(error)), + exception=traceback.format_exc()) + + def fail_invalid_option(self, policy_type, option): + self.module.fail_json(msg="Error: option %s is not supported with policy type %s." 
% (option, policy_type)) + + def validate_async_options(self): + if 'policy_type' in self.parameters: + disallowed_options = { + 'vault': ['copy_latest_source_snapshot', 'copy_all_source_snapshots'], + 'mirror-vault': ['copy_latest_source_snapshot', 'copy_all_source_snapshots', 'create_snapshot_on_source'], + 'async_mirror': ['create_snapshot_on_source'], + 'async': [], + 'sync': ['copy_latest_source_snapshot', 'copy_all_source_snapshots', 'create_snapshot_on_source'], + } + try: + options = disallowed_options[self.parameters['policy_type']] + except KeyError: + options = disallowed_options['sync'] + for option in options: + if option in self.parameters: + self.fail_invalid_option(self.parameters['policy_type'], option) + + if self.use_rest: + if 'copy_all_source_snapshots' in self.parameters and self.parameters.get('copy_all_source_snapshots') is not True: + self.module.fail_json(msg='Error: the property copy_all_source_snapshots can only be set to true when present') + if 'copy_latest_source_snapshot' in self.parameters and self.parameters.get('copy_latest_source_snapshot') is not True: + self.module.fail_json(msg='Error: the property copy_latest_source_snapshot can only be set to true when present') + if 'create_snapshot_on_source' in self.parameters and self.parameters['create_snapshot_on_source'] is not False: + self.module.fail_json(msg='Error: the property create_snapshot_on_source can only be set to false when present') + + def validate_policy_type(self): + # policy_type is only required for create or modify + if self.parameters['state'] != 'present': + return + self.validate_async_options() + if 'policy_type' in self.parameters: + if self.use_rest: + # Policy types 'mirror_vault', 'vault', 'async_mirror' are mapped to async policy type + if self.parameters['policy_type'] == 'vault': + self.parameters['policy_type'] = 'async' + self.parameters['create_snapshot_on_source'] = False + self.module.warn("policy type changed to 'async' with 
'create_snapshot_on_source' set to False") + if self.parameters['policy_type'] == 'async_mirror': + # async_mirror accepts two choices with copy_all_source_snapshots or copy_latest_source_snapshot + self.parameters['policy_type'] = 'async' + if 'copy_latest_source_snapshot' not in self.parameters or 'copy_all_source_snapshots' not in self.parameters: + self.parameters['copy_latest_source_snapshot'] = True + self.module.warn("policy type changed to 'async' with copy_latest_source_snapshot set to True. " + "Use async with copy_latest_source_snapshot or copy_all_source_snapshots for async-mirror") + if 'copy_all_source_snapshots' in self.parameters or 'copy_latest_source_snapshot' in self.parameters: + if 'snapmirror_label' in self.parameters or 'keep' in self.parameters or 'prefix' in self.parameters or 'schedule' in self.parameters: + self.module.fail_json(msg='Error: Retention properties cannot be specified along with copy_all_source_snapshots or ' + 'copy_latest_source_snapshot properties') + + if 'create_snapshot_on_source' in self.parameters: + if 'snapmirror_label' not in self.parameters or 'keep' not in self.parameters: + self.module.fail_json(msg="Error: The properties snapmirror_label and keep must be specified with " + "create_snapshot_on_source set to false") + if self.parameters['policy_type'] == 'mirror_vault': + self.parameters['policy_type'] = 'async' + # Policy types ''sync_mirror', 'strict_sync_mirror' are mapped to sync policy type + if self.parameters['policy_type'] in ('sync_mirror', 'strict_sync_mirror'): + self.parameters['sync_type'] = 'sync' if self.parameters['policy_type'] == 'sync_mirror' else 'strict_sync' + self.parameters['policy_type'] = 'sync' + if self.parameters['policy_type'] != 'sync' and 'sync_type' in self.parameters: + self.module.fail_json(msg="Error: 'sync_type' is only applicable for sync policy_type") + + elif self.parameters['policy_type'] in ['async', 'sync']: + self.module.fail_json(msg='Error: The policy types async and 
sync are not supported in ZAPI.') + + def get_actions(self): + current, modify = self.get_snapmirror_policy(), None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if current and cd_action is None and self.parameters['state'] == 'present': + # Inconsistency in REST API - POST requires a vserver, but GET does not return it + current.pop('vserver') + modify = self.na_helper.get_modified_attributes(current, self.parameters) + for property in ('policy_type', 'copy_all_source_snapshots', 'copy_latest_source_snapshot', 'sync_type', 'create_snapshot_on_source'): + if property in modify: + self.module.fail_json(msg='Error: The policy property %s cannot be modified from %s to %s' + % (property, current.get(property), modify[property])) + + body = None + modify_body = any(key not in ('keep', 'prefix', 'schedule', 'snapmirror_label', 'copy_all_source_snapshots', 'copy_latest_source_snapshot', + 'sync_type', 'create_snapshot_on_source') for key in modify) if modify else False + if self.na_helper.changed and (cd_action == 'create' or modify): + # report any error even in check_mode + self.validate_parameters() + if self.use_rest and (cd_action == 'create' or modify_body): + body = self.build_body_for_create_or_modify(current.get('policy_type')) if modify_body else self.build_body_for_create() + return cd_action, modify, current, body + + def apply(self): + cd_action, modify, current, body = self.get_actions() + if self.na_helper.changed and not self.module.check_mode: + uuid = None + if cd_action == 'create': + self.create_snapmirror_policy(body) + if self.use_rest: + current = self.get_snapmirror_policy() + if not current: + self.module.fail_json(msg="Error: policy %s not present after create." 
% self.parameters['policy_name']) + uuid = current['uuid'] + if 'create_snapshot_on_source' not in self.parameters: + self.modify_snapmirror_policy_rules(current, uuid) + elif cd_action == 'delete': + if self.use_rest: + uuid = current['uuid'] + self.delete_snapmirror_policy(uuid) + elif modify: + if self.use_rest: + uuid = current['uuid'] + self.modify_snapmirror_policy(uuid, body) + self.modify_snapmirror_policy_rules(current, uuid) + + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap SnapMirror policy object and runs the correct play task + """ + obj = NetAppOntapSnapMirrorPolicy() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py new file mode 100644 index 000000000..50d703abb --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot.py @@ -0,0 +1,437 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_snapshot +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: na_ontap_snapshot +short_description: NetApp ONTAP manage Snapshots +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Modify/Delete ONTAP snapshots +options: + state: + description: + - If you want to create/modify a snapshot, or delete it. + choices: ['present', 'absent'] + type: str + default: present + snapshot: + description: + - Name of the snapshot to be managed. + - The maximum string length is 256 characters. 
+ required: true + type: str + from_name: + description: + - Name of the existing snapshot to be renamed to. + version_added: 2.8.0 + type: str + volume: + description: + - Name of the volume on which the snapshot is to be created. + required: true + type: str + async_bool: + description: + - If true, the snapshot is to be created asynchronously. + type: bool + comment: + description: + - A human readable comment attached with the snapshot. + - The size of the comment can be at most 255 characters. + type: str + snapmirror_label: + description: + - A human readable SnapMirror Label attached with the snapshot. + - Size of the label can be at most 31 characters. + - Supported with REST on Ontap 9.7 or higher. + type: str + ignore_owners: + description: + - if this field is true, snapshot will be deleted even if some other processes are accessing it. + type: bool + snapshot_instance_uuid: + description: + - The 128 bit unique snapshot identifier expressed in the form of UUID. + type: str + vserver: + description: + - The Vserver name + required: true + type: str + expiry_time: + description: + - Snapshot expire time, only available with REST. + - format should be in the timezone configured with cluster. 
+ type: str + version_added: 21.8.0 +''' +EXAMPLES = """ + - name: create SnapShot + tags: + - create + netapp.ontap.na_ontap_snapshot: + state: present + snapshot: "{{ snapshot name }}" + volume: "{{ vol name }}" + comment: "i am a comment" + expiry_time: "2022-02-04T14:00:00-05:00" + vserver: "{{ vserver name }}" + username: "{{ netapp username }}" + password: "{{ netapp password }}" + hostname: "{{ netapp hostname }}" + - name: delete SnapShot + tags: + - delete + netapp.ontap.na_ontap_snapshot: + state: absent + snapshot: "{{ snapshot name }}" + volume: "{{ vol name }}" + vserver: "{{ vserver name }}" + username: "{{ netapp username }}" + password: "{{ netapp password }}" + hostname: "{{ netapp hostname }}" + - name: modify SnapShot + tags: + - modify + netapp.ontap.na_ontap_snapshot: + state: present + snapshot: "{{ snapshot name }}" + comment: "New comments are great" + volume: "{{ vol name }}" + vserver: "{{ vserver name }}" + username: "{{ netapp username }}" + password: "{{ netapp password }}" + hostname: "{{ netapp hostname }}" +""" + +RETURN = """ +""" + +import traceback +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +from ansible_collections.netapp.ontap.plugins.module_utils import rest_volume + + +class NetAppOntapSnapshot: + """ + Creates, modifies, and deletes a Snapshot + """ + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + from_name=dict(required=False, type='str'), + snapshot=dict(required=True, type="str"), + volume=dict(required=True, type="str"), + 
async_bool=dict(required=False, type="bool"), + comment=dict(required=False, type="str"), + snapmirror_label=dict(required=False, type="str"), + ignore_owners=dict(required=False, type="bool"), + snapshot_instance_uuid=dict(required=False, type="str"), + vserver=dict(required=True, type="str"), + expiry_time=dict(required=False, type="str") + + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + unsupported_rest_properties = ['async_bool', 'ignore_owners', 'snapshot_instance_uuid'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, [['snapmirror_label', (9, 7)]]) + + if not self.use_rest: + if self.parameters.get('expiry_time'): + self.module.fail_json(msg="expiry_time is currently only supported with REST on Ontap 9.6 or higher") + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_snapshot(self, snapshot_name=None, volume_id=None): + """ + Checks to see if a snapshot exists or not + :return: Return True if a snapshot exists, False if it doesn't + """ + if self.use_rest: + api = ('storage/volumes/%s/snapshots' % volume_id) + params = { + 'svm.name': self.parameters['vserver'], + 'fields': 'uuid,comment,expiry_time,volume,name', + } + if self.parameters.get('snapmirror_label'): + params['fields'] += ',snapmirror_label' + params['name'] = snapshot_name or self.parameters['snapshot'] + snapshot, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching snapshot %s: %s' % + (params['name'], to_native(error)), + exception=traceback.format_exc()) + if snapshot: + return { + 
'uuid': snapshot['uuid'], + 'snapshot': snapshot['name'], + 'snapmirror_label': snapshot.get('snapmirror_label'), + 'expiry_time': snapshot.get('expiry_time'), + 'comment': snapshot.get('comment') + } + return None + + else: + if snapshot_name is None: + snapshot_name = self.parameters['snapshot'] + snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter") + desired_attr = netapp_utils.zapi.NaElement("desired-attributes") + snapshot_info = netapp_utils.zapi.NaElement('snapshot-info') + comment = netapp_utils.zapi.NaElement('comment') + snapmirror_label = netapp_utils.zapi.NaElement('snapmirror-label') + # add more desired attributes that are allowed to be modified + snapshot_info.add_child_elem(comment) + snapshot_info.add_child_elem(snapmirror_label) + desired_attr.add_child_elem(snapshot_info) + snapshot_obj.add_child_elem(desired_attr) + # compose query + query = netapp_utils.zapi.NaElement("query") + snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info") + snapshot_info_obj.add_new_child("name", snapshot_name) + snapshot_info_obj.add_new_child("volume", self.parameters['volume']) + snapshot_info_obj.add_new_child("vserver", self.parameters['vserver']) + query.add_child_elem(snapshot_info_obj) + snapshot_obj.add_child_elem(query) + try: + result = self.server.invoke_successfully(snapshot_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching snapshot %s: %s' % + (snapshot_name, to_native(error)), + exception=traceback.format_exc()) + return_value = None + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + attributes_list = result.get_child_by_name('attributes-list') + snap_info = attributes_list.get_child_by_name('snapshot-info') + return_value = { + 'comment': snap_info.get_child_content('comment'), + 'snapmirror_label': None + } + if snap_info.get_child_by_name('snapmirror-label'): + return_value['snapmirror_label'] = 
snap_info.get_child_content('snapmirror-label') + return return_value + + def create_snapshot(self, volume_id=None): + """ + Creates a new snapshot + """ + + if self.use_rest: + api = ('storage/volumes/%s/snapshots' % volume_id) + body = { + 'name': self.parameters['snapshot'], + 'svm': { + 'name': self.parameters['vserver'] + } + } + if self.parameters.get('comment'): + body['comment'] = self.parameters['comment'] + if self.parameters.get('snapmirror_label'): + body['snapmirror_label'] = self.parameters['snapmirror_label'] + if self.parameters.get('expiry_time'): + body['expiry_time'] = self.parameters['expiry_time'] + response, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error when creating snapshot: %s" % error) + + else: + snapshot_obj = netapp_utils.zapi.NaElement("snapshot-create") + + # set up required variables to create a snapshot + snapshot_obj.add_new_child("snapshot", self.parameters['snapshot']) + snapshot_obj.add_new_child("volume", self.parameters['volume']) + # Set up optional variables to create a snapshot + if self.parameters.get('async_bool'): + snapshot_obj.add_new_child("async", str(self.parameters['async_bool'])) + if self.parameters.get('comment'): + snapshot_obj.add_new_child("comment", self.parameters['comment']) + if self.parameters.get('snapmirror_label'): + snapshot_obj.add_new_child( + "snapmirror-label", self.parameters['snapmirror_label']) + try: + self.server.invoke_successfully(snapshot_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating snapshot %s: %s' % + (self.parameters['snapshot'], to_native(error)), + exception=traceback.format_exc()) + + def delete_snapshot(self, volume_id=None, uuid=None): + """ + Deletes an existing snapshot + """ + if self.use_rest: + api = ('storage/volumes/%s/snapshots/%s' % (volume_id, uuid)) + response, error = rest_generic.delete_async(self.rest_api, api, None) + if error: + 
self.module.fail_json(msg="Error when deleting snapshot: %s" % error) + + else: + snapshot_obj = netapp_utils.zapi.NaElement("snapshot-delete") + + # Set up required variables to delete a snapshot + snapshot_obj.add_new_child("snapshot", self.parameters['snapshot']) + snapshot_obj.add_new_child("volume", self.parameters['volume']) + # set up optional variables to delete a snapshot + if self.parameters.get('ignore_owners'): + snapshot_obj.add_new_child("ignore-owners", str(self.parameters['ignore_owners'])) + if self.parameters.get('snapshot_instance_uuid'): + snapshot_obj.add_new_child("snapshot-instance-uuid", self.parameters['snapshot_instance_uuid']) + try: + self.server.invoke_successfully(snapshot_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting snapshot %s: %s' % + (self.parameters['snapshot'], to_native(error)), + exception=traceback.format_exc()) + + def modify_snapshot(self, volume_id=None, uuid=None, rename=False): + """ + Modify an existing snapshot + :return: + """ + if self.use_rest: + api = 'storage/volumes/%s/snapshots/%s' % (volume_id, uuid) + body = {'name': self.parameters['snapshot']} if rename else {} + if self.parameters.get('comment'): + body['comment'] = self.parameters['comment'] + if self.parameters.get('snapmirror_label'): + body['snapmirror_label'] = self.parameters['snapmirror_label'] + if self.parameters.get('expiry_time'): + body['expiry_time'] = self.parameters['expiry_time'] + response, error = rest_generic.patch_async(self.rest_api, api, None, body) + if error: + self.module.fail_json(msg="Error when modifying snapshot: %s" % error) + + else: + snapshot_obj = netapp_utils.zapi.NaElement("snapshot-modify-iter") + # Create query object, this is the existing object + query = netapp_utils.zapi.NaElement("query") + snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info") + snapshot_info_obj.add_new_child("name", self.parameters['snapshot']) + 
snapshot_info_obj.add_new_child("vserver", self.parameters['vserver']) + query.add_child_elem(snapshot_info_obj) + snapshot_obj.add_child_elem(query) + + # this is what we want to modify in the snapshot object + attributes = netapp_utils.zapi.NaElement("attributes") + snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info") + snapshot_info_obj.add_new_child("name", self.parameters['snapshot']) + if self.parameters.get('comment'): + snapshot_info_obj.add_new_child("comment", self.parameters['comment']) + if self.parameters.get('snapmirror_label'): + snapshot_info_obj.add_new_child("snapmirror-label", self.parameters['snapmirror_label']) + attributes.add_child_elem(snapshot_info_obj) + snapshot_obj.add_child_elem(attributes) + try: + self.server.invoke_successfully(snapshot_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying snapshot %s: %s' % + (self.parameters['snapshot'], to_native(error)), + exception=traceback.format_exc()) + + def rename_snapshot(self): + """ + Rename the sanpshot + """ + snapshot_obj = netapp_utils.zapi.NaElement("snapshot-rename") + + # set up required variables to rename a snapshot + snapshot_obj.add_new_child("current-name", self.parameters['from_name']) + snapshot_obj.add_new_child("new-name", self.parameters['snapshot']) + snapshot_obj.add_new_child("volume", self.parameters['volume']) + try: + self.server.invoke_successfully(snapshot_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error renaming snapshot %s to %s: %s' % + (self.parameters['from_name'], self.parameters['snapshot'], to_native(error)), + exception=traceback.format_exc()) + + def get_volume_uuid(self): + """ + Get a volume's UUID + :return: uuid of the volume + """ + response, error = rest_volume.get_volume(self.rest_api, self.parameters['vserver'], self.parameters['volume']) + if error is not None: + self.module.fail_json(msg="Error getting volume info: %s" % error) + return 
response['uuid'] if response else None + + def apply(self): + """ + Check to see which play we should run + """ + volume_id = None + uuid = None + current = None + if not self.use_rest: + current = self.get_snapshot() + else: + volume_id = self.get_volume_uuid() + if volume_id is None: + self.module.fail_json(msg="Error: volume %s not found for vserver %s." % (self.parameters['volume'], self.parameters['vserver'])) + current = self.get_snapshot(volume_id=volume_id) + + rename = False + modify = {} + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + current = self.get_snapshot(self.parameters['from_name'], volume_id=volume_id) + if current is None: + self.module.fail_json(msg='Error renaming snapshot: %s - no snapshot with from_name: %s.' + % (self.parameters['snapshot'], self.parameters['from_name'])) + rename = True + cd_action = None + if cd_action is None: + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + uuid = current['uuid'] if current and self.use_rest else None + if rename and not self.use_rest: + # with REST, rename forces a change in modify for 'name' + self.rename_snapshot() + if cd_action == 'create': + self.create_snapshot(volume_id=volume_id) + elif cd_action == 'delete': + self.delete_snapshot(volume_id=volume_id, uuid=uuid) + elif modify: + self.modify_snapshot(volume_id=volume_id, uuid=uuid, rename=rename) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Creates, modifies, and deletes a Snapshot + """ + obj = NetAppOntapSnapshot() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py new file mode 100644 index 
000000000..1d271657a --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snapshot_policy.py @@ -0,0 +1,742 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: na_ontap_snapshot_policy +short_description: NetApp ONTAP manage Snapshot Policy +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create/Modify/Delete ONTAP snapshot policies +options: + state: + description: + - If you want to create, modify or delete a snapshot policy. + choices: ['present', 'absent'] + type: str + default: present + name: + description: + Name of the snapshot policy to be managed. + The maximum string length is 256 characters. + required: true + type: str + enabled: + description: + - Status of the snapshot policy indicating whether the policy will be enabled or disabled. + type: bool + comment: + description: + A human readable comment attached with the snapshot. + The size of the comment can be at most 255 characters. + type: str + count: + description: + Retention count for the snapshots created by the schedule. + type: list + elements: int + schedule: + description: + - Schedule to be added inside the policy. + type: list + elements: str + prefix: + description: + - Snapshot name prefix for the schedule. + - Prefix name should be unique within the policy. + - Cannot set a different prefix to a schedule that has already been assigned to a snapshot policy. + - Prefix cannot be modifed after schedule has been added. 
+ type: list + elements: str + required: false + version_added: '19.11.0' + snapmirror_label: + description: + - SnapMirror label assigned to each schedule inside the policy. Use an empty + string ('') for no label. + type: list + elements: str + required: false + version_added: 2.9.0 + vserver: + description: + - The name of the vserver to use. In a multi-tenanted environment, assigning a + Snapshot Policy to a vserver will restrict its use to that vserver. + required: false + type: str + version_added: 2.9.0 +''' +EXAMPLES = """ + - name: Create Snapshot policy + na_ontap_snapshot_policy: + state: present + name: ansible2 + schedule: hourly + prefix: hourly + count: 150 + enabled: True + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + https: False + + - name: Create Snapshot policy with multiple schedules + na_ontap_snapshot_policy: + state: present + name: ansible2 + schedule: ['hourly', 'daily', 'weekly', 'monthly', '5min'] + prefix: ['hourly', 'daily', 'weekly', 'monthly', '5min'] + count: [1, 2, 3, 4, 5] + enabled: True + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + https: False + + - name: Create Snapshot policy owned by a vserver + na_ontap_snapshot_policy: + state: present + name: ansible3 + vserver: ansible + schedule: ['hourly', 'daily', 'weekly', 'monthly', '5min'] + prefix: ['hourly', 'daily', 'weekly', 'monthly', '5min'] + count: [1, 2, 3, 4, 5] + snapmirror_label: ['hourly', 'daily', 'weekly', 'monthly', ''] + enabled: True + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + https: False + + - name: Modify Snapshot policy with multiple schedules + na_ontap_snapshot_policy: + state: present + name: ansible2 + schedule: ['daily', 'weekly'] + count: [20, 30] + snapmirror_label: ['daily', 'weekly'] + enabled: True + username: "{{ netapp_username }}" + password: "{{ 
netapp_password }}" + hostname: "{{ netapp_hostname }}" + https: False + + - name: Delete Snapshot policy + na_ontap_snapshot_policy: + state: absent + name: ansible2 + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + hostname: "{{ netapp_hostname }}" + https: False +""" + +RETURN = """ +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapSnapshotPolicy(object): + """ + Creates and deletes a Snapshot Policy + """ + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type="str"), + enabled=dict(required=False, type="bool"), + # count is a list of integers + count=dict(required=False, type="list", elements="int"), + comment=dict(required=False, type="str"), + schedule=dict(required=False, type="list", elements="str"), + prefix=dict(required=False, type="list", elements="str"), + snapmirror_label=dict(required=False, type="list", elements="str"), + vserver=dict(required=False, type="str") + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('state', 'present', ['enabled', 'count', 'schedule']), + ], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + if self.use_rest and not 
self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0): + msg = 'REST requires ONTAP 9.8 or later for snapshot schedules.' + self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg="the python NetApp-Lib module is required") + if 'vserver' in self.parameters: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def safe_strip(self, option): + """ strip the given string """ + return option.strip() if option is not None else None + + def get_snapshot_policy(self): + """ + Checks to see if a snapshot policy exists or not + :return: Return policy details if a snapshot policy exists, None if it doesn't + """ + snapshot_obj = netapp_utils.zapi.NaElement("snapshot-policy-get-iter") + # compose query + query = netapp_utils.zapi.NaElement("query") + snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-policy-info") + snapshot_info_obj.add_new_child("policy", self.parameters['name']) + if 'vserver' in self.parameters: + snapshot_info_obj.add_new_child("vserver-name", self.parameters['vserver']) + query.add_child_elem(snapshot_info_obj) + snapshot_obj.add_child_elem(query) + try: + result = self.server.invoke_successfully(snapshot_obj, True) + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) == 1: + snapshot_policy = result.get_child_by_name('attributes-list').get_child_by_name('snapshot-policy-info') + current = {'name': snapshot_policy.get_child_content('policy')} + current['vserver'] = snapshot_policy.get_child_content('vserver-name') + current['enabled'] = snapshot_policy.get_child_content('enabled').lower() != 'false' + current['comment'] = snapshot_policy.get_child_content('comment') or '' + current['schedule'], current['count'], current['snapmirror_label'], 
current['prefix'] = [], [], [], [] + if snapshot_policy.get_child_by_name('snapshot-policy-schedules'): + for schedule in snapshot_policy['snapshot-policy-schedules'].get_children(): + current['schedule'].append(schedule.get_child_content('schedule')) + current['count'].append(int(schedule.get_child_content('count'))) + + snapmirror_label = schedule.get_child_content('snapmirror-label') + if snapmirror_label is None or snapmirror_label == '-': + snapmirror_label = '' + current['snapmirror_label'].append(snapmirror_label) + + prefix = schedule.get_child_content('prefix') + if prefix is None or prefix == '-': + prefix = '' + current['prefix'].append(prefix) + return current + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + return None + + def validate_parameters(self): + """ + Validate if each schedule has a count associated + :return: None + """ + if 'count' not in self.parameters or 'schedule' not in self.parameters or \ + len(self.parameters['count']) > 5 or len(self.parameters['schedule']) > 5 or \ + len(self.parameters['count']) < 1 or len(self.parameters['schedule']) < 1 or \ + len(self.parameters['count']) != len(self.parameters['schedule']): + self.module.fail_json(msg="Error: A Snapshot policy must have at least 1 " + "schedule and can have up to a maximum of 5 schedules, with a count " + "representing the maximum number of Snapshot copies for each schedule") + + if 'snapmirror_label' in self.parameters and len(self.parameters['snapmirror_label']) != len(self.parameters['schedule']): + self.module.fail_json(msg="Error: Each Snapshot Policy schedule must have an accompanying SnapMirror Label") + + if 'prefix' in self.parameters and len(self.parameters['prefix']) != len(self.parameters['schedule']): + self.module.fail_json(msg="Error: Each Snapshot Policy schedule must have an accompanying prefix") + + def modify_snapshot_policy(self, current): + """ + Modifies an existing snapshot 
policy + """ + # Set up required variables to modify snapshot policy + options = {'policy': self.parameters['name']} + modify = False + + # Set up optional variables to modify snapshot policy + if 'enabled' in self.parameters and self.parameters['enabled'] != current['enabled']: + options['enabled'] = str(self.parameters['enabled']) + modify = True + if 'comment' in self.parameters and self.parameters['comment'] != current['comment']: + options['comment'] = self.parameters['comment'] + modify = True + + if modify: + snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-policy-modify', **options) + try: + self.server.invoke_successfully(snapshot_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying snapshot policy %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_snapshot_policy_schedules(self, current): + """ + Modify existing schedules in snapshot policy + :return: None + """ + self.validate_parameters() + delete_schedules, modify_schedules, add_schedules = [], [], [] + + if 'snapmirror_label' in self.parameters: + snapmirror_labels = self.parameters['snapmirror_label'] + else: + # User hasn't supplied any snapmirror labels. 
+ snapmirror_labels = [None] * len(self.parameters['schedule']) + + # Identify schedules for deletion + for schedule in current['schedule']: + schedule = self.safe_strip(schedule) + if schedule not in [item.strip() for item in self.parameters['schedule']]: + options = {'policy': current['name'], + 'schedule': schedule} + delete_schedules.append(options) + + # Identify schedules to be modified or added + for schedule, count, snapmirror_label in zip(self.parameters['schedule'], self.parameters['count'], snapmirror_labels): + schedule = self.safe_strip(schedule) + snapmirror_label = self.safe_strip(snapmirror_label) + + options = {'policy': current['name'], + 'schedule': schedule} + + if schedule in current['schedule']: + # Schedule exists. Only modify if it has changed. + modify = False + schedule_index = current['schedule'].index(schedule) + + if count != current['count'][schedule_index]: + options['new-count'] = str(count) + modify = True + + if snapmirror_label is not None and snapmirror_label != current['snapmirror_label'][schedule_index]: + options['new-snapmirror-label'] = snapmirror_label + modify = True + + if modify: + modify_schedules.append(options) + else: + # New schedule + options['count'] = str(count) + if snapmirror_label is not None and snapmirror_label != '': + options['snapmirror-label'] = snapmirror_label + add_schedules.append(options) + + # Delete N-1 schedules no longer required. Must leave 1 schedule in policy + # at any one time. Delete last one afterwards. + while len(delete_schedules) > 1: + options = delete_schedules.pop() + self.modify_snapshot_policy_schedule(options, 'snapshot-policy-remove-schedule') + + # Modify schedules. + while modify_schedules: + options = modify_schedules.pop() + self.modify_snapshot_policy_schedule(options, 'snapshot-policy-modify-schedule') + + # Add 1 new schedule. Add other ones after last schedule has been deleted. 
+ if add_schedules: + options = add_schedules.pop() + self.modify_snapshot_policy_schedule(options, 'snapshot-policy-add-schedule') + + # Delete last schedule no longer required. + while delete_schedules: + options = delete_schedules.pop() + self.modify_snapshot_policy_schedule(options, 'snapshot-policy-remove-schedule') + + # Add remaining new schedules. + while add_schedules: + options = add_schedules.pop() + self.modify_snapshot_policy_schedule(options, 'snapshot-policy-add-schedule') + + def modify_snapshot_policy_schedule(self, options, zapi): + """ + Add, modify or remove a schedule to/from a snapshot policy + """ + snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options) + try: + self.server.invoke_successfully(snapshot_obj, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying snapshot policy schedule %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def create_snapshot_policy(self): + """ + Creates a new snapshot policy + """ + # set up required variables to create a snapshot policy + self.validate_parameters() + options = {'policy': self.parameters['name'], + 'enabled': str(self.parameters['enabled']), + } + + if 'snapmirror_label' in self.parameters: + snapmirror_labels = self.parameters['snapmirror_label'] + else: + # User hasn't supplied any snapmirror labels. + snapmirror_labels = [None] * len(self.parameters['schedule']) + + if 'prefix' in self.parameters: + prefixes = self.parameters['prefix'] + else: + # User hasn't supplied any prefixes. 
+ prefixes = [None] * len(self.parameters['schedule']) + + # zapi attribute for first schedule is schedule1, second is schedule2 and so on + positions = [str(i) for i in range(1, len(self.parameters['schedule']) + 1)] + for schedule, prefix, count, snapmirror_label, position in \ + zip(self.parameters['schedule'], prefixes, + self.parameters['count'], snapmirror_labels, positions): + schedule = self.safe_strip(schedule) + options['count' + position] = str(count) + options['schedule' + position] = schedule + snapmirror_label = self.safe_strip(snapmirror_label) + if snapmirror_label: + options['snapmirror-label' + position] = snapmirror_label + prefix = self.safe_strip(prefix) + if prefix: + options['prefix' + position] = prefix + + snapshot_obj = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-policy-create', **options) + + # Set up optional variables to create a snapshot policy + if self.parameters.get('comment'): + snapshot_obj.add_new_child("comment", self.parameters['comment']) + try: + self.server.invoke_successfully(snapshot_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating snapshot policy %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_snapshot_policy(self): + """ + Deletes an existing snapshot policy + """ + snapshot_obj = netapp_utils.zapi.NaElement("snapshot-policy-delete") + + # Set up required variables to delete a snapshot policy + snapshot_obj.add_new_child("policy", self.parameters['name']) + try: + self.server.invoke_successfully(snapshot_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting snapshot policy %s: %s' % + (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def asup_log_for_cserver(self, event_name): + """ + Fetch admin vserver for the given cluster + Create and Autosupport log event with the given module name + :param event_name: 
Name of the event log + :return: None + """ + if 'vserver' in self.parameters: + netapp_utils.ems_log_event(event_name, self.server) + else: + results = netapp_utils.get_cserver(self.server) + cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results) + netapp_utils.ems_log_event(event_name, cserver) + +# REST API support for create, delete and modify snapshot policy + def get_snapshot_schedule_rest(self, current): + """ + get details of the snapshot schedule with rest API. + """ + query = {'snapshot_policy.name': current['name']} + api = 'storage/snapshot-policies/%s/schedules' % current['uuid'] + fields = 'schedule.name,schedule.uuid,snapmirror_label,count,prefix' + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg="Error on fetching snapshot schedule: %s" % error) + if records: + scheduleRecords = { + 'counts': [], + 'prefixes': [], + 'schedule_names': [], + 'schedule_uuids': [], + 'snapmirror_labels': [] + } + for item in records: + scheduleRecords['counts'].append(item['count']) + scheduleRecords['prefixes'].append(item['prefix']) + scheduleRecords['schedule_names'].append(item['schedule']['name']) + scheduleRecords['schedule_uuids'].append(item['schedule']['uuid']) + scheduleRecords['snapmirror_labels'].append(item['snapmirror_label']) + return scheduleRecords + return None + + def get_snapshot_policy_rest(self): + """ + get details of the snapshot policy with rest API. 
+ """ + if not self.use_rest: + return self.get_snapshot_policy() + query = {'name': self.parameters['name']} + if self.parameters.get('vserver'): + query['svm.name'] = self.parameters['vserver'] + query['scope'] = 'svm' + else: + query['scope'] = 'cluster' + api = 'storage/snapshot-policies' + fields = 'enabled,svm.uuid,comment,copies.snapmirror_label,copies.count,copies.prefix,copies.schedule.name,scope' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg="Error on fetching snapshot policy: %s" % error) + if record: + current = { + 'enabled': record['enabled'], + 'name': record['name'], + 'uuid': record['uuid'], + 'comment': record.get('comment', ''), + 'count': [], + 'prefix': [], + 'schedule': [], + 'snapmirror_label': [] + } + if query['scope'] == 'svm': + current['svm_name'] = record['svm']['name'] + current['svm_uuid'] = record['svm']['uuid'] + if record['copies']: + for item in record['copies']: + current['count'].append(item['count']) + current['prefix'].append(item['prefix']) + current['schedule'].append(item['schedule']['name']) + current['snapmirror_label'].append(item['snapmirror_label']) + return current + return record + + def create_snapshot_policy_rest(self): + """ + create snapshot policy with rest API. + """ + if not self.use_rest: + return self.create_snapshot_policy() + + body = { + 'name': self.parameters.get('name'), + 'enabled': self.parameters.get('enabled'), + 'copies': [] + } + if self.parameters.get('vserver'): + body['svm.name'] = self.parameters['vserver'] + if 'comment' in self.parameters: + body['comment'] = self.parameters['comment'] + if 'snapmirror_label' in self.parameters: + snapmirror_labels = self.parameters['snapmirror_label'] + else: + # User hasn't supplied any snapmirror labels. 
+ snapmirror_labels = [None] * len(self.parameters['schedule']) + + if 'prefix' in self.parameters: + prefixes = self.parameters['prefix'] + else: + # User hasn't supplied any prefixes. + prefixes = [None] * len(self.parameters['schedule']) + for schedule, prefix, count, snapmirror_label in \ + zip(self.parameters['schedule'], prefixes, + self.parameters['count'], snapmirror_labels): + copy = { + 'schedule': {'name': self.safe_strip(schedule)}, + 'count': count + } + snapmirror_label = self.safe_strip(snapmirror_label) + if snapmirror_label: + copy['snapmirror_label'] = snapmirror_label + prefix = self.safe_strip(prefix) + if prefix: + copy['prefix'] = prefix + body['copies'].append(copy) + api = 'storage/snapshot-policies' + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error is not None: + self.module.fail_json(msg="Error on creating snapshot policy: %s" % error) + + def delete_snapshot_policy_rest(self, current): + """ + delete snapshot policy with rest API. + """ + if not self.use_rest: + return self.delete_snapshot_policy() + api = 'storage/snapshot-policies' + dummy, error = rest_generic.delete_async(self.rest_api, api, current['uuid']) + if error is not None: + self.module.fail_json(msg="Error on deleting snapshot policy: %s" % error) + + def modify_snapshot_policy_rest(self, modify, current=None): + """ + Modify snapshot policy with rest API. + """ + if not self.use_rest: + return self.modify_snapshot_policy(current) + api = 'storage/snapshot-policies' + body = {} + if 'enabled' in modify: + body['enabled'] = modify['enabled'] + if 'comment' in modify: + body['comment'] = modify['comment'] + if body: + dummy, error = rest_generic.patch_async(self.rest_api, api, current['uuid'], body) + if error is not None: + self.module.fail_json(msg="Error on modifying snapshot policy: %s" % error) + + def modify_snapshot_policy_schedule_rest(self, modify, current): + """ + Modify snapshot schedule with rest API. 
+ """ + if not self.use_rest: + return self.modify_snapshot_policy_schedules(current) + + schedule_info = None + api = 'storage/snapshot-policies/%s/schedules' % current['uuid'] + schedule_info = self.get_snapshot_schedule_rest(current) + delete_schedules, modify_schedules, add_schedules = [], [], [] + + if 'snapmirror_label' in self.parameters: + snapmirror_labels = self.parameters['snapmirror_label'] + else: + # User hasn't supplied any snapmirror labels. + snapmirror_labels = [None] * len(self.parameters['schedule']) + + if 'prefix' in self.parameters: + prefixes = self.parameters['prefix'] + else: + # User hasn't supplied any prefix. + prefixes = [None] * len(self.parameters['schedule']) + + # Identify schedules to be deleted + for schedule_name, schedule_uuid in zip(schedule_info['schedule_names'], schedule_info['schedule_uuids']): + schedule_name = self.safe_strip(schedule_name) + if schedule_name not in [item.strip() for item in self.parameters['schedule']]: + delete_schedules.append(schedule_uuid) + + # Identify schedules to be modified or added + for schedule_name, count, snapmirror_label, prefix in zip(self.parameters['schedule'], self.parameters['count'], snapmirror_labels, prefixes): + schedule_name = self.safe_strip(schedule_name) + if snapmirror_label: + snapmirror_label = self.safe_strip(snapmirror_label) + if prefix: + prefix = self.safe_strip(prefix) + body = {} + if schedule_name in schedule_info['schedule_names']: + # Schedule exists. Only modify if it has changed. 
+ modify = False + schedule_index = schedule_info['schedule_names'].index(schedule_name) + schedule_uuid = schedule_info['schedule_uuids'][schedule_index] + if count != schedule_info['counts'][schedule_index]: + body['count'] = str(count) + modify = True + + if snapmirror_label is not None and snapmirror_label != schedule_info['snapmirror_labels'][schedule_index]: + body['snapmirror_label'] = snapmirror_label + modify = True + + if prefix is not None and prefix != schedule_info['prefixes'][schedule_index]: + body['prefix'] = prefix + modify = True + + if modify: + body['schedule_uuid'] = schedule_uuid + modify_schedules.append(body) + else: + # New schedule + body['schedule.name'] = schedule_name + body['count'] = str(count) + if snapmirror_label is not None and snapmirror_label != '': + body['snapmirror_label'] = snapmirror_label + if prefix is not None and prefix != '': + body['prefix'] = prefix + add_schedules.append(body) + + # Delete N-1 schedules no longer required. Must leave 1 schedule in policy + # at any one time. Delete last one afterwards. + while len(delete_schedules) > 1: + schedule_uuid = delete_schedules.pop() + record, error = rest_generic.delete_async(self.rest_api, api, schedule_uuid) + if error is not None: + self.module.fail_json(msg="Error on deleting snapshot policy schedule: %s" % error) + + # Modify schedules. + while modify_schedules: + body = modify_schedules.pop() + schedule_id = body.pop('schedule_uuid') + record, error = rest_generic.patch_async(self.rest_api, api, schedule_id, body) + if error is not None: + self.module.fail_json(msg="Error on modifying snapshot policy schedule: %s" % error) + + # Add 1 new schedule. At least one schedule must be present, before we can delete the last old one. 
+ if add_schedules: + body = add_schedules.pop() + record, error = rest_generic.post_async(self.rest_api, api, body) + if error is not None: + self.module.fail_json(msg="Error on adding snapshot policy schedule: %s" % error) + + while delete_schedules: + schedule_uuid = delete_schedules.pop() + record, error = rest_generic.delete_async(self.rest_api, api, schedule_uuid) + if error is not None: + self.module.fail_json(msg="Error on deleting snapshot policy schedule: %s" % error) + + while add_schedules: + body = add_schedules.pop() + record, error = rest_generic.post_async(self.rest_api, api, body) + if error is not None: + self.module.fail_json(msg="Error on adding snapshot policy schedule: %s" % error) + + def apply(self): + """ + Check to see which play we should run + """ + current = self.get_snapshot_policy_rest() + modify = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.parameters['state'] == 'present': + self.validate_parameters() + if cd_action is None and self.parameters['state'] == 'present': + # Don't sort schedule/prefix/count/snapmirror_label lists as it can + # mess up the intended parameter order. 
def main():
    """Module entry point: build the snapshot policy manager and apply the requested state."""
    NetAppOntapSnapshotPolicy().apply()
+ default: 'present' + type: str +short_description: NetApp ONTAP SNMP community +version_added: 2.6.0 +''' + +EXAMPLES = """ + - name: Create SNMP community (ZAPI only) + netapp.ontap.na_ontap_snmp: + state: present + community_name: communityName + access_control: 'ro' + use_rest: never + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Create SNMP community (snmpv1 or snmpv2) (REST only) + netapp.ontap.na_ontap_snmp: + state: present + community_name: communityName + use_rest: always + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete SNMP community (ZAPI only) + netapp.ontap.na_ontap_snmp: + state: absent + community_name: communityName + access_control: 'ro' + use_rest: never + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + - name: Delete SNMP community (snmpv1 or snmpv2) (REST only) + netapp.ontap.na_ontap_snmp: + state: absent + community_name: communityName + use_rest: always + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppONTAPSnmp(object): + '''Class with SNMP methods, doesn't support check mode''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + 
state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + community_name=dict(required=True, type='str'), + access_control=dict(required=False, type='str', choices=['ro'], default='ro'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def invoke_snmp_community(self, zapi): + """ + Invoke zapi - add/delete take the same NaElement structure + """ + snmp_community = netapp_utils.zapi.NaElement.create_node_with_children( + zapi, **{'community': self.parameters['community_name'], + 'access-control': self.parameters['access_control']}) + try: + self.server.invoke_successfully(snmp_community, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if zapi == 'snmp-community-add': + action = 'adding' + elif zapi == 'snmp-community-delete': + action = 'deleting' + else: + action = 'unexpected' + self.module.fail_json(msg='Error %s community %s: %s' % (action, self.parameters['community_name'], to_native(error)), + exception=traceback.format_exc()) + + def get_snmp(self): + """ + Check if SNMP community exists + """ + if self.use_rest: + return self.get_snmp_rest() + snmp_obj = netapp_utils.zapi.NaElement('snmp-status') + try: + result = self.server.invoke_successfully(snmp_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg=to_native(error), exception=traceback.format_exc()) + if result.get_child_by_name('communities') is not None: + for snmp_entry in 
result.get_child_by_name('communities').get_children(): + community_name = snmp_entry.get_child_content('community') + if community_name == self.parameters['community_name']: + return { + 'community_name': snmp_entry.get_child_content('community'), + 'access_control': snmp_entry.get_child_content('access-control'), + } + return None + + def get_snmp_rest(self): + # There can be SNMPv1, SNMPv2 (called community) or + # SNMPv3 local or SNMPv3 remote (called users) + api = 'support/snmp/users' + params = {'name': self.parameters['community_name'], + 'fields': 'name,engine_id'} + message, error = self.rest_api.get(api, params) + record, error = rrh.check_for_0_or_1_records(api, message, error) + if error: + self.module.fail_json(msg=error) + if record: + # access control does not exist in rest + return dict(community_name=record['name'], engine_id=record['engine_id'], access_control='ro') + return None + + def add_snmp_community(self): + """ + Adds a SNMP community + """ + if self.use_rest: + self.add_snmp_community_rest() + else: + self.invoke_snmp_community('snmp-community-add') + + def add_snmp_community_rest(self): + api = 'support/snmp/users' + params = {'name': self.parameters['community_name'], + 'authentication_method': 'community'} + message, error = self.rest_api.post(api, params) + if error: + self.module.fail_json(msg=error) + + def delete_snmp_community(self, current=None): + """ + Delete a SNMP community + """ + if self.use_rest: + self.delete_snmp_community_rest(current) + else: + self.invoke_snmp_community('snmp-community-delete') + + def delete_snmp_community_rest(self, current): + api = 'support/snmp/users/' + current['engine_id'] + '/' + self.parameters["community_name"] + dummy, error = self.rest_api.delete(api) + if error: + self.module.fail_json(msg=error) + + def apply(self): + """ + Apply action to SNMP community + This module is not idempotent: + Add doesn't fail the playbook if user is trying + to add an already existing snmp community + """ + 
# TODO: This module should of been called snmp_community has it only deals with community and not snmp + current = self.get_snmp() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.add_snmp_community() + elif cd_action == 'delete': + self.delete_snmp_community(current) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + '''Execute action''' + community_obj = NetAppONTAPSnmp() + community_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py new file mode 100644 index 000000000..e27e8e7e5 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_snmp_traphosts.py @@ -0,0 +1,126 @@ +#!/usr/bin/python +""" +create SNMP module to add/delete/modify SNMP user +""" + +# (c) 2020-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: na_ontap_snmp_traphosts +short_description: NetApp ONTAP SNMP traphosts. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '20.3.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Whether the specified SNMP traphost should exist or not. Requires REST with 9.7 or higher +options: + state: + choices: ['present', 'absent'] + description: + - "Whether the specified SNMP traphost should exist or not." + default: 'present' + type: str + host: + description: + - "Fully qualified domain name (FQDN), IPv4 address or IPv6 address of SNMP traphost." 
+ aliases: ['ip_address'] + required: true + type: str + version_added: 21.24.0 +''' + +EXAMPLES = """ + - name: Create SNMP traphost + netapp.ontap.na_ontap_snmp_traphosts: + state: present + host: example1.com + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + - name: Delete SNMP traphost + netapp.ontap.na_ontap_snmp_traphosts: + state: absent + host: example1.com + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +""" +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPSnmpTraphosts: + """Class with SNMP methods""" + + def __init__(self): + self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + host=dict(required=True, type='str', aliases=['ip_address']), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_snmp_traphosts', 9, 7) + + def get_snmp_traphosts(self): + query = {'host': self.parameters.get('host'), + 'fields': 'host'} + api = 'support/snmp/traphosts' + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error on fetching snmp traphosts info: %s" % error) + return record + + def create_snmp_traphost(self): + api = 'support/snmp/traphosts' + params = {'host': 
self.parameters.get('host')} + dummy, error = rest_generic.post_async(self.rest_api, api, params) + if error: + self.module.fail_json(msg="Error creating traphost: %s" % error) + + def delete_snmp_traphost(self): + api = 'support/snmp/traphosts' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters.get('host')) + if error is not None: + self.module.fail_json(msg="Error deleting traphost: %s" % error) + + def apply(self): + """ + Apply action to SNMP traphost + """ + current = self.get_snmp_traphosts() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_snmp_traphost() + elif cd_action == 'delete': + self.delete_snmp_traphost() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """Execute action""" + community_obj = NetAppONTAPSnmpTraphosts() + community_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py new file mode 100644 index 000000000..941d23eac --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_software_update.py @@ -0,0 +1,722 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_software_update +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Update ONTAP software + - Requires an https connection and is not supported over http +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_software_update +options: + state: + choices: ['present', 'absent'] + description: + - This module 
downloads and optionally installs ONTAP software on a cluster. + - The software package is deleted after a successful installation. + - If the software package is already present, it is not downloaded and not replaced. + - When state is absent, the package is deleted from disk. + default: present + type: str + nodes: + description: + - List of nodes to be updated, the nodes have to be a part of a HA Pair. + - Requires ONTAP 9.9 with REST. + aliases: + - node + - nodes_to_update + type: list + elements: str + package_version: + required: true + description: + - Specifies the package version to update ONTAP software to, or to be deleted. + type: str + package_url: + type: str + description: + - Specifies the package URL to download the package. + - Required when state is present unless the package is already present on disk. + ignore_validation_warning: + description: + - Allows the update to continue if warnings are encountered during the validation phase. + default: False + type: bool + aliases: + - skip_warnings + download_only: + description: + - Allows to download image without update. + default: False + type: bool + version_added: 20.4.0 + validate_after_download: + description: + - By default validation is not run after download, as it is already done in the update step. + - This option is useful when using C(download_only), for instance when updating a MetroCluster system. + default: False + type: bool + version_added: 21.11.0 + stabilize_minutes: + description: + - Number of minutes that the update should wait after a takeover or giveback is completed. + - Requires ONTAP 9.8 with REST. + type: int + version_added: 20.6.0 + timeout: + description: + - how long to wait for the update to complete, in seconds. + default: 1800 + type: int + force_update: + description: + - force an update, even if package_version matches what is reported as installed. 
+ default: false + type: bool + version_added: 20.11.0 +short_description: NetApp ONTAP Update Software +version_added: 2.7.0 +notes: + - ONTAP expects the nodes to be in HA pairs to perform non disruptive updates. + - In a single node setup, the node is updated, and rebooted. + - Supports ZAPI and REST. + - Support check_mode. +''' + +EXAMPLES = """ + + - name: ONTAP software update + netapp.ontap.na_ontap_software_update: + state: present + nodes: vsim1 + package_url: "{{ url }}" + package_version: "{{ version_name }}" + ignore_validation_warning: True + download_only: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +validation_reports: + description: C(validation_reports_after_update) as a string, for backward compatibility. + returned: always + type: str +validation_reports_after_download: + description: + - List of validation reports, after downloading the software package. + - Note that it is different from the validation checks reported after attempting an update. + returned: always + type: list +validation_reports_after_updates: + description: + - List of validation reports, after attemting to update the software package. 
+ returned: always + type: list +""" + +import time +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppONTAPSoftwareUpdate: + """ + Class with ONTAP software update methods + """ + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + nodes=dict(required=False, type='list', elements='str', aliases=["node", "nodes_to_update"]), + package_version=dict(required=True, type='str'), + package_url=dict(required=False, type='str'), + ignore_validation_warning=dict(required=False, type='bool', default=False, aliases=["skip_warnings"]), + download_only=dict(required=False, type='bool', default=False), + stabilize_minutes=dict(required=False, type='int'), + timeout=dict(required=False, type='int', default=1800), + force_update=dict(required=False, type='bool', default=False), + validate_after_download=dict(required=False, type='bool', default=False), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + if self.parameters.get('https') is not True: + self.module.fail_json(msg='Error: https parameter must be True') + self.validation_reports_after_download = ['only available if validate_after_download is true'] + self.versions = ['not available with force_update'] + self.rest_api = 
OntapRestAPI(self.module) + partially_supported_rest_properties = [['stabilize_minutes', (9, 8)], ['nodes', (9, 9)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties) + if not self.use_rest: + if netapp_utils.has_netapp_lib() is False: + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + @staticmethod + def cluster_image_get_iter(): + """ + Compose NaElement object to query current version + :return: NaElement object for cluster-image-get-iter with query + """ + cluster_image_get = netapp_utils.zapi.NaElement('cluster-image-get-iter') + query = netapp_utils.zapi.NaElement('query') + cluster_image_info = netapp_utils.zapi.NaElement('cluster-image-info') + query.add_child_elem(cluster_image_info) + cluster_image_get.add_child_elem(query) + return cluster_image_get + + def cluster_image_get_versions(self): + """ + Get current cluster image versions for each node + :return: list of tuples (node_id, node_version) or empty list + """ + if self.use_rest: + return self.cluster_image_get_rest('versions') + cluster_image_get_iter = self.cluster_image_get_iter() + try: + result = self.server.invoke_successfully(cluster_image_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching cluster image details: %s: %s' + % (self.parameters['package_version'], to_native(error)), + exception=traceback.format_exc()) + return ([(image_info.get_child_content('node-id'), image_info.get_child_content('current-version')) + for image_info in result.get_child_by_name('attributes-list').get_children()] + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0 else []) + + def cluster_image_get_for_node(self, node_name): + """ + Get current cluster image info for given node + """ + cluster_image_get = 
netapp_utils.zapi.NaElement('cluster-image-get') + cluster_image_get.add_new_child('node-id', node_name) + try: + result = self.server.invoke_successfully(cluster_image_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching cluster image details for %s: %s' + % (node_name, to_native(error)), + exception=traceback.format_exc()) + # return cluster image version + image_info = self.na_helper.safe_get(result, ['attributes', 'cluster-image-info']) + if image_info: + return image_info.get_child_content('node-id'), image_info.get_child_content('current-version') + return None, None + + @staticmethod + def get_localname(tag): + return netapp_utils.zapi.etree.QName(tag).localname + + def cluster_image_update_progress_get(self, ignore_connection_error=True): + """ + Get current cluster image update progress info + :return: Dictionary of cluster image update progress if query successful, else return None + """ + cluster_update_progress_get = netapp_utils.zapi.NaElement('cluster-image-update-progress-info') + cluster_update_progress_info = {} + try: + result = self.server.invoke_successfully(cluster_update_progress_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + # return empty dict on error to satisfy package delete upon image update + if ignore_connection_error: + return cluster_update_progress_info + self.module.fail_json(msg='Error fetching cluster image update progress details: %s' % (to_native(error)), + exception=traceback.format_exc()) + # return cluster image update progress details + if result.get_child_by_name('attributes').get_child_by_name('ndu-progress-info'): + update_progress_info = result.get_child_by_name('attributes').get_child_by_name('ndu-progress-info') + cluster_update_progress_info['overall_status'] = update_progress_info.get_child_content('overall-status') + cluster_update_progress_info['completed_node_count'] = update_progress_info.\ + 
get_child_content('completed-node-count') + reports = update_progress_info.get_child_by_name('validation-reports') + if reports: + cluster_update_progress_info['validation_reports'] = [] + for report in reports.get_children(): + checks = {} + for check in report.get_children(): + checks[self.get_localname(check.get_name())] = check.get_content() + cluster_update_progress_info['validation_reports'].append(checks) + return cluster_update_progress_info + + def cluster_image_update(self): + """ + Update current cluster image + """ + cluster_update_info = netapp_utils.zapi.NaElement('cluster-image-update') + cluster_update_info.add_new_child('package-version', self.parameters['package_version']) + cluster_update_info.add_new_child('ignore-validation-warning', + str(self.parameters['ignore_validation_warning'])) + if self.parameters.get('stabilize_minutes'): + cluster_update_info.add_new_child('stabilize-minutes', + self.na_helper.get_value_for_int(False, self.parameters['stabilize_minutes'])) + if self.parameters.get('nodes'): + cluster_nodes = netapp_utils.zapi.NaElement('nodes') + for node in self.parameters['nodes']: + cluster_nodes.add_new_child('node-name', node) + cluster_update_info.add_child_elem(cluster_nodes) + try: + self.server.invoke_successfully(cluster_update_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + msg = 'Error updating cluster image for %s: %s' % (self.parameters['package_version'], to_native(error)) + cluster_update_progress_info = self.cluster_image_update_progress_get(ignore_connection_error=True) + validation_reports = cluster_update_progress_info.get('validation_reports') + if validation_reports is None: + validation_reports = self.cluster_image_validate() + self.module.fail_json( + msg=msg, + validation_reports=str(validation_reports), + validation_reports_after_download=self.validation_reports_after_download, + validation_reports_after_update=validation_reports, + exception=traceback.format_exc()) + + def 
cluster_image_package_download(self): + """ + Get current cluster image package download + :return: True if package already exists, else return False + """ + cluster_image_package_download_info = netapp_utils.zapi.NaElement('cluster-image-package-download') + cluster_image_package_download_info.add_new_child('package-url', self.parameters['package_url']) + try: + self.server.invoke_successfully(cluster_image_package_download_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + # Error 18408 denotes Package image with the same name already exists + if to_native(error.code) == "18408": + return self.check_for_existing_package(error) + else: + self.module.fail_json(msg='Error downloading cluster image package for %s: %s' + % (self.parameters['package_url'], to_native(error)), + exception=traceback.format_exc()) + return False + + def cluster_image_package_delete(self): + """ + Delete current cluster image package + """ + if self.use_rest: + return self.cluster_image_package_delete_rest() + cluster_image_package_delete_info = netapp_utils.zapi.NaElement('cluster-image-package-delete') + cluster_image_package_delete_info.add_new_child('package-version', self.parameters['package_version']) + try: + self.server.invoke_successfully(cluster_image_package_delete_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting cluster image package for %s: %s' + % (self.parameters['package_version'], to_native(error)), + exception=traceback.format_exc()) + + def cluster_image_package_download_progress(self): + """ + Get current cluster image package download progress + :return: Dictionary of cluster image download progress if query successful, else return None + """ + cluster_image_package_download_progress_info = netapp_utils.zapi.\ + NaElement('cluster-image-get-download-progress') + try: + result = self.server.invoke_successfully( + cluster_image_package_download_progress_info, 
enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching cluster image package download progress for %s: %s' + % (self.parameters['package_url'], to_native(error)), + exception=traceback.format_exc()) + # return cluster image download progress details + cluster_download_progress_info = {} + if result.get_child_by_name('progress-status'): + cluster_download_progress_info['progress_status'] = result.get_child_content('progress-status') + cluster_download_progress_info['progress_details'] = result.get_child_content('progress-details') + cluster_download_progress_info['failure_reason'] = result.get_child_content('failure-reason') + return cluster_download_progress_info + return None + + def cluster_image_validate(self): + """ + Validate that NDU is feasible. + :return: List of dictionaries + """ + if self.use_rest: + return self.cluster_image_validate_rest() + cluster_image_validation_info = netapp_utils.zapi.NaElement('cluster-image-validate') + cluster_image_validation_info.add_new_child('package-version', self.parameters['package_version']) + try: + result = self.server.invoke_successfully( + cluster_image_validation_info, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + return 'Error running cluster image validate: %s' % to_native(error) + # return cluster validation report + cluster_report_info = [] + if result.get_child_by_name('cluster-image-validation-report-list'): + for report in result.get_child_by_name('cluster-image-validation-report-list').get_children(): + info = self.na_helper.safe_get(report, ['required-action', 'required-action-info']) + required_action = {} + if info: + for action in info.get_children(): + if action.get_content(): + required_action[self.get_localname(action.get_name())] = action.get_content() + cluster_report_info.append(dict( + ndu_check=report.get_child_content('ndu-check'), + ndu_status=report.get_child_content('ndu-status'), + 
required_action=required_action + )) + return cluster_report_info + + def is_update_required(self): + ''' return True if at least one node is not at the correct version ''' + if self.parameters.get('nodes') and not self.use_rest: + self.versions = [self.cluster_image_get_for_node(node) for node in self.parameters['nodes']] + else: + self.versions = self.cluster_image_get_versions() + # set comnprehension not supported on 2.6 + current_versions = set([x[1] for x in self.versions]) + if len(current_versions) != 1: + # mixed set, need to update + return True + # only update if versions differ + return current_versions.pop() != self.parameters['package_version'] + + def download_software(self): + if self.use_rest: + return self.download_software_rest() + package_exists = self.cluster_image_package_download() + if package_exists is False: + cluster_download_progress = self.cluster_image_package_download_progress() + while cluster_download_progress is None or cluster_download_progress.get('progress_status') == 'async_pkg_get_phase_running': + time.sleep(10) + cluster_download_progress = self.cluster_image_package_download_progress() + if cluster_download_progress.get('progress_status') != 'async_pkg_get_phase_complete': + self.module.fail_json(msg='Error downloading package: %s - installed versions: %s' + % (cluster_download_progress['failure_reason'], self.versions)) + + def update_software(self): + if self.use_rest: + return self.update_software_rest() + self.cluster_image_update() + # delete package once update is completed + cluster_update_progress = {} + time_left = self.parameters['timeout'] + polling_interval = 25 + # assume in_progress if dict is empty + while time_left > 0 and cluster_update_progress.get('overall_status', 'in_progress') == 'in_progress': + time.sleep(polling_interval) + time_left -= polling_interval + cluster_update_progress = self.cluster_image_update_progress_get(ignore_connection_error=True) + + if 
cluster_update_progress.get('overall_status') != 'completed': + cluster_update_progress = self.cluster_image_update_progress_get(ignore_connection_error=False) + + validation_reports = cluster_update_progress.get('validation_reports') + + if cluster_update_progress.get('overall_status') == 'completed': + self.cluster_image_package_delete() + return validation_reports + + if cluster_update_progress.get('overall_status') == 'in_progress': + msg = 'Timeout error' + action = ' Should the timeout value be increased? Current value is %d seconds.' % self.parameters['timeout'] + action += ' The software update continues in background.' + else: + msg = 'Error' + action = '' + msg += ' updating image using ZAPI: overall_status: %s.' % (cluster_update_progress.get('overall_status', 'cannot get status')) + msg += action + self.module.fail_json( + msg=msg, + validation_reports=str(validation_reports), + validation_reports_after_download=self.validation_reports_after_download, + validation_reports_after_update=validation_reports) + + def cluster_image_get_rest(self, what, fail_on_error=True): + """return field information for: + - nodes if what == versions + - validation_results if what == validation_results + - state if what == state + - any other field if what is a valid field name + call fail_json when there is an error and fail_on_error is True + return a tuple (info, error) when fail_on_error is False + return info when fail_on_error is Trie + """ + api = 'cluster/software' + field = 'nodes' if what == 'versions' else what + record, error = rest_generic.get_one_record(self.rest_api, api, fields=field) + # record can be empty or these keys may not be present when validation is still in progress + optional_fields = ['validation_results'] + info, error_msg = None, None + if error or not record: + if error or field not in optional_fields: + error_msg = "Error fetching software information for %s: %s" % (field, error or 'no record calling %s' % api) + elif what == 'versions' and 
'nodes' in record: + nodes = self.parameters.get('nodes') + if nodes: + known_nodes = [node['name'] for node in record['nodes']] + unknown_nodes = [node for node in nodes if node not in known_nodes] + if unknown_nodes: + error_msg = 'Error: node%s not found in cluster: %s.' % ('s' if len(unknown_nodes) > 1 else '', ', '.join(unknown_nodes)) + info = [(node['name'], node['version']) for node in record['nodes'] if nodes is None or node['name'] in nodes] + elif field in record: + info = record[field] + elif field not in optional_fields: + error_msg = "Unexpected results for what: %s, record: %s" % (what, record) + if fail_on_error and error_msg: + self.module.fail_json(msg=error_msg) + return info if fail_on_error else (info, error_msg) + + def check_for_existing_package(self, error): + ''' ONTAP returns 'Package image with the same name already exists' + if a file with the same name already exists. + We need to confirm the version: if the version matches, we're good, + otherwise we need to error out. 
+ ''' + versions, error2 = self.cluster_image_packages_get_rest() + if self.parameters['package_version'] in versions: + return True + if versions: + self.module.fail_json(msg='Error: another package with the same file name exists: found: %s' % ', '.join(versions)) + self.module.fail_json(msg='Error: ONTAP reported package already exists, but no package found: %s, getting versions: %s' % (error, error2)) + + def cluster_image_download_get_rest(self): + api = 'cluster/software/download' + field = 'message,state' + record, error = rest_generic.get_one_record(self.rest_api, api, fields=field) + if record: + return record.get('state'), record.get('message'), error + return None, None, error + + def download_software_rest(self): + api = 'cluster/software/download' + body = { + 'url': self.parameters['package_url'] + } + dummy, error = rest_generic.post_async(self.rest_api, api, body, timeout=0, job_timeout=self.parameters['timeout']) + if error: + if 'Package image with the same name already exists' in error: + return self.check_for_existing_package(error) + if 'Software get operation already in progress' in error: + self.module.warn("A download is already in progress. 
Resuming existing download.") + return self.wait_for_condition(self.is_download_complete_rest, 'image download state') + self.module.fail_json(msg="Error downloading software: %s - current versions: %s" % (error, self.versions)) + return False + + def is_download_complete_rest(self): + state, dummy, error = self.cluster_image_download_get_rest() + if error: + return None, None, error + return state in ['success', 'failure'], state, error + + def cluster_image_validate_rest(self): + api = 'cluster/software' + body = { + 'version': self.parameters['package_version'] + } + query = { + 'validate_only': 'true' + } + dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, query, timeout=0, job_timeout=self.parameters['timeout']) + if error: + return "Error validating software: %s" % error + + validation_results = None + for __ in range(30): + time.sleep(10) + validation_results = self.cluster_image_get_rest('validation_results') + if validation_results is not None: + break + return validation_results + + def update_software_rest(self): + """install the software and invoke clean up and reporting function + """ + state = self.cluster_image_update_rest() + self.post_update_tasks_rest(state) + + def post_update_tasks_rest(self, state): + """delete software package when installation is successful + report validation_results whether update succeeded or failed + """ + # fetch validation results + (validation_reports, error) = self.cluster_image_get_rest('validation_results', fail_on_error=False) + + # success: delete and return + if state == 'completed': + self.cluster_image_package_delete() + return error or validation_reports + + # report error + if state == 'in_progress': + msg = 'Timeout error' + action = ' Should the timeout value be increased? Current value is %d seconds.' % self.parameters['timeout'] + action += ' The software update continues in background.' + else: + msg = 'Error' + action = '' + msg += ' updating image using REST: state: %s.' 
% state + msg += action + self.module.fail_json( + msg=msg, + validation_reports_after_download=self.validation_reports_after_download, + validation_reports_after_update=(error or validation_reports)) + + def error_is_fatal(self, error): + ''' a node may not be available during reboot, or the job may be lost ''' + if not error: + return False + self.rest_api.log_debug('transient_error', error) + error_messages = [ + "entry doesn't exist", # job not found + "Max retries exceeded with url: /api/cluster/jobs" # connection errors + ] + return all(error_message not in error for error_message in error_messages) + + def cluster_image_update_rest(self): + api = 'cluster/software' + body = { + 'version': self.parameters['package_version'] + } + query = {} + params_to_rest = { + # module keys to REST keys + 'ignore_validation_warning': 'skip_warnings', + 'nodes': 'nodes_to_update', + 'stabilize_minutes': 'stabilize_minutes', + } + for (param_key, rest_key) in params_to_rest.items(): + value = self.parameters.get(param_key) + if value is not None: + query[rest_key] = ','.join(value) if rest_key == 'nodes_to_update' else value + # With ONTAP 9.8, the job persists until the node is rebooted + # With ONTAP 9.9, the job returns quickly + dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, query=query or None, timeout=0, job_timeout=self.parameters['timeout']) + if self.error_is_fatal(error): + validation_results, v_error = self.cluster_image_get_rest('validation_results', fail_on_error=False) + self.module.fail_json(msg="Error updating software: %s - validation results: %s" % (error, v_error or validation_results)) + + return self.wait_for_condition(self.is_update_complete_rest, 'image update state') + + def is_update_complete_rest(self): + state, error = self.cluster_image_get_rest('state', fail_on_error=False) + if error: + return None, None, error + return state in ['paused_by_user', 'paused_on_error', 'completed', 'canceled', 'failed'], state, error + + 
def wait_for_condition(self, is_task_complete, description): + ''' loop until a condition is met + is_task_complete is a function that returns (is_complete, state, error) + if is_complete is True, the condition is met and state is returned + if is complete is False, the task is called until a timeout is reached + errors are ignored unless there are too many or a timeout is reached + ''' + errors = [] + for __ in range(1 + self.parameters['timeout'] // 60): # floor division + time.sleep(60) + is_complete, state, error = is_task_complete() + if error: + self.rest_api.log_debug('transient_error', error) + errors.append(error) + if len(errors) < 20: + continue + break + errors = [] + if is_complete: + break + if errors: + msg = "Error: unable to read %s, using timeout %s." % (description, self.parameters['timeout']) + msg += " Last error: %s" % error + msg += " All errors: %s" % ' -- '.join(errors) + self.module.fail_json(msg=msg) + return state + + def cluster_image_packages_get_zapi(self): + versions = [] + packages_obj = netapp_utils.zapi.NaElement('cluster-image-package-local-get-iter') + try: + result = self.server.invoke_successfully(packages_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting list of local packages: %s' % to_native(error), exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + packages_info = result.get_child_by_name('attributes-list') + versions = [packages_details.get_child_content('package-version') for packages_details in packages_info.get_children()] + return versions, None + + def cluster_image_packages_get_rest(self): + if not self.use_rest: + return self.cluster_image_packages_get_zapi() + api = 'cluster/software/packages' + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, fields='version') + return [record.get('version') for record in records] if records else [], error + + def 
cluster_image_package_delete_rest(self): + api = 'cluster/software/packages' + dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['package_version']) + if error: + self.module.fail_json(msg='Error deleting cluster software package for %s: %s' % (self.parameters['package_version'], error)) + + def apply(self): + """ + Apply action to update ONTAP software + """ + # TODO: cluster image update only works for HA configurations. + # check if node image update can be used for other cases. + versions, error = self.cluster_image_packages_get_rest() + already_downloaded = not error and self.parameters['package_version'] in versions + if self.parameters['state'] == 'absent': + if error: + self.module.fail_json(msg='Error: unable to fetch local package list: %s' % error) + changed = already_downloaded + else: + if already_downloaded: + self.module.warn('Package %s is already present, skipping download.' % self.parameters['package_version']) + elif not self.parameters.get('package_url'): + self.module.fail_json(msg='Error: packague_url is a required parameter to download the software package.') + changed = self.parameters['force_update'] or self.is_update_required() + validation_reports_after_update = ['only available after update'] + + results = {} + if not self.module.check_mode and changed: + if self.parameters['state'] == 'absent': + self.cluster_image_package_delete() + else: + if not already_downloaded: + already_downloaded = self.download_software() + if self.parameters['validate_after_download']: + self.validation_reports_after_download = self.cluster_image_validate() + if self.parameters['download_only']: + changed = not already_downloaded + else: + validation_reports_after_update = self.update_software() + results = { + 'validation_reports': str(validation_reports_after_update), + 'validation_reports_after_download': self.validation_reports_after_download, + 'validation_reports_after_update': validation_reports_after_update + } + + 
self.module.exit_json(changed=changed, **results) + + +def main(): + """Execute action""" + package_obj = NetAppONTAPSoftwareUpdate() + package_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py new file mode 100644 index 000000000..bf6ca9031 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ssh_command.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +''' +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Run cli commands on ONTAP over SSH using paramiko. + - Output is returned in C(stdout) and C(stderr), and also as C(stdout_lines), C(stdout_lines_filtered), C(stderr_lines). + - Note that the module can succeed even though the command failed. You need to analyze stdout and check the results. + - If the SSH host key is unknown and accepted, C(warnings) is updated. + - Options related to ZAPI or REST APIs are ignored. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +module: na_ontap_ssh_command +short_description: NetApp ONTAP Run any cli command over plain SSH using paramiko. +version_added: 20.8.0 +options: + command: + description: + - a string containing the command and arguments. + required: true + type: str + privilege: + description: + - privilege level at which to run the command, eg admin, advanced. + - if set, the command is prefixed with C(set -privilege ;). + type: str + accept_unknown_host_keys: + description: + - When false, reject the connection if the host key is not in known_hosts file. 
+ - When true, if the host key is unknown, accept it, but report a warning. + - Note that the key is not added to the file. You could add the key by manually using SSH. + type: bool + default: false + include_lines: + description: + - return only lines containing string pattern in C(stdout_lines_filtered) + default: '' + type: str + exclude_lines: + description: + - return only lines not containing string pattern in C(stdout_lines_filtered) + default: '' + type: str + service_processor: + description: + - whether the target system is ONTAP or the service processor (SP) + - only meaningful when privilege is set + aliases: [sp] + default: false + type: bool +''' + +EXAMPLES = """ + - name: run ontap cli command using SSH + na_ontap_ssh_command: + hostname: "{{ hostname }}" + username: "{{ admin_username }}" + password: "{{ admin_password }}" + command: version + + # Same as above, with parameters + - name: run ontap cli command + na_ontap_ssh_command: + hostname: "{{ hostname }}" + username: "{{ admin_username }}" + password: "{{ admin_password }}" + command: node show -fields node,health,uptime,model + privilege: admin + + # Same as above, but with lines filtering + - name: run ontap cli command + na_ontap_ssh_command: + hostname: "{{ hostname }}" + username: "{{ admin_username }}" + password: "{{ admin_password }}" + command: node show -fields node,health,uptime,model + exclude_lines: 'ode ' # Exclude lines with 'Node ' or 'node' + # use with caution! + accept_unknown_host_keys: true + privilege: admin + + - name: run ontap SSH command on SP + na_ontap_ssh_command: + # <<: *sp_login + command: sp switch-version + privilege: diag + sp: true + register: result + - debug: var=result +""" + +RETURN = """ +stdout_lines_filtered: + description: + - In addition to stdout and stdout_lines, a list of non-white lines, excluding last and failed login information. + - The list can be further refined using the include_lines and exclude_lines filters.
+ returned: always + type: list +""" + +import traceback +import warnings +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +try: + import paramiko + HAS_PARAMIKO = True +except ImportError: + HAS_PARAMIKO = False + + +class NetAppONTAPSSHCommand(object): + ''' calls a CLI command using SSH''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + command=dict(required=True, type='str'), + privilege=dict(required=False, type='str'), + accept_unknown_host_keys=dict(required=False, type='bool', default=False), + include_lines=dict(required=False, type='str', default=''), + exclude_lines=dict(required=False, type='str', default=''), + service_processor=dict(required=False, type='bool', default=False, aliases=['sp']), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + parameters = self.module.params + # set up state variables + self.command = parameters['command'] + self.privilege = parameters['privilege'] + self.include_lines = parameters['include_lines'] + self.exclude_lines = parameters['exclude_lines'] + self.accept_unknown_host_keys = parameters['accept_unknown_host_keys'] + self.service_processor = parameters['service_processor'] + self.warnings = list() + self.failed = False + + if not HAS_PARAMIKO: + self.module.fail_json(msg="the python paramiko module is required") + + client = paramiko.SSHClient() + client.load_system_host_keys() # load ~/.ssh/known_hosts if it exists + if self.accept_unknown_host_keys: + # accept unknown key, but raise a python warning + client.set_missing_host_key_policy(paramiko.WarningPolicy()) + + with warnings.catch_warnings(record=True) as wngs: + try: + client.connect(hostname=parameters['hostname'], username=parameters['username'], password=parameters['password']) + if 
def parse_output(self, out):
    '''Read a paramiko channel file and normalize ONTAP's \\r-heavy line endings to \\n.'''
    raw = out.read()
    # ONTAP makes copious use of \r
    raw = raw.replace(b'\r\r\n', b'\n')
    return raw.replace(b'\r\n', b'\n')

def run_ssh_command(self, command):
    ''' calls SSH '''
    try:
        stdin, stdout, stderr = self.client.exec_command(command)
    except paramiko.SSHException as exc:
        self.module.fail_json(msg='Error running command %s: %s' %
                                  (command, to_native(exc)),
                              exception=traceback.format_exc())
    stdin.close()  # if we don't close, we may see a TypeError
    return stdout, stderr

def filter_output(self, output):
    ''' Generate stdout_lines_filtered list
    Remove login information if found in the first non white lines
    '''
    filtered = []
    skipping_banner = True
    for raw_line in output.splitlines():
        try:
            line = raw_line.strip().decode()
        except Exception as exc:
            # undecodable output: record a warning, return what we have plus a marker line
            self.warnings.append("Unable to decode ONTAP output. Skipping filtering. Error: %s" % repr(exc))
            filtered.append('ERROR: truncated, cannot decode: %s' % raw_line)
            self.failed = False
            return filtered

        if not line:
            continue
        # drop the login banner lines that ONTAP prints before the command output
        if skipping_banner and line.startswith(('Last login time:', 'Unsuccessful login attempts since last login:')):
            continue
        skipping_banner = False
        if self.exclude_lines:
            # note: an empty include_lines matches every line
            if self.include_lines in line and self.exclude_lines not in line:
                filtered.append(line)
        elif self.include_lines:
            if self.include_lines in line:
                filtered.append(line)
        else:
            filtered.append(line)

    return filtered

def run_command(self):
    ''' build the CLI command (with optional privilege prefix) and run it over SSH '''
    command = self.command
    if self.privilege is not None:
        # SP uses 'priv set', ONTAP uses 'set -privilege'
        prefix = "priv set %s" if self.service_processor else "set -privilege %s"
        command = (prefix % self.privilege) + ";" + command
    stdout, stderr = self.run_ssh_command(command)
    out_string = self.parse_output(stdout)
    return out_string, self.filter_output(out_string), self.parse_output(stderr)

def apply(self):
    ''' calls the command and returns raw output '''
    changed = True
    stdout, filtered, stderr = '', '', ''
    if not self.module.check_mode:
        stdout, filtered, stderr = self.run_command()
        if stderr:
            self.failed = True
    self.module.exit_json(changed=changed, failed=self.failed, stdout=stdout,
                          stdout_lines_filtered=filtered, stderr=stderr, warnings=self.warnings)

def main():
    """
    Execute action from playbook
    """
    command = NetAppONTAPSSHCommand()
    command.apply()
+#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = """ +module: na_ontap_storage_auto_giveback +short_description: Enables or disables NetApp ONTAP storage auto giveback for a specified node +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Enable or disable storage auto giveback +options: + name: + description: + - Specifies the node name to enable or disable storage auto giveback on. + required: true + type: str + + auto_giveback_enabled: + description: + - specifies whether auto give back should be enabled or disabled + required: true + type: bool + + auto_giveback_after_panic_enabled: + description: + - specifies whether auto give back on panic should be enabled or disabled + type: bool + +""" + +EXAMPLES = """ + - name: Enable storage auto giveback + na_ontap_storage_auto_giveback: + name: node1 + auto_giveback_enabled: true + auto_giveback_after_panic_enabled: true + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Disable storage auto giveback + na_ontap_storage_auto_giveback: + name: node1 + auto_giveback_enabled: false + auto_giveback_after_panic_enabled: false + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + +""" + +RETURN = """ + +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from 
class NetAppOntapStorageAutoGiveback(object):
    """
    Enable or disable storage failover auto giveback for a specified node
    """
    def __init__(self):
        """
        Initialize the ONTAP Storage auto giveback class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            auto_giveback_enabled=dict(required=True, type='bool'),
            auto_giveback_after_panic_enabled=dict(required=False, type='bool')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()

        # ZAPI fallback requires the NetApp-Lib python package
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg='The python NetApp-Lib module is required')
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_storage_auto_giveback(self):
        """
        get the storage failover giveback options for a given node
        :return: dict for options
        """
        return_value = None

        if self.use_rest:
            # REST path uses the CLI passthrough endpoint
            api = "private/cli/storage/failover"
            query = {
                'fields': 'node,auto_giveback,auto_giveback_after_panic',
                'node': self.parameters['name'],
            }
            message, error = self.rest_api.get(api, query)
            records, error = rrh.check_for_0_or_1_records(api, message, error)

            if error is None and records is not None:
                record = message['records'][0]
                return_value = {
                    'name': record['node'],
                    'auto_giveback_enabled': record['auto_giveback'],
                    'auto_giveback_after_panic_enabled': record['auto_giveback_after_panic']
                }

            if error:
                self.module.fail_json(msg=error)

            if not records:
                error = "REST API did not return failover options for node %s" % (self.parameters['name'])
                self.module.fail_json(msg=error)

        else:
            zapi_request = netapp_utils.zapi.NaElement('cf-get-iter')
            try:
                result = self.server.invoke_successfully(zapi_request, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error getting auto giveback info for node %s: %s' % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

            if result.get_child_by_name('attributes-list'):
                # walk the failover records and pick the one matching our node
                for failover_info in result.get_child_by_name('attributes-list').get_children():
                    node_related_info = failover_info.get_child_by_name('sfo-node-info').get_child_by_name('node-related-info')
                    if node_related_info.get_child_content('node') == self.parameters['name']:
                        options_related_info = failover_info.get_child_by_name('sfo-options-info').get_child_by_name('options-related-info')
                        giveback_options = options_related_info.get_child_by_name('sfo-giveback-options-info').get_child_by_name('giveback-options')
                        return_value = {
                            'name': node_related_info.get_child_content('node'),
                            'auto_giveback_enabled': self.na_helper.get_value_for_bool(
                                True, options_related_info.get_child_content('auto-giveback-enabled')),
                            'auto_giveback_after_panic_enabled': self.na_helper.get_value_for_bool(
                                True, giveback_options.get_child_content('auto-giveback-after-panic-enabled')),
                        }
                        break

        return return_value

    def modify_storage_auto_giveback(self):
        """
        Modifies storage failover giveback options for a specified node
        """
        if self.use_rest:
            api = "private/cli/storage/failover"
            query = {'node': self.parameters['name']}
            body = {'auto_giveback': self.parameters['auto_giveback_enabled']}
            if 'auto_giveback_after_panic_enabled' in self.parameters:
                body['auto_giveback_after_panic'] = self.parameters['auto_giveback_after_panic_enabled']

            dummy, error = self.rest_api.patch(api, body, query)
            if error:
                self.module.fail_json(msg=error)

        else:
            modify_request = netapp_utils.zapi.NaElement('cf-modify-iter')
            attributes_info = netapp_utils.zapi.NaElement('options-related-info-modify')
            query_info = netapp_utils.zapi.NaElement('options-related-info-modify')

            attributes_info.add_new_child('node', self.parameters['name'])
            attributes_info.add_new_child('auto-giveback-enabled', self.na_helper.get_value_for_bool(
                from_zapi=False, value=self.parameters['auto_giveback_enabled']))

            if 'auto_giveback_after_panic_enabled' in self.parameters:
                giveback_info_modify = netapp_utils.zapi.NaElement('sfo-giveback-options-info-modify')
                giveback_options_modify = netapp_utils.zapi.NaElement('giveback-options-modify')
                giveback_options_modify.add_new_child('auto-giveback-after-panic-enabled', self.na_helper.get_value_for_bool(
                    from_zapi=False, value=self.parameters['auto_giveback_after_panic_enabled']))
                giveback_info_modify.add_child_elem(giveback_options_modify)
                attributes_info.add_child_elem(giveback_info_modify)

            # NOTE(review): the query element is sent empty (node is only set in attributes) - confirm against ZAPI docs
            query = netapp_utils.zapi.NaElement('query')
            attributes = netapp_utils.zapi.NaElement("attributes")
            query.add_child_elem(query_info)
            attributes.add_child_elem(attributes_info)

            modify_request.add_child_elem(query)
            modify_request.add_child_elem(attributes)

            try:
                self.server.invoke_successfully(modify_request, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying auto giveback for node %s: %s' % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())
self.get_storage_auto_giveback() + self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed: + if not self.module.check_mode: + self.modify_storage_auto_giveback() + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + """ + Enables or disables NetApp ONTAP storage auto giveback for a specified node + """ + obj = NetAppOntapStorageAutoGiveback() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_failover.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_failover.py new file mode 100644 index 000000000..ff9306ac6 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_failover.py @@ -0,0 +1,208 @@ +#!/usr/bin/python + +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_storage_failover +short_description: Enables or disables NetApp Ontap storage failover for a specified node +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.3.0' +author: NetApp Ansible Team (@carchi8py) + +description: + - Enable or disable storage failover + +options: + + state: + description: + - Whether storage failover should be enabled (present) or disabled (absent). + choices: ['present', 'absent'] + default: present + type: str + + node_name: + description: + - Specifies the node name to enable or disable storage failover. 
+ required: true + type: str + +""" + +EXAMPLES = """ +- name: Enable storage failover + netapp.ontap.na_ontap_storage_failover: + state: present + node_name: node1 + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Disable storage failover + netapp.ontap.na_ontap_storage_failover: + state: absent + node_name: node1 + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + +""" + +RETURN = """ + +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapStorageFailover: + """ + Enable or disable storage failover for a specified node + """ + def __init__(self): + """ + Initialize the Ontap Storage failover class + """ + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + node_name=dict(required=True, type='str'), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.parameters['is_enabled'] = self.parameters['state'] == 'present' + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def get_node_names(self): 
+ api = "cluster/nodes" + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, fields='name') + if records and not error: + records = [record['name'] for record in records] + return records, error + + def get_node_names_as_str(self): + names, error = self.get_node_names() + if error: + return 'failed to get list of nodes: %s' % error + if names: + return 'current nodes: %s' % ', '.join(names) + return 'could not get node names' + + def get_storage_failover(self): + """ + get the storage failover for a given node + :return: dict of is-enabled: true if enabled is true None if not + """ + + if self.use_rest: + return_value = None + api = "cluster/nodes" + query = { + 'fields': 'uuid,ha.enabled', + 'name': self.parameters['node_name'] + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + + if error: + self.module.fail_json(msg=error) + + if not record: + msg = self.get_node_names_as_str() + error = "REST API did not return failover details for node %s, %s" % (self.parameters['node_name'], msg) + self.module.fail_json(msg=error) + + return_value = {'uuid': record['uuid']} + if 'ha' in record: + return_value['is_enabled'] = record['ha']['enabled'] + + else: + storage_failover_get_iter = netapp_utils.zapi.NaElement('cf-status') + storage_failover_get_iter.add_new_child('node', self.parameters['node_name']) + + try: + result = self.server.invoke_successfully(storage_failover_get_iter, True) + return_value = {'is_enabled': self.na_helper.get_value_for_bool(True, result.get_child_content('is-enabled'))} + + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting storage failover info for node %s: %s' % ( + self.parameters['node_name'], to_native(error)), exception=traceback.format_exc()) + + return return_value + + def modify_storage_failover(self, current): + """ + Modifies storage failover for a specified node + """ + + if self.use_rest: + api = "cluster/nodes" + body = {'ha': {'enabled': 
self.parameters['is_enabled']}} + dummy, error = rest_generic.patch_async(self.rest_api, api, current['uuid'], body) + if error: + self.module.fail_json(msg=error) + + else: + + if self.parameters['state'] == 'present': + cf_service = 'cf-service-enable' + else: + cf_service = 'cf-service-disable' + + storage_failover_modify = netapp_utils.zapi.NaElement(cf_service) + storage_failover_modify.add_new_child('node', self.parameters['node_name']) + + try: + self.server.invoke_successfully(storage_failover_modify, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying storage failover for node %s: %s' % ( + self.parameters['node_name'], to_native(error)), exception=traceback.format_exc()) + + def apply(self): + current = self.get_storage_failover() + self.na_helper.get_modified_attributes(current, self.parameters) + if self.parameters['is_enabled'] and 'is_enabled' not in current: + self.module.fail_json(msg='HA is not available on node: %s.' % self.parameters['node_name']) + + if self.na_helper.changed and not self.module.check_mode: + self.modify_storage_failover(current) + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + """ + Enables or disables NetApp Ontap storage failover for a specified node + """ + + obj = NetAppOntapStorageFailover() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py new file mode 100644 index 000000000..9d5fc6c66 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm.py @@ -0,0 +1,939 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_svm +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: na_ontap_svm + 
+short_description: NetApp ONTAP SVM +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: +- Create, modify or delete SVM on NetApp ONTAP + +options: + + state: + description: + - Whether the specified SVM should exist or not. + choices: ['present', 'absent'] + default: 'present' + type: str + + name: + description: + - The name of the SVM to manage. + - vserver is a convenient alias when using module_defaults. + type: str + required: true + aliases: + - vserver + + from_name: + description: + - Name of the SVM to be renamed + type: str + version_added: 2.7.0 + + admin_state: + description: + - when the SVM is created, it will be in the running state, unless specified otherwise. + - This is ignored with ZAPI. + choices: ['running', 'stopped'] + type: str + version_added: 21.15.0 + + root_volume: + description: + - Root volume of the SVM. + - Cannot be modified after creation. + type: str + + root_volume_aggregate: + description: + - The aggregate on which the root volume will be created. + - Cannot be modified after creation. + type: str + + root_volume_security_style: + description: + - Security Style of the root volume. + - When specified as part of the vserver-create, + this field represents the security style for the Vserver root volume. + - When specified as part of vserver-get-iter call, + this will return the list of matching Vservers. + - The 'unified' security style, which applies only to Infinite Volumes, + cannot be applied to a Vserver's root volume. + - Cannot be modified after creation. + choices: ['unix', 'ntfs', 'mixed', 'unified'] + type: str + + allowed_protocols: + description: + - Allowed Protocols. + - This field represent the list of protocols allowed on the Vserver. + - When part of modify, + this field should include the existing list + along with new protocol list to be added to prevent data disruptions. 
+ - Possible values + - nfs NFS protocol, + - cifs CIFS protocol, + - fcp FCP protocol, + - iscsi iSCSI protocol, + - ndmp NDMP protocol, + - http HTTP protocol - ZAPI only, + - nvme NVMe protocol + type: list + elements: str + + services: + description: + - Enabled Protocols, only available with REST. + - The service will be started if needed. A valid license may be required. + - C(enabled) is not supported for CIFS, to enable it use na_ontap_cifs_server. + - If a service is not present, it is left unchanged. + type: dict + version_added: 21.10.0 + suboptions: + cifs: + description: + - CIFS protocol service + type: dict + suboptions: + allowed: + description: If true, an SVM administrator can manage the CIFS service. If false, only the cluster administrator can manage the service. + type: bool + iscsi: + description: + - iSCSI protocol service + type: dict + suboptions: + allowed: + description: If true, an SVM administrator can manage the iSCSI service. If false, only the cluster administrator can manage the service. + type: bool + enabled: + description: If allowed, setting to true enables the iSCSI service. + type: bool + fcp: + description: + - FCP protocol service + type: dict + suboptions: + allowed: + description: If true, an SVM administrator can manage the FCP service. If false, only the cluster administrator can manage the service. + type: bool + enabled: + description: If allowed, setting to true enables the FCP service. + type: bool + nfs: + description: + - NFS protocol service + type: dict + suboptions: + allowed: + description: If true, an SVM administrator can manage the NFS service. If false, only the cluster administrator can manage the service. + type: bool + enabled: + description: If allowed, setting to true enables the NFS service. + type: bool + nvme: + description: + - nvme protocol service + type: dict + suboptions: + allowed: + description: If true, an SVM administrator can manage the NVMe service. 
If false, only the cluster administrator can manage the service. + type: bool + enabled: + description: If allowed, setting to true enables the NVMe service. + type: bool + ndmp: + description: + - Network Data Management Protocol service + type: dict + suboptions: + allowed: + description: + - If this is set to true, an SVM administrator can manage the NDMP service + - If it is false, only the cluster administrator can manage the service. + - Requires ONTAP 9.7 or later. + type: bool + version_added: 21.24.0 + aggr_list: + description: + - List of aggregates assigned for volume operations. + - These aggregates could be shared for use with other Vservers. + - When specified as part of a vserver-create, + this field represents the list of aggregates + that are assigned to the Vserver for volume operations. + - When part of vserver-get-iter call, + this will return the list of Vservers + which have any of the aggregates specified as part of the aggr list. + type: list + elements: str + + ipspace: + description: + - IPSpace name + - Cannot be modified after creation. + type: str + version_added: 2.7.0 + + snapshot_policy: + description: + - Default snapshot policy setting for all volumes of the Vserver. + This policy will be assigned to all volumes created in this + Vserver unless the volume create request explicitly provides a + snapshot policy or volume is modified later with a specific + snapshot policy. A volume-level snapshot policy always overrides + the default Vserver-wide snapshot policy. 
+ version_added: 2.7.0 + type: str + + language: + description: + - Language to use for the SVM + - Default to C.UTF-8 + - Possible values Language + - c POSIX + - ar Arabic + - cs Czech + - da Danish + - de German + - en English + - en_us English (US) + - es Spanish + - fi Finnish + - fr French + - he Hebrew + - hr Croatian + - hu Hungarian + - it Italian + - ja Japanese euc-j + - ja_v1 Japanese euc-j + - ja_jp.pck Japanese PCK (sjis) + - ja_jp.932 Japanese cp932 + - ja_jp.pck_v2 Japanese PCK (sjis) + - ko Korean + - no Norwegian + - nl Dutch + - pl Polish + - pt Portuguese + - ro Romanian + - ru Russian + - sk Slovak + - sl Slovenian + - sv Swedish + - tr Turkish + - zh Simplified Chinese + - zh.gbk Simplified Chinese (GBK) + - zh_tw Traditional Chinese euc-tw + - zh_tw.big5 Traditional Chinese Big 5 + - utf8mb4 + - Most of the values accept a .utf_8 suffix, e.g. fr.utf_8 + type: str + version_added: 2.7.0 + + subtype: + description: + - The subtype for vserver to be created. + - Cannot be modified after creation. + choices: ['default', 'dp_destination', 'sync_source', 'sync_destination'] + type: str + version_added: 2.7.0 + + comment: + description: + - When specified as part of a vserver-create, this field represents the comment associated with the Vserver. + - When part of vserver-get-iter call, this will return the list of matching Vservers. + type: str + version_added: 2.8.0 + + ignore_rest_unsupported_options: + description: + - When true, ignore C(root_volume), C(root_volume_aggregate), C(root_volume_security_style) options if target supports REST. + - Ignored when C(use_rest) is set to never. + type: bool + default: false + version_added: 21.10.0 + + max_volumes: + description: + - Maximum number of volumes that can be created on the vserver. + - Expects an integer or C(unlimited). + type: str + version_added: 21.12.0 + + web: + description: + - web services security configuration. + - requires ONTAP 9.8 or later for certificate name. 
+ - requires ONTAP 9.10.1 or later for the other options. + type: dict + suboptions: + certificate: + description: + - name of certificate used by cluster and node management interfaces for TLS connection requests. + - The certificate must be of type "server". + type: str + client_enabled: + description: whether client authentication is enabled. + type: bool + ocsp_enabled: + description: whether online certificate status protocol verification is enabled. + type: bool +''' + +EXAMPLES = """ + + - name: Create SVM + netapp.ontap.na_ontap_svm: + state: present + name: ansibleVServer + root_volume: vol1 + root_volume_aggregate: aggr1 + root_volume_security_style: mixed + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create SVM + netapp.ontap.na_ontap_svm: + state: present + services: + cifs: + allowed: true + fcp: + allowed: true + nfs: + allowed: true + enabled: true + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + + - name: Stop SVM REST + netapp.ontap.na_ontap_svm: + state: present + name: ansibleVServer + admin_state: stopped + use_rest: always + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" +import copy +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver, zapis_svm + + +class NetAppOntapSVM(): + ''' create, delete, modify, rename SVM (aka vserver) ''' + + def __init__(self): + 
self.use_rest = False + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str', aliases=['vserver']), + from_name=dict(required=False, type='str'), + admin_state=dict(required=False, type='str', choices=['running', 'stopped']), + root_volume=dict(type='str'), + root_volume_aggregate=dict(type='str'), + root_volume_security_style=dict(type='str', choices=['unix', + 'ntfs', + 'mixed', + 'unified' + ]), + allowed_protocols=dict(type='list', elements='str'), + aggr_list=dict(type='list', elements='str'), + ipspace=dict(type='str', required=False), + snapshot_policy=dict(type='str', required=False), + language=dict(type='str', required=False), + subtype=dict(type='str', choices=['default', 'dp_destination', 'sync_source', 'sync_destination']), + comment=dict(type='str', required=False), + ignore_rest_unsupported_options=dict(type='bool', default=False), + max_volumes=dict(type='str'), + # TODO: add CIFS options, and S3 + services=dict(type='dict', options=dict( + cifs=dict(type='dict', options=dict(allowed=dict(type='bool'))), + iscsi=dict(type='dict', options=dict(allowed=dict(type='bool'), enabled=dict(type='bool'))), + fcp=dict(type='dict', options=dict(allowed=dict(type='bool'), enabled=dict(type='bool'))), + nfs=dict(type='dict', options=dict(allowed=dict(type='bool'), enabled=dict(type='bool'))), + nvme=dict(type='dict', options=dict(allowed=dict(type='bool'), enabled=dict(type='bool'))), + ndmp=dict(type='dict', options=dict(allowed=dict(type='bool'))), + )), + web=dict(type='dict', options=dict( + certificate=dict(type='str'), + client_enabled=dict(type='bool'), + ocsp_enabled=dict(type='bool'), + )) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[('allowed_protocols', 'services'), + ('services', 'root_volume'), + 
('services', 'root_volume_aggregate'), + ('services', 'root_volume_security_style')] + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + # Ontap documentation uses C.UTF-8, but actually stores as c.utf_8. + if 'language' in self.parameters and self.parameters['language'].lower() == 'c.utf-8': + self.parameters['language'] = 'c.utf_8' + + self.rest_api = OntapRestAPI(self.module) + # with REST, to force synchronous operations + self.timeout = self.rest_api.timeout + # with REST, to know which protocols to look for + self.allowable_protocols_rest = netapp_utils.get_feature(self.module, 'svm_allowable_protocols_rest') + self.allowable_protocols_zapi = netapp_utils.get_feature(self.module, 'svm_allowable_protocols_zapi') + self.use_rest = self.validate_options() + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + if self.parameters.get('admin_state') is not None: + self.parameters.pop('admin_state') + self.module.warn('admin_state is ignored when ZAPI is used.') + + def validate_int_or_string(self, value, astring): + if value is None or value == astring: + return + try: + int_value = int(value) + except ValueError: + int_value = None + if int_value is None or str(int_value) != value: + self.module.fail_json(msg="Error: expecting int value or '%s', got: %s - %s" % (astring, value, int_value)) + + def validate_options(self): + + # root volume not supported with rest api + unsupported_rest_properties = ['root_volume', 'root_volume_aggregate', 'root_volume_security_style'] + required_unsupported_rest_properties = [] if self.parameters['ignore_rest_unsupported_options'] else unsupported_rest_properties + ignored_unsupported_rest_properties = unsupported_rest_properties if self.parameters['ignore_rest_unsupported_options'] else [] + 
used_required_unsupported_rest_properties = [x for x in required_unsupported_rest_properties if x in self.parameters] + used_ignored_unsupported_rest_properties = [x for x in ignored_unsupported_rest_properties if x in self.parameters] + use_rest, error = self.rest_api.is_rest(used_required_unsupported_rest_properties) + if error is not None: + self.module.fail_json(msg=error) + if use_rest and used_ignored_unsupported_rest_properties: + self.module.warn('Using REST and ignoring: %s' % used_ignored_unsupported_rest_properties) + for attr in used_ignored_unsupported_rest_properties: + del self.parameters[attr] + if use_rest and 'aggr_list' in self.parameters and self.parameters['aggr_list'] == ['*']: + self.module.warn("Using REST and ignoring aggr_list: '*'") + del self.parameters['aggr_list'] + if use_rest and self.parameters.get('allowed_protocols') is not None: + # python 2.6 does not support dict comprehension with k: v + self.parameters['services'] = dict( + # using old semantics, anything not present is disallowed + (protocol, {'allowed': protocol in self.parameters['allowed_protocols']}) + for protocol in self.allowable_protocols_rest + ) + + if self.parameters.get('allowed_protocols'): + allowable = self.allowable_protocols_rest if use_rest else self.allowable_protocols_zapi + errors = [ + 'Unexpected value %s in allowed_protocols.' 
% protocol + for protocol in self.parameters['allowed_protocols'] + if protocol not in allowable + ] + if errors: + self.module.fail_json(msg='Error - %s' % ' '.join(errors)) + if use_rest and self.parameters.get('services') and not self.parameters.get('allowed_protocols') and self.parameters['services'].get('ndmp')\ + and not self.rest_api.meets_rest_minimum_version(use_rest, 9, 7): + self.module.fail_json(msg=self.rest_api.options_require_ontap_version('ndmp', '9.7', use_rest=use_rest)) + if self.parameters.get('services') and not use_rest: + self.module.fail_json(msg=self.rest_api.options_require_ontap_version('services', use_rest=use_rest)) + if self.parameters.get('web'): + if not use_rest or not self.rest_api.meets_rest_minimum_version(use_rest, 9, 8, 0): + self.module.fail_json(msg=self.rest_api.options_require_ontap_version('web', '9.8', use_rest=use_rest)) + if not self.rest_api.meets_rest_minimum_version(use_rest, 9, 10, 1): + suboptions = ('client_enabled', 'ocsp_enabled') + for suboption in suboptions: + if self.parameters['web'].get(suboption) is not None: + self.module.fail_json(msg=self.rest_api.options_require_ontap_version(suboptions, '9.10.1', use_rest=use_rest)) + if self.parameters['web'].get('certificate'): + # so that we can compare UUIDs while using a more friendly name in the user interface + self.parameters['web']['certificate'] = {'name': self.parameters['web']['certificate']} + self.set_certificate_uuid() + + self.validate_int_or_string(self.parameters.get('max_volumes'), 'unlimited') + return use_rest + + def clean_up_output(self, vserver_details): + vserver_details['root_volume'] = None + vserver_details['root_volume_aggregate'] = None + vserver_details['root_volume_security_style'] = None + vserver_details['aggr_list'] = [aggr['name'] for aggr in vserver_details['aggregates']] + vserver_details.pop('aggregates') + vserver_details['ipspace'] = vserver_details['ipspace']['name'] + vserver_details['snapshot_policy'] = 
vserver_details['snapshot_policy']['name'] + vserver_details['admin_state'] = vserver_details.pop('state') + if 'max_volumes' in vserver_details: + vserver_details['max_volumes'] = str(vserver_details['max_volumes']) + if vserver_details.get('web') is None and self.parameters.get('web'): + # force an entry to enable modify + vserver_details['web'] = { + 'certificate': { + # ignore name, as only certificate UUID is supported in svm/svms/uuid/web + 'uuid': vserver_details['certificate']['uuid'] if 'certificate' in vserver_details else None, + }, + 'client_enabled': None, + 'ocsp_enabled': None + } + + services = {} + # REST returns allowed: True/False with recent versions, and a list of protocols in allowed_protocols for older versions + allowed_protocols = (None if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1) + else vserver_details.get('allowed_protocols')) + + for protocol in self.allowable_protocols_rest: + # protocols are not present when the vserver is stopped + allowed = self.na_helper.safe_get(vserver_details, [protocol, 'allowed']) + if allowed is None and allowed_protocols is not None: + # earlier ONTAP versions + allowed = protocol in allowed_protocols + enabled = self.na_helper.safe_get(vserver_details, [protocol, 'enabled']) + if allowed is not None or enabled is not None: + services[protocol] = {} + if allowed is not None: + services[protocol]['allowed'] = allowed + if enabled is not None: + services[protocol]['enabled'] = enabled + + if services: + vserver_details['services'] = services + + return vserver_details + + def get_certificates(self, cert_type): + """Retrieve list of certificates""" + api = 'security/certificates' + query = { + 'svm.name': self.parameters['name'], + 'type': cert_type + } + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, query) + if error: + self.module.fail_json(msg='Error retrieving certificates: %s' % error) + return [record['name'] for record in records] if records else [] + + def 
set_certificate_uuid(self): + """Retrieve certificate uuid for 9.8 or later""" + api = 'security/certificates' + query = { + 'name': self.parameters['web']['certificate']['name'], + 'svm.name': self.parameters['name'], + 'type': 'server' + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg='Error retrieving certificate %s: %s' % (self.parameters['web']['certificate'], error)) + if not record: + self.module.fail_json(msg='Error certificate not found: %s. Current certificates with type=server: %s' + % (self.parameters['web']['certificate'], self.get_certificates('server'))) + self.parameters['web']['certificate']['uuid'] = record['uuid'] + + def get_web_service(self, uuid): + """Retrieve web service info for 9.10.1 or later""" + api = 'svm/svms/%s/web' % uuid + record, error = rest_generic.get_one_record(self.rest_api, api) + if error: + self.module.fail_json(msg='Error retrieving web info: %s' % error) + return record + + def get_vserver(self, vserver_name=None): + """ + Checks if vserver exists. + + :return: + vserver object if vserver found + None if vserver is not found + :rtype: object/None + """ + if vserver_name is None: + vserver_name = self.parameters['name'] + + if self.use_rest: + fields = 'subtype,aggregates,language,snapshot_policy,ipspace,comment,nfs,cifs,fcp,iscsi,nvme,state' + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + fields += ',max_volumes' + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 8, 0) and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + # certificate is available starting with 9.7 and is deprecated with 9.10.1. 
+ # we don't use certificate with 9.7 as name is only supported with 9.8 in /security/certificates + fields += ',certificate' + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + fields += ',ndmp' + + record, error = rest_vserver.get_vserver(self.rest_api, vserver_name, fields) + if error: + self.module.fail_json(msg=error) + if record: + if self.parameters.get('web') and self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + # only collect the info if the user wants to configure the web service, and ONTAP supports it + record['web'] = self.get_web_service(record['uuid']) + if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + # 9.6 to 9.8 do not support max_volumes for svm/svms, using private/cli + record['allowed_protocols'], max_volumes = self.get_allowed_protocols_and_max_volumes() + if self.parameters.get('max_volumes') is not None: + record['max_volumes'] = max_volumes + return self.clean_up_output(copy.deepcopy(record)) + return None + + return zapis_svm.get_vserver(self.server, vserver_name) + + def create_vserver(self): + if self.use_rest: + self.create_vserver_rest() + else: + options = {'vserver-name': self.parameters['name']} + self.add_parameter_to_dict(options, 'root_volume', 'root-volume') + self.add_parameter_to_dict(options, 'root_volume_aggregate', 'root-volume-aggregate') + self.add_parameter_to_dict(options, 'root_volume_security_style', 'root-volume-security-style') + self.add_parameter_to_dict(options, 'language', 'language') + self.add_parameter_to_dict(options, 'ipspace', 'ipspace') + self.add_parameter_to_dict(options, 'snapshot_policy', 'snapshot-policy') + self.add_parameter_to_dict(options, 'subtype', 'vserver-subtype') + self.add_parameter_to_dict(options, 'comment', 'comment') + vserver_create = netapp_utils.zapi.NaElement.create_node_with_children('vserver-create', **options) + try: + self.server.invoke_successfully(vserver_create, + enable_tunneling=False) + except 
netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error provisioning SVM %s: %s' + % (self.parameters['name'], to_native(exc)), + exception=traceback.format_exc()) + # add allowed-protocols, aggr-list, max_volume after creation + # since vserver-create doesn't allow these attributes during creation + # python 2.6 does not support dict comprehension {k: v for ...} + options = dict( + (key, self.parameters[key]) + for key in ('allowed_protocols', 'aggr_list', 'max_volumes') + if self.parameters.get(key) + ) + if options: + self.modify_vserver(options) + + def create_body_contents(self, modify=None): + keys_to_modify = self.parameters.keys() if modify is None else modify.keys() + protocols_to_modify = self.parameters.get('services', {}) if modify is None else modify.get('services', {}) + simple_keys = ['name', 'language', 'ipspace', 'snapshot_policy', 'subtype', 'comment'] + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + simple_keys.append('max_volumes') + body = dict( + (key, self.parameters[key]) + for key in simple_keys + if self.parameters.get(key) and key in keys_to_modify + ) + # admin_state is only supported in modify + if modify and 'admin_state' in keys_to_modify: + body['state'] = self.parameters['admin_state'] + if 'aggr_list' in keys_to_modify: + body['aggregates'] = [{'name': aggr} for aggr in self.parameters['aggr_list']] + if 'certificate' in keys_to_modify: + body['certificate'] = modify['certificate'] + allowed_protocols = {} + for protocol, config in protocols_to_modify.items(): + # Ansible sets unset suboptions to None + if not config: + continue + # Ansible sets unset suboptions to None + acopy = self.na_helper.filter_out_none_entries(config) + if modify is not None: + # REST does not allow to modify this directly + acopy.pop('enabled', None) + if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + # allowed is not supported in earlier REST versions + allowed = acopy.pop('allowed', None) + 
# if allowed is not set, retrieve current value + if allowed is not None: + allowed_protocols[protocol] = allowed + if acopy: + body[protocol] = acopy + return body, allowed_protocols + + def get_allowed_protocols_and_max_volumes(self): + # use REST CLI for older versions of ONTAP + query = {'vserver': self.parameters['name']} + fields = 'allowed_protocols' + if self.parameters.get('max_volumes') is not None: + fields += ',max_volumes' + response, error = rest_generic.get_one_record(self.rest_api, 'private/cli/vserver', query, fields) + if error: + self.module.fail_json(msg='Error getting vserver info: %s - %s' % (error, response)) + if response and 'max_volumes' in response: + max_volumes = str(response['max_volumes']) + allowed_protocols, max_volumes = [], None + if response and 'allowed_protocols' in response: + allowed_protocols = response['allowed_protocols'] + if response and 'max_volumes' in response: + max_volumes = str(response['max_volumes']) + return allowed_protocols, max_volumes + + def rest_cli_set_max_volumes(self): + # use REST CLI for older versions of ONTAP + query = {'vserver': self.parameters['name']} + body = {'max_volumes': self.parameters['max_volumes']} + response, error = rest_generic.patch_async(self.rest_api, 'private/cli/vserver', None, body, query) + if error: + self.module.fail_json(msg='Error updating max_volumes: %s - %s' % (error, response)) + + def rest_cli_add_remove_protocols(self, protocols): + protocols_to_add = [protocol for protocol, value in protocols.items() if value] + if protocols_to_add: + self.rest_cli_add_protocols(protocols_to_add) + protocols_to_delete = [protocol for protocol, value in protocols.items() if not value] + if protocols_to_delete: + self.rest_cli_remove_protocols(protocols_to_delete) + + def rest_cli_add_protocols(self, protocols): + # use REST CLI for older versions of ONTAP + query = {'vserver': self.parameters['name']} + body = {'protocols': protocols} + response, error = 
rest_generic.patch_async(self.rest_api, 'private/cli/vserver/add-protocols', None, body, query) + if error: + self.module.fail_json(msg='Error adding protocols: %s - %s' % (error, response)) + + def rest_cli_remove_protocols(self, protocols): + # use REST CLI for older versions of ONTAP + query = {'vserver': self.parameters['name']} + body = {'protocols': protocols} + response, error = rest_generic.patch_async(self.rest_api, 'private/cli/vserver/remove-protocols', None, body, query) + if error: + self.module.fail_json(msg='Error removing protocols: %s - %s' % (error, response)) + + def create_vserver_rest(self): + # python 2.6 does not support dict comprehension {k: v for ...} + body, allowed_protocols = self.create_body_contents() + dummy, error = rest_generic.post_async(self.rest_api, 'svm/svms', body, timeout=self.timeout) + if error: + self.module.fail_json(msg='Error in create: %s' % error) + # add max_volumes and update allowed protocols after creation for older ONTAP versions + if self.parameters.get('max_volumes') is not None and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + self.rest_cli_set_max_volumes() + if allowed_protocols: + self.rest_cli_add_remove_protocols(allowed_protocols) + + def delete_vserver(self, current=None): + if self.use_rest: + if current is None: + self.module.fail_json(msg='Internal error, expecting SVM object in delete') + dummy, error = rest_generic.delete_async(self.rest_api, 'svm/svms', current['uuid'], timeout=self.timeout) + if error: + self.module.fail_json(msg='Error in delete: %s' % error) + else: + vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'vserver-destroy', **{'vserver-name': self.parameters['name']}) + + try: + self.server.invoke_successfully(vserver_delete, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error deleting SVM %s: %s' + % (self.parameters['name'], to_native(exc)), + exception=traceback.format_exc()) 
+ + def rename_vserver(self): + ''' ZAPI only, for REST it is handled as a modify''' + vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children( + 'vserver-rename', **{'vserver-name': self.parameters['from_name'], + 'new-name': self.parameters['name']}) + + try: + self.server.invoke_successfully(vserver_rename, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error renaming SVM %s: %s' + % (self.parameters['from_name'], to_native(exc)), + exception=traceback.format_exc()) + + def modify_vserver(self, modify, current=None): + ''' + Modify vserver. + :param modify: dict of attributes to modify + :param current: with rest, SVM object to modify + ''' + if self.use_rest: + if current is None: + self.module.fail_json(msg='Internal error, expecting SVM object in modify.') + if not modify: + self.module.fail_json(msg='Internal error, expecting something to modify in modify.') + # REST reports an error if we modify the name and something else at the same time + if 'name' in modify: + body = {'name': modify['name']} + dummy, error = rest_generic.patch_async(self.rest_api, 'svm/svms', current['uuid'], body, timeout=self.timeout) + if error: + self.module.fail_json(msg='Error in rename: %s' % error, modify=modify) + del modify['name'] + if 'web' in modify and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + # certificate is a deprecated field for 9.10.1, only use it for 9.8 and 9.9 + uuid = self.na_helper.safe_get(modify, ['web', 'certificate', 'uuid']) + if uuid: + modify['certificate'] = {'uuid': uuid} + modify.pop('web') + body, allowed_protocols = self.create_body_contents(modify) + if body: + dummy, error = rest_generic.patch_async(self.rest_api, 'svm/svms', current['uuid'], body, timeout=self.timeout) + if error: + self.module.fail_json(msg='Error in modify: %s' % error, modify=modify) + # use REST CLI for max_volumes and allowed protocols with older ONTAP versions + if 'max_volumes' 
in modify and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + self.rest_cli_set_max_volumes() + if allowed_protocols: + self.rest_cli_add_remove_protocols(allowed_protocols) + if 'services' in modify: + self.modify_services(modify, current) + if 'web' in modify and self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + self.modify_web_services(modify['web'], current) + else: + zapis_svm.modify_vserver(self.server, self.module, self.parameters['name'], modify, self.parameters) + + def modify_services(self, modify, current): + apis = { + 'fcp': 'protocols/san/fcp/services', + 'iscsi': 'protocols/san/iscsi/services', + 'nfs': 'protocols/nfs/services', + 'nvme': 'protocols/nvme/services', + 'ndmp': 'protocols/ndmp/svms' + } + for protocol, config in modify['services'].items(): + enabled = config.get('enabled') + if enabled is None: + # nothing to do + continue + api = apis.get(protocol) + if not api: + self.module.fail_json(msg='Internal error, unexpecting service: %s.' 
% protocol) + if enabled: + # we don't know if the service is already started or not, link will tell us + link = self.na_helper.safe_get(current, [protocol, '_links', 'self', 'href']) + body = {'enabled': enabled} + if enabled and not link: + body['svm.name'] = self.parameters['name'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + else: + dummy, error = rest_generic.patch_async(self.rest_api, api, current['uuid'], body) + if error: + self.module.fail_json(msg='Error in modify service for %s: %s' % (protocol, error)) + + def modify_web_services(self, record, current): + """Patch web service for 9.10.1 or later""" + api = 'svm/svms/%s/web' % current['uuid'] + if 'certificate' in record: + # API only accepts a UUID + record['certificate'].pop('name', None) + body = self.na_helper.filter_out_none_entries(copy.deepcopy(record)) + if not body: + self.module.warn('Nothing to change: %s' % record) + return + dummy, error = rest_generic.patch_async(self.rest_api, api, None, body) + if error: + self.module.fail_json(msg='Error in modify web service for %s: %s' % (body, error)) + + def add_parameter_to_dict(self, adict, name, key=None, tostr=False): + ''' + add defined parameter (not None) to adict using key. + :param adict: a dictionary. + :param name: name in self.parameters. + :param key: key in adict. + :param tostr: boolean. + ''' + if key is None: + key = name + if self.parameters.get(name) is not None: + if tostr: + adict[key] = str(self.parameters.get(name)) + else: + adict[key] = self.parameters.get(name) + + def warn_when_possible_language_match(self, desired, current): + transformed = desired.lower().replace('-', '_') + if transformed == current: + self.module.warn("Attempting to change language from ONTAP value %s to %s. Use %s to suppress this warning and maintain idempotency." 
+ % (current, desired, current)) + + def apply(self): + '''Call create/modify/delete operations.''' + current = self.get_vserver() + cd_action, rename = None, None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'create' and self.parameters.get('from_name'): + # create by renaming existing SVM + old_svm = self.get_vserver(self.parameters['from_name']) + rename = self.na_helper.is_rename_action(old_svm, current) + if rename is None: + self.module.fail_json(msg='Error renaming SVM %s: no SVM with from_name %s.' % (self.parameters['name'], self.parameters['from_name'])) + if rename: + current = old_svm + cd_action = None + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else {} + if 'language' in modify: + self.warn_when_possible_language_match(modify['language'], current['language']) + fixed_attributes = ['root_volume', 'root_volume_aggregate', 'root_volume_security_style', 'subtype', 'ipspace'] + msgs = ['%s - current: %s - desired: %s' % (attribute, current[attribute], self.parameters[attribute]) + for attribute in fixed_attributes + if attribute in modify] + if msgs: + self.module.fail_json(msg='Error modifying SVM %s: cannot modify %s.' % (self.parameters['name'], ', '.join(msgs))) + + if self.na_helper.changed and not self.module.check_mode: + if rename: + if self.use_rest: + modify['name'] = self.parameters['name'] + else: + self.rename_vserver() + modify.pop('name', None) + # If rename is True, cd_action is None, but modify could be true or false. 
+ if cd_action == 'create': + self.create_vserver() + if self.parameters.get('admin_state') == 'stopped': + current = self.get_vserver() + modify = {'admin_state': 'stopped'} + elif cd_action == 'delete': + self.delete_vserver(current) + if modify: + self.modify_vserver(modify, current) + if modify and 'aggr_list' in modify and '*' in modify['aggr_list']: + self.module.warn("na_ontap_svm: changed always 'True' when aggr_list is '*'.") + results = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**results) + + +def main(): + '''Apply vserver operations from playbook''' + svm = NetAppOntapSVM() + svm.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py new file mode 100644 index 000000000..c018a7c21 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_svm_options.py @@ -0,0 +1,163 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +short_description: NetApp ONTAP Modify SVM Options +author: NetApp Ansible Team (@carchi8py) +description: + - Modify ONTAP SVM Options + - Only Options that appear on "vserver options show" can be set + - This module only supports ZAPI and is deprecated. + - The final version of ONTAP to support ZAPI is 9.12.1. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +module: na_ontap_svm_options +version_added: 2.7.0 +options: + name: + description: + - Name of the option. + type: str + value: + description: + - Value of the option. 
+    - Value must be in quote
+    type: str
+  vserver:
+    description:
+    - The name of the vserver to which this option belongs to.
+    required: True
+    type: str
+'''
+
+EXAMPLES = """
+    - name: Set SVM Options
+      na_ontap_svm_options:
+        vserver: "{{ netapp_vserver_name }}"
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+        name: snmp.enable
+        value: 'on'
+"""
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+# NOTE(review): class name reads 'Svn' -- presumably a typo for 'Svm'; kept as-is since main() instantiates it by this name.
+class NetAppONTAPSvnOptions(object):
+    """Set an SVM option through ZAPI when it is not already at the desired value."""
+
+    def __init__(self):
+        self.argument_spec = netapp_utils.na_ontap_zapi_only_spec()
+        self.argument_spec.update(dict(
+            name=dict(required=False, type="str", default=None),
+            value=dict(required=False, type='str', default=None),
+            vserver=dict(required=True, type='str')
+
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # this module is ZAPI-only and deprecated; emit the standard deprecation warning
+        self.na_helper.module_deprecated(self.module)
+        if HAS_NETAPP_LIB is False:
+            self.module.fail_json(msg="the python NetApp-Lib module is required")
+        else:
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+        return
+
+    def set_options(self):
+        """
+        Set a specific option
+        :return: None
+        """
+        option_obj = netapp_utils.zapi.NaElement("options-set")
+        option_obj.add_new_child('name', self.parameters['name'])
+        option_obj.add_new_child('value', self.parameters['value'])
+        try:
+            self.server.invoke_successfully(option_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error setting options: %s" % to_native(error), exception=traceback.format_exc())
+
+    def list_options(self):
+        """
+        List all Options on the Vserver
+        :return: None
+        """
+        # NOTE(review): the ZAPI reply is discarded; this helper currently only verifies the call succeeds.
+        option_obj = netapp_utils.zapi.NaElement("options-list-info")
+        try:
+            self.server.invoke_successfully(option_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error getting options: %s" % to_native(error), exception=traceback.format_exc())
+
+    def is_option_set(self):
+        """
+        Checks to see if an option is set or not
+        :return: If option is set return True, else return False
+        """
+        option_obj = netapp_utils.zapi.NaElement("options-get-iter")
+        options_info = netapp_utils.zapi.NaElement("option-info")
+        # the query matches name AND value, so an option set to a different value reads as 'not set'
+        if self.parameters.get('name') is not None:
+            options_info.add_new_child("name", self.parameters['name'])
+        if self.parameters.get('value') is not None:
+            options_info.add_new_child("value", self.parameters['value'])
+        if "vserver" in self.parameters.keys():
+            if self.parameters['vserver'] is not None:
+                options_info.add_new_child("vserver", self.parameters['vserver'])
+        query = netapp_utils.zapi.NaElement("query")
+        query.add_child_elem(options_info)
+        option_obj.add_child_elem(query)
+        try:
+            result = self.server.invoke_successfully(option_obj, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg="Error finding option: %s" % to_native(error), exception=traceback.format_exc())
+
+        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+            return True
+        return False
+
+    def apply(self):
+        changed = False
+        is_set = self.is_option_set()
+        if not is_set:
+            # check_mode: report the pending change without invoking options-set
+            if self.module.check_mode:
+                pass
+            else:
+                self.set_options()
+            changed = True
+        self.module.exit_json(changed=changed)
+
+
+def main():
+    """
+    Execute action from playbook
+    :return: none
+    """
+    cg_obj = NetAppONTAPSvnOptions()
+    cg_obj.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git
a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py
new file mode 100644
index 000000000..46344e381
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_ucadapter.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+
+module: na_ontap_ucadapter
+short_description: NetApp ONTAP UC adapter configuration
+extends_documentation_fragment:
+    - netapp.ontap.netapp.na_ontap
+version_added: 2.6.0
+author: NetApp Ansible Team (@carchi8py)
+
+description:
+    - modify the UC adapter mode and type taking pending type and mode into account.
+
+options:
+  state:
+    description:
+      - Whether the specified adapter should exist.
+    required: false
+    choices: ['present']
+    default: 'present'
+    type: str
+
+  adapter_name:
+    description:
+      - Specifies the adapter name.
+    required: true
+    type: str
+
+  node_name:
+    description:
+      - Specifies the adapter home node.
+    required: true
+    type: str
+
+  mode:
+    description:
+      - Specifies the mode of the adapter.
+    type: str
+
+  type:
+    description:
+      - Specifies the fc4 type of the adapter.
+    type: str
+
+  pair_adapters:
+    description:
+      - Specifies the list of adapters which also need to be offline along with the current adapter during modifying.
+      - If specified adapter works in a group or pair, the other adapters might also need to offline before modify the specified adapter.
+      - The mode of pair_adapters are modified along with the adapter, the type of the pair_adapters are not modified.
+    type: list
+    elements: str
+    version_added: '20.6.0'
+
+'''
+
+# NOTE(review): the example below invokes 'na_ontap_adapter' while this module is na_ontap_ucadapter -- confirm intended name.
+EXAMPLES = '''
+    - name: Modify adapter
+      netapp.ontap.na_ontap_adapter:
+        state: present
+        adapter_name: 0e
+        pair_adapters: 0f
+        node_name: laurentn-vsim1
+        mode: fc
+        type: target
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapadapter:
+    ''' object to describe adapter info '''
+
+    def __init__(self):
+
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present'], default='present', type='str'),
+            adapter_name=dict(required=True, type='str'),
+            node_name=dict(required=True, type='str'),
+            mode=dict(required=False, type='str'),
+            type=dict(required=False, type='str'),
+            pair_adapters=dict(required=False, type='list', elements='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            supports_check_mode=True
+        )
+        # uuids of the target adapter and its pair adapters, populated on the REST path by get_adapters_uuids()
+        self.adapters_uuids = {}
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # Set up Rest API
+        self.rest_api = netapp_utils.OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+
+        if not self.use_rest:
+            if not netapp_utils.has_netapp_lib():
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+    def get_adapter(self):
+        """
+        Return details about the adapter
+        :param:
+            name : Name of the name of the adapter
+
+        :return: Details about the adapter. None if not found.
+        :rtype: dict
+        """
+        if self.use_rest:
+            return self.get_adapter_rest()
+        adapter_info = netapp_utils.zapi.NaElement('ucm-adapter-get')
+        adapter_info.add_new_child('adapter-name', self.parameters['adapter_name'])
+        adapter_info.add_new_child('node-name', self.parameters['node_name'])
+        try:
+            result = self.server.invoke_successfully(adapter_info, True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching ucadapter details: %s: %s'
+                                      % (self.parameters['node_name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        if result.get_child_by_name('attributes'):
+            adapter_attributes = result.get_child_by_name('attributes').\
+                get_child_by_name('uc-adapter-info')
+            return_value = {
+                'mode': adapter_attributes.get_child_content('mode'),
+                'pending-mode': adapter_attributes.get_child_content('pending-mode'),
+                'type': adapter_attributes.get_child_content('fc4-type'),
+                'pending-type': adapter_attributes.get_child_content('pending-fc4-type'),
+                'status': adapter_attributes.get_child_content('status'),
+            }
+            return return_value
+        return None
+
+    def modify_adapter(self):
+        """
+        Modify the adapter.
+ """ + if self.use_rest: + return self.modify_adapter_rest() + params = {'adapter-name': self.parameters['adapter_name'], + 'node-name': self.parameters['node_name']} + if self.parameters.get('type') is not None: + params['fc4-type'] = self.parameters['type'] + if self.parameters.get('mode') is not None: + params['mode'] = self.parameters['mode'] + adapter_modify = netapp_utils.zapi.NaElement.create_node_with_children( + 'ucm-adapter-modify', ** params) + try: + self.server.invoke_successfully(adapter_modify, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error modifying adapter %s: %s' % (self.parameters['adapter_name'], to_native(e)), + exception=traceback.format_exc()) + + def online_or_offline_adapter(self, status, adapter_name): + """ + Bring a Fibre Channel target adapter offline/online. + """ + if self.use_rest: + return self.online_or_offline_adapter_rest(status, adapter_name) + if status == 'down': + adapter = netapp_utils.zapi.NaElement('fcp-adapter-config-down') + elif status == 'up': + adapter = netapp_utils.zapi.NaElement('fcp-adapter-config-up') + adapter.add_new_child('fcp-adapter', adapter_name) + adapter.add_new_child('node', self.parameters['node_name']) + try: + self.server.invoke_successfully(adapter, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error trying to %s fc-adapter %s: %s' % (status, adapter_name, to_native(e)), + exception=traceback.format_exc()) + + def get_adapters_uuids(self): + missing_adapters = [] + adapters = [self.parameters['adapter_name']] + self.parameters.get('pair_adapters', []) + for adapter in adapters: + adapter_uuid = self.get_adapter_uuid(adapter) + if adapter_uuid is None: + missing_adapters.append(adapter) + else: + self.adapters_uuids[adapter] = adapter_uuid + if missing_adapters: + self.module.fail_json(msg="Error: Adapter(s) %s not exist" % (', ').join(missing_adapters)) + + def get_adapter_uuid(self, 
adapter): + api = 'network/fc/ports' + params = { + 'name': adapter, + 'node.name': self.parameters['node_name'], + 'fields': 'uuid' + } + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg="Error fetching adapter %s uuid" % adapter) + return record['uuid'] if record else None + + def get_adapter_rest(self): + api = 'private/cli/ucadmin' + params = { + 'node': self.parameters['node_name'], + 'adapter': self.parameters['adapter_name'], + 'fields': 'pending_mode,pending_type,current_mode,current_type,status_admin' + } + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error fetching ucadapter details: %s: %s' + % (self.parameters['node_name'], to_native(error))) + if record: + return { + 'mode': self.na_helper.safe_get(record, ['current_mode']), + 'pending-mode': self.na_helper.safe_get(record, ['pending_mode']), + 'type': self.na_helper.safe_get(record, ['current_type']), + 'pending-type': self.na_helper.safe_get(record, ['pending_type']), + 'status': self.na_helper.safe_get(record, ['status_admin']) + } + return None + + def modify_adapter_rest(self): + api = 'private/cli/ucadmin' + query = { + 'node': self.parameters['node_name'], + 'adapter': self.parameters['adapter_name'] + } + body = {} + if self.parameters.get('type') is not None: + body['type'] = self.parameters['type'] + if self.parameters.get('mode') is not None: + body['mode'] = self.parameters['mode'] + dummy, error = rest_generic.patch_async(self.rest_api, api, None, body, query) + if error: + self.module.fail_json(msg='Error modifying adapter %s: %s' % (self.parameters['adapter_name'], to_native(error))) + + def online_or_offline_adapter_rest(self, status, adapter_name): + api = 'network/fc/ports' + body = {'enabled': True if status == 'up' else False} + dummy, error = rest_generic.patch_async(self.rest_api, api, self.adapters_uuids[adapter_name], body) + if error: + 
            self.module.fail_json(msg='Error trying to %s fc-adapter %s: %s' % (status, adapter_name, to_native(error)))
+
+    def apply(self):
+        ''' calling all adapter features '''
+        changed = False
+        current = self.get_adapter()
+
+        # a pending value (a previous modify awaiting activation) takes precedence over the current value
+        def need_to_change(expected, pending, current):
+            if expected is None:
+                return False
+            elif pending is not None:
+                return pending != expected
+            elif current is not None:
+                return current != expected
+            return False
+
+        if current:
+            if self.parameters.get('type') is not None:
+                changed = need_to_change(self.parameters['type'], current['pending-type'], current['type'])
+            changed = changed or need_to_change(self.parameters.get('mode'), current['pending-mode'], current['mode'])
+        if changed and self.use_rest:
+            self.get_adapters_uuids()
+        if changed and not self.module.check_mode:
+            # adapters must be offline before mode/type can change: down -> modify -> up
+            self.online_or_offline_adapter('down', self.parameters['adapter_name'])
+            if self.parameters.get('pair_adapters') is not None:
+                for adapter in self.parameters['pair_adapters']:
+                    self.online_or_offline_adapter('down', adapter)
+            self.modify_adapter()
+            self.online_or_offline_adapter('up', self.parameters['adapter_name'])
+            if self.parameters.get('pair_adapters') is not None:
+                for adapter in self.parameters['pair_adapters']:
+                    self.online_or_offline_adapter('up', adapter)
+
+        self.module.exit_json(changed=changed)
+
+
+def main():
+    adapter = NetAppOntapadapter()
+    adapter.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py
new file mode 100644
index 000000000..76f28ad96
--- /dev/null
+++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_group.py
@@ -0,0 +1,459 @@
+#!/usr/bin/python
+"""
+na_ontap_unix_group
+"""
+
+# (c) 2019-2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+author: NetApp Ansible Team (@carchi8py)
+description:
+  - "Create/Delete Unix user group"
+extends_documentation_fragment:
+  - netapp.ontap.netapp.na_ontap
+module: na_ontap_unix_group
+options:
+  state:
+    description:
+    - Whether the specified group should exist or not.
+    choices: ['present', 'absent']
+    type: str
+    default: 'present'
+
+  name:
+    description:
+    - Specifies UNIX group's name, unique for each group.
+    - Non-modifiable.
+    required: true
+    type: str
+
+  id:
+    description:
+    - Specifies an identification number for the UNIX group.
+    - Group ID is unique for each UNIX group.
+    - Required for create, modifiable.
+    type: int
+
+  vserver:
+    description:
+    - Specifies the Vserver for the UNIX group.
+    - Non-modifiable.
+    required: true
+    type: str
+
+  skip_name_validation:
+    description:
+    - Specifies if group name validation is skipped.
+    type: bool
+
+  users:
+    description:
+    - Specifies the users associated with this group. Should be comma separated.
+    - It represents the expected state of a list of users at any time.
+    - Add a user into group if it is specified in expected state but not in current state.
+    - Delete a user from group if it is specified in current state but not in expected state.
+    - To delete all current users, use '' as value.
+    type: list
+    elements: str
+    version_added: 2.9.0
+
+short_description: NetApp ONTAP UNIX Group
+version_added: 2.8.0
+
+"""
+
+EXAMPLES = """
+    - name: Create UNIX group
+      na_ontap_unix_group:
+        state: present
+        name: SampleGroup
+        vserver: ansibleVServer
+        id: 2
+        users: user1,user2
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+
+    - name: Delete all users in UNIX group
+      na_ontap_unix_group:
+        state: present
+        name: SampleGroup
+        vserver: ansibleVServer
+        users: ''
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+
+    - name: Delete UNIX group
+      na_ontap_unix_group:
+        state: absent
+        name: SampleGroup
+        vserver: ansibleVServer
+        hostname: "{{ netapp_hostname }}"
+        username: "{{ netapp_username }}"
+        password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
+from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic
+
+
+class NetAppOntapUnixGroup:
+    """
+    Common operations to manage UNIX groups
+    """
+
+    def __init__(self):
+        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+            name=dict(required=True, type='str'),
+            id=dict(required=False, type='int'),
+            skip_name_validation=dict(required=False, type='bool'),
+            vserver=dict(required=True, type='str'),
+            users=dict(required=False, type='list', elements='str')
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
            supports_check_mode=True
+        )
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # Set up Rest API
+        self.rest_api = OntapRestAPI(self.module)
+        self.use_rest = self.rest_api.is_rest()
+        if self.use_rest:
+            # normalize user names early on the REST path so comparisons against REST records match
+            self.parameters['users'] = self.safe_strip(self.parameters.get('users')) if self.parameters.get('users') is not None else None
+
+        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1):
+            msg = 'REST requires ONTAP 9.9.1 or later for UNIX group APIs.'
+            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)
+
+        if not self.use_rest:
+            if netapp_utils.has_netapp_lib() is False:
+                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
+            self.set_playbook_zapi_key_map()
+            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
+
+    def safe_strip(self, users):
+        """ Strip whitespace from each user name; entries that are empty after stripping are dropped (so users='' becomes []). """
+        return [user.strip() for user in users if len(user.strip())]
+
+    def set_playbook_zapi_key_map(self):
+        # mapping of playbook parameter names to their ZAPI element names, grouped by value type
+        self.na_helper.zapi_string_keys = {
+            'name': 'group-name'
+        }
+        self.na_helper.zapi_int_keys = {
+            'id': 'group-id'
+        }
+        self.na_helper.zapi_bool_keys = {
+            'skip_name_validation': 'skip-name-validation'
+        }
+
+    def get_unix_group(self):
+        """
+        Checks if the UNIX group exists.
+
+        :return:
+            dict() if group found
+            None if group is not found
+        """
+
+        get_unix_group = netapp_utils.zapi.NaElement('name-mapping-unix-group-get-iter')
+        attributes = {
+            'query': {
+                'unix-group-info': {
+                    'group-name': self.parameters['name'],
+                    'vserver': self.parameters['vserver'],
+                }
+            }
+        }
+        get_unix_group.translate_struct(attributes)
+        try:
+            result = self.server.invoke_successfully(get_unix_group, enable_tunneling=True)
+            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+                group_info = result['attributes-list']['unix-group-info']
+                group_details = dict()
+            else:
+                return None
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error getting UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
+            group_details[item_key] = group_info[zapi_key]
+        for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
+            group_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
+                                                                       value=group_info[zapi_key])
+        if group_info.get_child_by_name('users') is not None:
+            group_details['users'] = [user.get_child_content('user-name')
+                                      for user in group_info.get_child_by_name('users').get_children()]
+        else:
+            # left as None when ZAPI returns no 'users' element; apply() normalizes None to []
+            group_details['users'] = None
+        return group_details
+
+    def create_unix_group(self):
+        """
+        Create a UNIX group in the specified Vserver.
+
+        :return: None
+        """
+        if self.parameters.get('id') is None:
+            self.module.fail_json(msg='Error: Missing a required parameter for create: (id)')
+
+        group_create = netapp_utils.zapi.NaElement('name-mapping-unix-group-create')
+        group_details = {}
+        # translate each supported playbook parameter to its ZAPI element via the key maps
+        for item in self.parameters:
+            if item in self.na_helper.zapi_string_keys:
+                zapi_key = self.na_helper.zapi_string_keys.get(item)
+                group_details[zapi_key] = self.parameters[item]
+            elif item in self.na_helper.zapi_bool_keys:
+                zapi_key = self.na_helper.zapi_bool_keys.get(item)
+                group_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
+                                                                            value=self.parameters[item])
+            elif item in self.na_helper.zapi_int_keys:
+                zapi_key = self.na_helper.zapi_int_keys.get(item)
+                # NOTE(review): from_zapi=True converts ZAPI strings to int; for an outbound request one
+                # would expect from_zapi=False -- confirm against NetAppModule.get_value_for_int.
+                group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
+                                                                           value=self.parameters[item])
+        group_create.translate_struct(group_details)
+        try:
+            self.server.invoke_successfully(group_create, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error creating UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+        if self.parameters.get('users') is not None:
+            self.modify_users_in_group()
+
+    def delete_unix_group(self):
+        """
+        Delete a UNIX group from a vserver.
+
+        :return: None
+        """
+        group_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+            'name-mapping-unix-group-destroy', **{'group-name': self.parameters['name']})
+
+        try:
+            self.server.invoke_successfully(group_delete, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error removing UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_unix_group(self, params):
+        """
+        Modify a UNIX group from a vserver.
+        :param params: modify parameters
+        :return: None
+        """
+        # modify users requires separate zapi.
+        if 'users' in params:
+            self.modify_users_in_group()
+            # nothing else to modify when 'users' was the only change
+            if len(params) == 1:
+                return
+
+        group_modify = netapp_utils.zapi.NaElement('name-mapping-unix-group-modify')
+        group_details = {'group-name': self.parameters['name']}
+        for key in params:
+            if key in self.na_helper.zapi_int_keys:
+                zapi_key = self.na_helper.zapi_int_keys.get(key)
+                group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
+                                                                           value=params[key])
+        group_modify.translate_struct(group_details)
+
+        try:
+            self.server.invoke_successfully(group_modify, enable_tunneling=True)
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error modifying UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def modify_users_in_group(self):
+        """
+        Add/delete one or many users in a UNIX group
+
+        :return: None
+        """
+        current_users = self.get_unix_group().get('users')
+        expect_users = self.parameters.get('users')
+
+        if current_users is None:
+            current_users = []
+        # users=[''] is the documented way to clear all group members
+        if expect_users[0] == '' and len(expect_users) == 1:
+            expect_users = []
+        users_to_remove = list(set(current_users) - set(expect_users))
+        users_to_add = list(set(expect_users) - set(current_users))
+        if len(users_to_add) > 0:
+            for user in users_to_add:
+                add_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-add-user')
+                group_details = {'group-name': self.parameters['name'], 'user-name': user}
+                add_user.translate_struct(group_details)
+                try:
+                    self.server.invoke_successfully(add_user, enable_tunneling=True)
+                except netapp_utils.zapi.NaApiError as error:
+                    self.module.fail_json(
+                        msg='Error adding user %s to UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
+                        exception=traceback.format_exc())
+
+        if len(users_to_remove) > 0:
+            for user in users_to_remove:
+                delete_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-delete-user')
+                group_details = {'group-name': self.parameters['name'], 'user-name': user}
                delete_user.translate_struct(group_details)
+                try:
+                    self.server.invoke_successfully(delete_user, enable_tunneling=True)
+                except netapp_utils.zapi.NaApiError as error:
+                    self.module.fail_json(
+                        msg='Error deleting user %s from UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
+                        exception=traceback.format_exc())
+
+    def get_unix_group_rest(self):
+        """
+        Retrieves the UNIX groups for all of the SVMs.
+        UNIX users who are the members of the group are also displayed.
+        """
+        if not self.use_rest:
+            return self.get_unix_group()
+        query = {'svm.name': self.parameters.get('vserver'),
+                 'name': self.parameters.get('name')}
+        api = 'name-services/unix-groups'
+        fields = 'svm.uuid,id,name,users.name'
+        record, error = rest_generic.get_one_record(self.rest_api, api, query, fields)
+        if error:
+            self.module.fail_json(msg="Error getting UNIX group: %s" % error)
+        if record:
+            if 'users' in record:
+                # flatten users from [{'name': ...}, ...] to a plain list of names
+                record['users'] = [user['name'] for user in record['users']]
+            return {
+                'svm': {'uuid': self.na_helper.safe_get(record, ['svm', 'uuid'])},
+                'name': self.na_helper.safe_get(record, ['name']),
+                'id': self.na_helper.safe_get(record, ['id']),
+                'users': self.na_helper.safe_get(record, ['users'])
+            }
+        return None
+
+    def create_unix_group_rest(self):
+        """
+        Creates the local UNIX group configuration for the specified SVM.
+        Group name and group ID are mandatory parameters.
+ """ + if not self.use_rest: + return self.create_unix_group() + + body = {'svm.name': self.parameters.get('vserver')} + if 'name' in self.parameters: + body['name'] = self.parameters['name'] + if 'id' in self.parameters: + body['id'] = self.parameters['id'] + if 'skip_name_validation' in self.parameters: + body['skip_name_validation'] = self.parameters['skip_name_validation'] + api = 'name-services/unix-groups' + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error is not None: + self.module.fail_json(msg="Error creating UNIX group: %s" % error) + if self.parameters.get('users') is not None: + self.modify_users_in_group_rest() + + def modify_users_in_group_rest(self, current=None): + """ + Add/delete one or many users in a UNIX group + """ + body = {'records': []} + # current is to add user when creating a group + if not current: + current = self.get_unix_group_rest() + current_users = current['users'] or [] + expect_users = self.parameters.get('users') + users_to_remove = list(set(current_users) - set(expect_users)) + users_to_add = list(set(expect_users) - set(current_users)) + if len(users_to_add) > 0: + body['records'] = [{'name': user} for user in users_to_add] + if 'skip_name_validation' in self.parameters: + body['skip_name_validation'] = self.parameters['skip_name_validation'] + api = 'name-services/unix-groups/%s/%s/users' % (current['svm']['uuid'], current['name']) + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error is not None: + self.module.fail_json(msg="Error Adding user to UNIX group: %s" % error) + + if len(users_to_remove) > 0: + for user in users_to_remove: + api = 'name-services/unix-groups/%s/%s/users' % (current['svm']['uuid'], current['name']) + dummy, error = rest_generic.delete_async(self.rest_api, api, user, body=None) + if error is not None: + self.module.fail_json(msg="Error removing user from UNIX group: %s" % error) + + def delete_unix_group_rest(self, current): + """ + Deletes a UNIX 
user configuration for the specified SVM with rest API. + """ + if not self.use_rest: + return self.delete_unix_group() + + api = 'name-services/unix-groups/%s' % current['svm']['uuid'] + dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name']) + if error is not None: + self.module.fail_json(msg="Error deleting UNIX group: %s" % error) + + def modify_unix_group_rest(self, modify, current=None): + """ + Updates UNIX group information for the specified user and SVM with rest API. + """ + if not self.use_rest: + return self.modify_unix_group(modify) + + if 'users' in modify: + self.modify_users_in_group_rest(current) + if len(modify) == 1: + return + + api = 'name-services/unix-groups/%s' % current['svm']['uuid'] + body = {} + if 'id' in modify: + body['id'] = modify['id'] + if body: + dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body) + if error is not None: + self.module.fail_json(msg="Error on modifying UNIX group: %s" % error) + + def apply(self): + """ + Invoke appropriate action based on playbook parameters + + :return: None + """ + cd_action = None + current = self.get_unix_group_rest() + if current and current['users'] is None: + current['users'] = [] + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_unix_group_rest() + elif cd_action == 'delete': + self.delete_unix_group_rest(current) + else: + self.modify_unix_group_rest(modify, current) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + obj = NetAppOntapUnixGroup() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py 
b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py new file mode 100644 index 000000000..708bb48d0 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_unix_user.py @@ -0,0 +1,330 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' + +module: na_ontap_unix_user + +short_description: NetApp ONTAP UNIX users +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) + +description: +- Create, delete or modify UNIX users local to ONTAP. + +options: + + state: + description: + - Whether the specified user should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + + name: + description: + - Specifies user's UNIX account name. + - REST support requires ONTAP version 9.9.0 or later. + - Non-modifiable. + required: true + type: str + + primary_gid: + description: + - Specifies the primary group identification number for the UNIX user. + - REST support requires ONTAP version 9.9.0 or later. + - Required for create, modifiable. + aliases: ['group_id'] + type: int + version_added: 21.21.0 + + vserver: + description: + - Specifies the Vserver for the UNIX user. + - REST support requires ONTAP version 9.9.0 or later. + - Non-modifiable. + required: true + type: str + + id: + description: + - Specifies an identification number for the UNIX user. + - REST support requires ONTAP version 9.9.0 or later. + - Required for create, modifiable. + type: int + + full_name: + description: + - Specifies the full name of the UNIX user + - REST support requires ONTAP version 9.9.0 or later. + - Optional for create, modifiable. 
+ type: str +''' + +EXAMPLES = """ + + - name: Create UNIX User + netapp.ontap.na_ontap_unix_user: + state: present + name: SampleUser + vserver: ansibleVServer + group_id: 1 + id: 2 + full_name: Test User + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete UNIX User + netapp.ontap.na_ontap_unix_user: + state: absent + name: SampleUser + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapUnixUser: + """ + Common operations to manage users and roles. 
class NetAppOntapUnixUser:
    """
    Create, delete or modify a UNIX user local to ONTAP, using REST when
    available (ONTAP 9.9.0+) and falling back to ZAPI otherwise.
    """

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            primary_gid=dict(required=False, type='int', aliases=['group_id']),
            id=dict(required=False, type='int'),
            full_name=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        # Prefer REST; the unix-users REST endpoints require ONTAP 9.9.0 or later.
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 0):
            msg = 'REST requires ONTAP 9.9.0 or later for unix-users APIs.'
            self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters)

        if not self.use_rest:
            if netapp_utils.has_netapp_lib() is False:
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])

    def get_unix_user(self):
        """Return the UNIX user as a dict (ZAPI), or None when it does not exist."""
        request = netapp_utils.zapi.NaElement('name-mapping-unix-user-get-iter')
        request.translate_struct({
            'query': {
                'unix-user-info': {
                    'user-name': self.parameters['name'],
                    'vserver': self.parameters['vserver'],
                }
            }
        })
        try:
            result = self.server.invoke_successfully(request, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            info = result['attributes-list']['unix-user-info']
            return {'primary_gid': int(info['group-id']),
                    'id': int(info['user-id']),
                    'full_name': info['full-name']}
        return None

    def create_unix_user(self):
        """Create the UNIX user in the vserver (ZAPI); primary_gid and id are mandatory."""
        if self.parameters.get('primary_gid') is None or self.parameters.get('id') is None:
            self.module.fail_json(msg='Error: Missing one or more required parameters for create: (primary_gid, id)')

        attributes = {'user-name': self.parameters['name'],
                      'group-id': str(self.parameters['primary_gid']),
                      'user-id': str(self.parameters['id'])}
        node = netapp_utils.zapi.NaElement.create_node_with_children('name-mapping-unix-user-create', **attributes)
        if self.parameters.get('full_name') is not None:
            node.add_new_child('full-name', self.parameters['full_name'])

        try:
            self.server.invoke_successfully(node, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_unix_user(self):
        """Remove the UNIX user from the vserver (ZAPI)."""
        node = netapp_utils.zapi.NaElement.create_node_with_children(
            'name-mapping-unix-user-destroy', **{'user-name': self.parameters['name']})

        try:
            self.server.invoke_successfully(node, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error removing UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def modify_unix_user(self, params):
        """Apply the modified attributes in params to the user (ZAPI)."""
        node = netapp_utils.zapi.NaElement.create_node_with_children(
            'name-mapping-unix-user-modify', **{'user-name': self.parameters['name']})
        # module option name -> ZAPI child element name
        zapi_names = {'primary_gid': 'group-id', 'id': 'user-id', 'full_name': 'full-name'}
        for key, value in params.items():
            zapi_name = zapi_names.get(key)
            if zapi_name is None:
                continue
            # integer options are sent as strings; full_name is passed through
            node.add_new_child(zapi_name, value if key == 'full_name' else str(value))

        try:
            self.server.invoke_successfully(node, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying UNIX user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_unix_user_rest(self):
        """
        Return the UNIX user for this SVM as a dict (REST), or None when absent.
        Delegates to the ZAPI path when REST is not in use.
        """
        if not self.use_rest:
            return self.get_unix_user()
        query = {'svm.name': self.parameters.get('vserver'),
                 'name': self.parameters.get('name')}
        fields = 'svm.uuid,id,primary_gid,name,full_name'
        record, error = rest_generic.get_one_record(self.rest_api, 'name-services/unix-users', query, fields)
        if error:
            self.module.fail_json(msg=f"Error on getting unix-user info: {error}")
        if record:
            return {
                'svm': {'uuid': self.na_helper.safe_get(record, ['svm', 'uuid'])},
                'name': self.na_helper.safe_get(record, ['name']),
                'full_name': self.na_helper.safe_get(record, ['full_name']),
                'id': self.na_helper.safe_get(record, ['id']),
                'primary_gid': self.na_helper.safe_get(record, ['primary_gid']),
            }
        return None

    def create_unix_user_rest(self):
        """Create the local UNIX user configuration for the SVM (REST)."""
        if not self.use_rest:
            return self.create_unix_user()

        body = {'svm.name': self.parameters.get('vserver')}
        body.update({key: self.parameters[key]
                     for key in ('name', 'full_name', 'id', 'primary_gid') if key in self.parameters})
        dummy, error = rest_generic.post_async(self.rest_api, 'name-services/unix-users', body)
        if error is not None:
            self.module.fail_json(msg=f"Error on creating unix-user: {error}")

    def delete_unix_user_rest(self, current):
        """Delete the UNIX user configuration for the SVM (REST)."""
        if not self.use_rest:
            return self.delete_unix_user()

        api = f"name-services/unix-users/{current['svm']['uuid']}"
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['name'])
        if error is not None:
            self.module.fail_json(msg=f"Error on deleting unix-user: {error}")

    def modify_unix_user_rest(self, modify, current=None):
        """Update full_name/id/primary_gid on the existing user (REST)."""
        if not self.use_rest:
            return self.modify_unix_user(modify)

        query = {'svm.name': self.parameters.get('vserver')}
        body = {key: modify[key] for key in ('full_name', 'id', 'primary_gid') if key in modify}
        api = f"name-services/unix-users/{current['svm']['uuid']}"
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['name'], body, query)
        if error is not None:
            self.module.fail_json(msg=f"Error on modifying unix-user: {error}")

    def apply(self):
        """
        Invoke the create/delete/modify action implied by the playbook parameters.

        :return: None (exits the module with the changed state)
        """
        current = self.get_unix_user_rest()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_unix_user_rest()
            elif cd_action == 'delete':
                self.delete_unix_user_rest(current)
            else:
                self.modify_unix_user_rest(modify, current)
        result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify)
        self.module.exit_json(**result)


def main():
    obj = NetAppOntapUnixUser()
    obj.apply()


if __name__ == '__main__':
    main()
+extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: +- Create or destroy users. + +options: + state: + description: + - Whether the specified user should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + name: + description: + - The name of the user to manage. + required: true + type: str + application_strs: + version_added: 21.6.0 + description: + - List of applications to grant access to. + - This option maintains backward compatibility with the existing C(applications) option, but is limited. + - It is recommended to use the new C(application_dicts) option which provides more flexibility. + - Creating a login with application console, telnet, rsh, and service-processor for a data vserver is not supported. + - Module supports both service-processor and service_processor choices. + - ZAPI requires service-processor, while REST requires service_processor, except for an issue with ONTAP 9.6 and 9.7. + - snmp is not supported in REST. + - Either C(application_dicts) or C(application_strs) is required. + type: list + elements: str + choices: ['console', 'http','ontapi','rsh','snmp','service_processor','service-processor','sp','ssh','telnet'] + aliases: + - application + - applications + application_dicts: + version_added: 21.6.0 + description: + - List of applications to grant access to. Provides better control on applications and authentication methods. + - Creating a login with application console, telnet, rsh, and service-processor for a data vserver is not supported. + - Module supports both service-processor and service_processor choices. + - ZAPI requires service-processor, while REST requires service_processor, except for an issue with ONTAP 9.6 and 9.7. + - snmp is not supported in REST. + - Either C(application_dicts) or C(application_strs) is required. 
+ type: list + elements: dict + suboptions: + application: + description: name of the application. + type: str + choices: ['console', 'http','ontapi','rsh','snmp','service_processor','service-processor','sp','ssh','telnet'] + required: true + authentication_methods: + description: list of authentication methods for the application (see C(authentication_method)). + type: list + elements: str + choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert', 'saml'] + required: true + second_authentication_method: + description: when using ssh, optional additional authentication method for MFA. + type: str + choices: ['none', 'password', 'publickey', 'nsswitch'] + authentication_method: + description: + - Authentication method for the application. If you need more than one method, use C(application_dicts). + - Not all authentication methods are valid for an application. + - Valid authentication methods for each application are as denoted in I(authentication_choices_description). + - Password for console application + - Password, domain, nsswitch, cert, saml for http application. + - Password, domain, nsswitch, cert, saml for ontapi application. + - SAML is only supported with REST, but seems to work with ZAPI as well. + - Community for snmp application (when creating SNMPv1 and SNMPv2 users). + - The usm and community for snmp application (when creating SNMPv3 users). + - Password for sp application. + - Password for rsh application. + - Password for telnet application. + - Password, publickey, domain, nsswitch for ssh application. + - Required when C(application_strs) is present. + type: str + choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert', 'saml'] + set_password: + description: + - Password for the user account. + - It is ignored for creating snmp users, but is required for creating non-snmp users. + - For an existing user, this value will be used as the new password. 
+ type: str + role_name: + description: + - The name of the role. Required when C(state=present) + type: str + lock_user: + description: + - Whether the specified user account is locked. + type: bool + vserver: + description: + - The name of the vserver to use. + - Required with ZAPI. + - With REST, ignore this option for creating cluster scoped interface. + aliases: + - svm + type: str + authentication_protocol: + description: + - Authentication protocol for the snmp user. + - When cluster FIPS mode is on, 'sha' and 'sha2-256' are the only possible and valid values. + - When cluster FIPS mode is off, the default value is 'none'. + - When cluster FIPS mode is on, the default value is 'sha'. + - Only available for 'usm' authentication method and non modifiable. + choices: ['none', 'md5', 'sha', 'sha2-256'] + type: str + version_added: '20.6.0' + authentication_password: + description: + - Password for the authentication protocol. This should be minimum 8 characters long. + - This is required for 'md5', 'sha' and 'sha2-256' authentication protocols and not required for 'none'. + - Only available for 'usm' authentication method and non modifiable. + type: str + version_added: '20.6.0' + engine_id: + description: + - Authoritative entity's EngineID for the SNMPv3 user. + - This should be specified as a hexadecimal string. + - Engine ID with first bit set to 1 in first octet should have a minimum of 5 or maximum of 32 octets. + - Engine Id with first bit set to 0 in the first octet should be 12 octets in length. + - Engine Id cannot have all zeros in its address. + - Only available for 'usm' authentication method and non modifiable. + type: str + version_added: '20.6.0' + privacy_protocol: + description: + - Privacy protocol for the snmp user. + - When cluster FIPS mode is on, 'aes128' is the only possible and valid value. + - When cluster FIPS mode is off, the default value is 'none'. When cluster FIPS mode is on, the default value is 'aes128'. 
      - Only available for 'usm' authentication method and non modifiable.
    choices: ['none', 'des', 'aes128']
    type: str
    version_added: '20.6.0'
  privacy_password:
    description:
      - Password for the privacy protocol. This should be minimum 8 characters long.
      - This is required for 'des' and 'aes128' privacy protocols and not required for 'none'.
      - Only available for 'usm' authentication method and non modifiable.
    type: str
    version_added: '20.6.0'
  remote_switch_ipaddress:
    description:
      - This optionally specifies the IP Address of the remote switch.
      - The remote switch could be a cluster switch monitored by Cluster Switch Health Monitor (CSHM)
        or a Fiber Channel (FC) switch monitored by Metro Cluster Health Monitor (MCC-HM).
      - This is applicable only for a remote SNMPv3 user i.e. only if user is a remote (non-local) user,
        application is snmp and authentication method is usm.
    type: str
    version_added: '20.6.0'
  replace_existing_apps_and_methods:
    description:
      - If the user already exists, the current applications and authentication methods are replaced when state=present.
      - If the user already exists, the current applications and authentication methods are removed when state=absent.
      - When using application_dicts or REST, this is the only supported behavior.
      - When using application_strs and ZAPI, this is the behavior when this option is set to always.
      - When using application_strs and ZAPI, if the option is set to auto, applications that are not listed are not removed.
      - When using application_strs and ZAPI, if the option is set to auto, authentication methods that are not listed are not removed.
      - C(auto) preserves the existing behavior for backward compatibility, but note that REST and ZAPI have inconsistent behavior.
      - This is another reason to recommend using C(application_dicts).
+ type: str + choices: ['always', 'auto'] + default: 'auto' + version_added: '20.6.0' +''' + +EXAMPLES = """ + + - name: Create User + netapp.ontap.na_ontap_user: + state: present + name: SampleUser + applications: ssh,console + authentication_method: password + set_password: apn1242183u1298u41 + lock_user: True + role_name: vsadmin + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create cluster scoped user in REST. + netapp.ontap.na_ontap_user: + state: present + name: SampleUser + applications: ssh,console + authentication_method: password + set_password: apn1242183u1298u41 + lock_user: True + role_name: admin + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete User + netapp.ontap.na_ontap_user: + state: absent + name: SampleUser + applications: ssh + authentication_method: password + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create user with snmp application (ZAPI) + netapp.ontap.na_ontap_user: + state: present + name: test_cert_snmp + applications: snmp + authentication_method: usm + role_name: admin + authentication_protocol: md5 + authentication_password: '12345678' + privacy_protocol: 'aes128' + privacy_password: '12345678' + engine_id: '7063514941000000000000' + remote_switch_ipaddress: 10.0.0.0 + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Create user + netapp.ontap.na_ontap_user: + state: present + name: test123 + application_dicts: + - application: http + authentication_methods: password + - application: ssh + authentication_methods: password,publickey + role_name: vsadmin + set_password: bobdole1234566 + vserver: "{{ vserver }}" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ 
class NetAppOntapUser:
    """
    Create, delete, lock/unlock or modify an ONTAP login account, through
    either ZAPI or the REST security/accounts APIs.
    """

    def __init__(self):
        self.use_rest = False
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        application_choices = ['console', 'http', 'ontapi', 'rsh', 'snmp',
                               'sp', 'service-processor', 'service_processor', 'ssh', 'telnet']
        method_choices = ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm', 'cert', 'saml']
        self.argument_spec.update(dict(
            state=dict(type='str', choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),

            application_strs=dict(type='list', elements='str', aliases=['application', 'applications'],
                                  choices=application_choices),
            application_dicts=dict(type='list', elements='dict',
                                   options=dict(
                                       application=dict(required=True, type='str', choices=application_choices),
                                       authentication_methods=dict(required=True, type='list', elements='str',
                                                                   choices=method_choices),
                                       second_authentication_method=dict(type='str',
                                                                         choices=['none', 'password', 'publickey', 'nsswitch']))),
            authentication_method=dict(type='str', choices=method_choices),
            set_password=dict(type='str', no_log=True),
            role_name=dict(type='str'),
            lock_user=dict(type='bool'),
            vserver=dict(type='str', aliases=['svm']),
            authentication_protocol=dict(type='str', choices=['none', 'md5', 'sha', 'sha2-256']),
            authentication_password=dict(type='str', no_log=True),
            engine_id=dict(type='str'),
            privacy_protocol=dict(type='str', choices=['none', 'des', 'aes128']),
            privacy_password=dict(type='str', no_log=True),
            remote_switch_ipaddress=dict(type='str'),
            replace_existing_apps_and_methods=dict(type='str', choices=['always', 'auto'], default='auto')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[
                ('application_strs', 'application_dicts')
            ],
            required_together=[
                ('application_strs', 'authentication_method')
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.strs_to_dicts()

        # REST API should be used for ONTAP 9.6 or higher, but SNMPv3-only
        # options are not available in the early REST implementation.
        self.rest_api = netapp_utils.OntapRestAPI(self.module)
        unsupported_rest_properties = ['authentication_password', 'authentication_protocol', 'engine_id',
                                       'privacy_password', 'privacy_protocol']
        self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties)
        if not self.use_rest:
            if self.parameters.get('vserver') is None:
                self.module.fail_json(msg="Error: vserver is required with ZAPI")
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
        self.validate_applications()

    def validate_applications(self):
        """Normalize and sanity-check the applications list for the active transport."""
        if not self.use_rest:
            if self.parameters['applications'] is None:
                self.module.fail_json(msg="application_dicts or application_strs is a required parameter with ZAPI")
            # ZAPI only accepts the dashed spelling
            for application in self.parameters['applications']:
                if application['application'] == 'service_processor':
                    application['application'] = 'service-processor'
        if self.parameters['applications'] is None:
            return
        seen = []
        for application in self.parameters['applications']:
            app_name = application['application']
            # reject duplicated application entries
            if app_name in seen:
                self.module.fail_json(msg='Error: repeated application name: %s. Group all authentication methods under a single entry.' % app_name)
            seen.append(app_name)
            if self.use_rest:
                if app_name == 'snmp':
                    self.module.fail_json(msg="snmp as application is not supported in REST.")
                # REST prefers certificate to cert
                application['authentication_methods'] = ['certificate' if method == 'cert' else method
                                                         for method in application['authentication_methods']]
                # REST get always returns 'second_authentication_method'
                application.setdefault('second_authentication_method', None)

    def strs_to_dicts(self):
        """Normalize application_strs/application_dicts into self.parameters['applications']."""
        if 'application_dicts' in self.parameters:
            for application in self.parameters['application_dicts']:
                # keep them sorted for comparison with current
                application['authentication_methods'].sort()
            self.parameters['applications'] = self.parameters['application_dicts']
            self.parameters['replace_existing_apps_and_methods'] = 'always'
        elif 'application_strs' in self.parameters:
            # expand each application string with the single shared method
            method = self.parameters['authentication_method']
            self.parameters['applications'] = [
                dict(application=app,
                     authentication_methods=[method],
                     second_authentication_method=None
                     ) for app in self.parameters['application_strs']]
        else:
            self.parameters['applications'] = None

    def get_user_rest(self):
        """Look up the account via REST; return (owner_uuid, name) or None."""
        api = 'security/accounts'
        query = {
            'name': self.parameters['name']
        }
        if self.parameters.get('vserver') is None:
            # without a vserver, the account is cluster scoped
            query['scope'] = 'cluster'
        else:
            query['owner.name'] = self.parameters['vserver']

        message, error = self.rest_api.get(api, query)
        if error:
            self.module.fail_json(msg='Error while fetching user info: %s' % error)
        if message['num_records'] == 1:
            record = message['records'][0]
            return record['owner']['uuid'], record['name']
        if message['num_records'] > 1:
            self.module.fail_json(msg='Error while fetching user info, found multiple entries: %s' % repr(message))

        return None

    def get_user_details_rest(self, name, owner_uuid):
        """Fetch role, applications and lock state for the account via REST."""
        api = "security/accounts/%s/%s" % (owner_uuid, name)
        response, error = self.rest_api.get(api, {'fields': 'role,applications,locked'})
        if error:
            self.module.fail_json(msg='Error while fetching user details: %s' % error)
        if response:
            for application in response['applications']:
                # replace "none" values with None for comparison
                if application.get('second_authentication_method') == 'none':
                    application['second_authentication_method'] = None
                # new read-only attribute in 9.11, breaks idempotency when present
                application.pop('is_ldap_fastbind', None)
            details = {
                'role_name': response['role']['name'],
                'applications': response['applications']
            }
            if "locked" in response:
                details['lock_user'] = response['locked']
            return details

    def get_user(self):
        """
        Fetch the user's login entries via ZAPI.

        :return: dict(lock_user, role_name, applications) when found, else None.
        """
        desired_applications = [application['application'] for application in self.parameters['applications']]
        desired_method = self.parameters.get('authentication_method')
        request = netapp_utils.zapi.NaElement('security-login-get-iter')
        account_info = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-account-info', **{'vserver': self.parameters['vserver'],
                                              'user-name': self.parameters['name']})
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(account_info)
        request.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(request, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            # 16034: user not found; 16043: user exists but the application is missing
            if to_native(error.code) in ['16034', '16043']:
                return None
            self.module.fail_json(msg='Error getting user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

        if not result.get_child_by_name('num-records') or not int(result.get_child_content('num-records')):
            return None

        applications = {}
        locks = []
        attributes = result.get_child_by_name('attributes-list')
        for info in attributes.get_children():
            locks.append(self.na_helper.get_value_for_bool(True, info.get_child_content('is-locked')))
            role_name = info.get_child_content('role-name')
            application = info.get_child_content('application')
            auth_method = info.get_child_content('authentication-method')
            sec_method = info.get_child_content('second-authentication-method')
            if self.parameters['replace_existing_apps_and_methods'] == 'always' and application in applications:
                # merge this method into the existing entry for the application
                applications[application][0].append(auth_method)
                if sec_method != 'none':
                    # we can't change sec_method in place, a tuple is not mutable
                    applications[application] = (applications[application][0], sec_method)
            elif (self.parameters['replace_existing_apps_and_methods'] == 'always'
                  or (application in desired_applications and auth_method == desired_method)):
                # with 'auto' we ignore existing apps that were not asked for
                # with auto, only a single method is supported
                applications[application] = ([auth_method], sec_method if sec_method != 'none' else None)

        apps = [dict(application=application, authentication_methods=sorted(methods), second_authentication_method=sec_method)
                for application, (methods, sec_method) in applications.items()]
        return dict(
            lock_user=any(locks),
            role_name=role_name,
            applications=apps
        )

    def create_user_rest(self, apps):
        """Create the account via REST, retrying once with the alternate service-processor spelling."""
        api = 'security/accounts'
        body = {
            'name': self.parameters['name'],
            'role.name': self.parameters['role_name'],
            'applications': self.na_helper.filter_out_none_entries(apps)
        }
        if self.parameters.get('vserver') is not None:
            # vserver is empty for cluster scoped accounts
            body['owner.name'] = self.parameters['vserver']
        if 'set_password' in self.parameters:
            body['password'] = self.parameters['set_password']
        if 'lock_user' in self.parameters:
            body['locked'] = self.parameters['lock_user']
        dummy, error = self.rest_api.post(api, body)
        if (
            error
            and 'invalid value' in error['message']
            and any(x in error['message'] for x in ['service-processor', 'service_processor'])
        ):
            # ONTAP versions disagree on the service-processor spelling;
            # flip it and retry once, keeping the initial error for reporting.
            for app_item in body['applications']:
                if app_item['application'] == 'service-processor':
                    app_item['application'] = 'service_processor'
                elif app_item['application'] == 'service_processor':
                    app_item['application'] = 'service-processor'
            dummy, retry_error = self.rest_api.post(api, body)
            if not retry_error:
                return

        # non-sp errors thrown or initial sp errors
        if error:
            self.module.fail_json(msg='Error while creating user: %s' % error)

    def create_user(self, application):
        # one ZAPI create call per authentication method
        for index, dummy in enumerate(application['authentication_methods']):
            self.create_user_with_auth(application, index)

    def create_user_with_auth(self, application, index):
        """
        Create one login entry for (application, authentication_methods[index]) via ZAPI.

        :param: application: application to grant access to, as a dict
        """
        login_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-create', **{'vserver': self.parameters['vserver'],
                                        'user-name': self.parameters['name'],
                                        'application': application['application'],
                                        'authentication-method': application['authentication_methods'][index],
                                        'role-name': self.parameters.get('role_name')})
        if application.get('second_authentication_method') is not None:
            login_create.add_new_child('second-authentication-method', application['second_authentication_method'])
        if self.parameters.get('set_password') is not None:
            login_create.add_new_child('password', self.parameters.get('set_password'))
        if application['authentication_methods'][0] == 'usm':
            # SNMPv3 users carry extra attributes
            if self.parameters.get('remote_switch_ipaddress') is not None:
                login_create.add_new_child('remote-switch-ipaddress', self.parameters.get('remote_switch_ipaddress'))
            snmpv3_login_info = netapp_utils.zapi.NaElement('snmpv3-login-info')
            for parameter, zapi_name in (('authentication_password', 'authentication-password'),
                                         ('authentication_protocol', 'authentication-protocol'),
                                         ('engine_id', 'engine-id'),
                                         ('privacy_password', 'privacy-password'),
                                         ('privacy_protocol', 'privacy-protocol')):
                if self.parameters.get(parameter) is not None:
                    snmpv3_login_info.add_new_child(zapi_name, self.parameters[parameter])
            login_create.add_child_elem(snmpv3_login_info)

        try:
            self.server.invoke_successfully(login_create,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def lock_unlock_user_rest(self, owner_uuid, username, value=None):
        """PATCH the account 'locked' flag via REST."""
        error = self.patch_account(owner_uuid, username, {'locked': value})
        if error:
            self.module.fail_json(msg='Error while locking/unlocking user: %s' % error)

    def lock_given_user(self):
        """Lock the user (ZAPI)."""
        node = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-lock', **{'vserver': self.parameters['vserver'],
                                      'user-name': self.parameters['name']})

        try:
            self.server.invoke_successfully(node,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error locking user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def unlock_given_user(self):
        """Unlock the user (ZAPI); error 13114 (already unlocked) is ignored."""
        node = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-unlock', **{'vserver': self.parameters['vserver'],
                                        'user-name': self.parameters['name']})

        try:
            self.server.invoke_successfully(node,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) != '13114':
                self.module.fail_json(msg='Error unlocking user %s: %s' % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def delete_user_rest(self, owner_uuid, username):
        """Delete the whole account via REST."""
        api = "security/accounts/%s/%s" % (owner_uuid, username)
        dummy, error = self.rest_api.delete(api)
        if error:
            self.module.fail_json(msg='Error while deleting user: %s' % error)

    def delete_user(self, application, methods_to_keep=None):
        # remove each login entry whose method is not explicitly kept
        for index, method in enumerate(application['authentication_methods']):
            if methods_to_keep is None or method not in methods_to_keep:
                self.delete_user_with_auth(application, index)

    def delete_user_with_auth(self, application, index):
        """
        Delete one login entry for (application, authentication_methods[index]) via ZAPI.

        :param: application: application to revoke access to, as a dict
        """
        login_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-delete', **{'vserver': self.parameters['vserver'],
                                        'user-name': self.parameters['name'],
                                        'application': application['application'],
                                        'authentication-method': application['authentication_methods'][index]})

        try:
            self.server.invoke_successfully(login_delete,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error removing user %s: %s - application: %s'
                                      % (self.parameters['name'], to_native(error), application),
                                  exception=traceback.format_exc())

    @staticmethod
    def is_repeated_password(message):
        """True when ONTAP rejected the password only because it was reused."""
        return message.startswith((
            'New password must be different than last 6 passwords.',
            'New password must be different from last 6 passwords.',
            'New password must be different than the old password.',
            'New password must be different from the old password.',
        ))

    def change_password_rest(self, owner_uuid, username):
        """Set a new password via REST; returns False when the password is unchanged."""
        error = self.patch_account(owner_uuid, username, {'password': self.parameters['set_password']})
        if error:
            if 'message' in error and self.is_repeated_password(error['message']):
                # if the password is reused, assume idempotency
                return False
            self.module.fail_json(msg='Error while updating user password: %s' % error)
        return True

    def change_password(self):
        """
        Set a new password via ZAPI.

        :return:
            True if password updated
            False if password is not updated
        :rtype: bool
        """
        request = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-modify-password', **{
                'new-password': str(self.parameters.get('set_password')),
                'user-name': self.parameters['name']})
        try:
            self.server.invoke_successfully(request,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) == '13114':
                return False
            # if the user gave the same password, report ok instead of an error
            if to_native(error.code) == '13214' and self.is_repeated_password(error.message):
                return False
            self.module.fail_json(msg='Error setting password for user %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

        self.server.set_vserver(None)
+ return True + + def modify_apps_rest(self, owner_uuid, username, apps=None): + body = { + 'role.name': self.parameters['role_name'], + 'applications': self.na_helper.filter_out_none_entries(apps) + } + error = self.patch_account(owner_uuid, username, body) + if error: + self.module.fail_json(msg='Error while modifying user details: %s' % error) + + def patch_account(self, owner_uuid, username, body): + query = {'name': self.parameters['name'], 'owner.uuid': owner_uuid} + api = "security/accounts/%s/%s" % (owner_uuid, username) + dummy, result = self.rest_api.patch(api, body, query) + return result + + def modify_user(self, application, current_methods): + for index, method in enumerate(application['authentication_methods']): + if method in current_methods: + self.modify_user_with_auth(application, index) + else: + self.create_user_with_auth(application, index) + + def modify_user_with_auth(self, application, index): + """ + Modify user + application is now a dict + """ + user_modify = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-modify', **{'vserver': self.parameters['vserver'], + 'user-name': self.parameters['name'], + 'application': application['application'], + 'authentication-method': application['authentication_methods'][index], + 'role-name': self.parameters.get('role_name')}) + + try: + self.server.invoke_successfully(user_modify, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying user %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def change_sp_application(self, current_apps): + """Adjust requested app name to match ONTAP convention""" + if not self.parameters['applications']: + return + app_list = [app['application'] for app in current_apps] + for application in self.parameters['applications']: + if application['application'] == 'service_processor' and 'service-processor' in app_list: + application['application'] 
= 'service-processor' + elif application['application'] == 'service-processor' and 'service_processor' in app_list: + application['application'] = 'service_processor' + + def validate_action(self, action): + errors = [] + if action == 'create': + if not self.parameters.get('role_name'): + errors.append('role_name') + if not self.parameters.get('applications'): + errors.append('application_dicts or application_strs') + if errors: + plural = 's' if len(errors) > 1 else '' + self.module.fail_json(msg='Error: missing required parameter%s for %s: %s.' % + (plural, action, ' and: '.join(errors))) + + def modify_apps_zapi(self, current, modify_decision): + if 'applications' not in modify_decision: + # to change roles, we need at least one app + modify_decision['applications'] = self.parameters['applications'] + current_apps = dict((application['application'], application['authentication_methods']) for application in current['applications']) + for application in modify_decision['applications']: + if application['application'] in current_apps: + self.modify_user(application, current_apps[application['application']]) + else: + self.create_user(application) + desired_apps = dict((application['application'], application['authentication_methods']) + for application in self.parameters['applications']) + for application in current['applications']: + if application['application'] not in desired_apps: + self.delete_user(application) + else: + self.delete_user(application, desired_apps[application['application']]) + + def get_current(self): + owner_uuid, name = None, None + if self.use_rest: + current = self.get_user_rest() + if current is not None: + owner_uuid, name = current + current = self.get_user_details_rest(name, owner_uuid) + self.change_sp_application(current['applications']) + else: + current = self.get_user() + return current, owner_uuid, name + + def define_actions(self, current): + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = 
self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.use_rest and cd_action is None and current and 'lock_user' not in current and self.parameters.get('lock_user') is not None: + # REST does not return locked if password is not set + if self.parameters.get('set_password') is None: + self.module.fail_json(msg='Error: cannot modify lock state if password is not set.') + modify['lock_user'] = self.parameters['lock_user'] + self.na_helper.changed = True + self.validate_action(cd_action) + return cd_action, modify + + def take_action(self, cd_action, modify, current, owner_uuid, name): + if cd_action == 'create': + if self.use_rest: + self.create_user_rest(self.parameters['applications']) + else: + for application in self.parameters['applications']: + self.create_user(application) + elif cd_action == 'delete': + if self.use_rest: + self.delete_user_rest(owner_uuid, name) + else: + for application in current['applications']: + self.delete_user(application) + elif modify: + if 'role_name' in modify or 'applications' in modify: + if self.use_rest: + self.modify_apps_rest(owner_uuid, name, self.parameters['applications']) + else: + self.modify_apps_zapi(current, modify) + return modify and 'lock_user' in modify + + def apply(self): + current, owner_uuid, name = self.get_current() + cd_action, modify = self.define_actions(current) + deferred_lock = False + + if self.na_helper.changed and not self.module.check_mode: + # lock/unlock actions require password to be set + deferred_lock = self.take_action(cd_action, modify, current, owner_uuid, name) + + password_changed = False + if cd_action is None and self.parameters.get('set_password') is not None and self.parameters['state'] == 'present': + # if check_mode, don't attempt to change the password, but assume it would be changed + if self.use_rest: + password_changed = self.module.check_mode or self.change_password_rest(owner_uuid, name) + else: + password_changed = 
self.module.check_mode or self.change_password() + if self.module.check_mode: + self.module.warn('Module is not idempotent with check_mode when set_password is present.') + + if deferred_lock: + if self.use_rest: + self.lock_unlock_user_rest(owner_uuid, name, self.parameters['lock_user']) + elif self.parameters.get('lock_user'): + self.lock_given_user() + else: + self.unlock_given_user() + + self.module.exit_json(changed=self.na_helper.changed | password_changed, current=current, modify=modify) + + +def main(): + obj = NetAppOntapUser() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py new file mode 100644 index 000000000..75c5d0993 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_user_role.py @@ -0,0 +1,522 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: na_ontap_user_role + +short_description: NetApp ONTAP user role configuration and management +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create or destroy user roles + +options: + + state: + description: + - Whether the specified user role should exist or not. + choices: ['present', 'absent'] + type: str + default: present + + name: + description: + - The name of the role to manage. + required: true + type: str + + command_directory_name: + description: + - The command or command directory to which the role has an access. + - Required with ZAPI. + - Supported with REST from ONTAP 9.11.1 or later. + type: str + + access_level: + description: + - The access level of the role. 
+ - Use C(privileges) for rest-role access choices. + choices: ['none', 'readonly', 'all'] + type: str + default: all + + query: + description: + - A query for the role. The query must apply to the specified command or directory name. + - Use double quotes "" for modifying a existing query to none. + - Supported with REST from ONTAP 9.11.1 or later. + type: str + version_added: 2.8.0 + + privileges: + description: + - Privileges to give the user roles + - REST only + type: list + elements: dict + version_added: 21.23.0 + suboptions: + query: + description: + - A query for the role. The query must apply to the specified command or directory name. + - Query is only supported on 9.11.1+ + type: str + access: + description: + - The access level of the role. + - For command/command directory path, the only supported enum values are 'none','readonly' and 'all'. + - Options 'read_create', 'read_modify' and 'read_create_modify' are supported only with REST and requires ONTAP 9.11.1 or later versions. + choices: ['none', 'readonly', 'all', 'read_create', 'read_modify', 'read_create_modify'] + default: all + type: str + path: + description: + - The api or command to which the role has an access. + - command or command directory path is supported from ONTAP 9.11.1 or later versions. + - Only rest roles are supported for earlier versions. + type: str + required: true + + vserver: + description: + - The name of the vserver to use. + - Required with ZAPI. + type: str + +notes: + - supports ZAPI and REST. REST requires ONTAP 9.7 or later. + - supports check mode. + - when trying to add a command to a role, ONTAP will affect other related commands too. + - for example, 'volume modify' will affect 'volume create' and 'volume show', always provide all the related commands. + - REST supports both role and rest-role from ONTAP 9.11.1 or later versions and only rest-role for earlier versions. 
+''' + +EXAMPLES = """ + + - name: Create User Role Zapi + netapp.ontap.na_ontap_user_role: + state: present + name: ansibleRole + command_directory_name: volume + access_level: none + query: show + vserver: ansibleVServer + use_rest: never + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify User Role Zapi + netapp.ontap.na_ontap_user_role: + state: present + name: ansibleRole + command_directory_name: volume + access_level: none + query: "" + vserver: ansibleVServer + use_rest: never + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Create user role REST in ONTAP 9.11.1. + netapp.ontap.na_ontap_user_role: + state: present + privileges: + - path: /api/cluster/jobs + vserver: ansibleSVM + name: carchi-test-role + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify user role REST in ONTAP 9.11.1. 
+ netapp.ontap.na_ontap_user_role: + state: present + privileges: + - path: /api/cluster/jobs + access: readonly + - path: /api/storage/volumes + access: readonly + vserver: ansibleSVM + name: carchi-test-role + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ + +""" +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapUserRole: + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + command_directory_name=dict(required=False, type='str'), + access_level=dict(required=False, type='str', default='all', + choices=['none', 'readonly', 'all']), + vserver=dict(required=False, type='str'), + query=dict(required=False, type='str'), + privileges=dict(required=False, type='list', elements='dict', options=dict( + query=dict(required=False, type='str'), + access=dict(required=False, type='str', default='all', + choices=['none', 'readonly', 'all', 'read_create', 'read_modify', 'read_create_modify']), + path=dict(required=True, type='str') + )) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[('command_directory_name', 'privileges'), + ('access_level', 'privileges'), + ('query', 'privileges')] + ) + self.owner_uuid = None + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + if self.parameters.get('privileges') is not None: + 
self.parameters['privileges'] = self.na_helper.filter_out_none_entries(self.parameters['privileges']) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + partially_supported_rest_properties = [ + ['query', (9, 11, 1)], + ['privileges.query', (9, 11, 1)], + ['command_directory_name', (9, 11, 1)] + ] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties) + if self.use_rest and not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 7, 0): + msg = 'REST requires ONTAP 9.7 or later for security/roles APIs.' + self.use_rest = self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + if not self.use_rest: + if netapp_utils.has_netapp_lib() is False: + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + if not self.parameters.get('vserver'): + self.module.fail_json(msg="Error: vserver is required field with ZAPI.") + if not self.parameters.get('command_directory_name'): + self.module.fail_json(msg="Error: command_directory_name is required field with ZAPI") + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + elif not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 1) and self.parameters['state'] == 'present': + self.validate_rest_path() + + def validate_rest_path(self): + """ + REST does not support command or command directory path in ONTAP < 9.11.1 versions. + """ + invalid_uri = [] + for privilege in self.parameters.get('privileges', []): + # an api path have '/' in it, validate it present for ONTAP earlier versions. + if '/' not in privilege['path']: + invalid_uri.append(privilege['path']) + if invalid_uri: + self.module.fail_json(msg="Error: Invalid URI %s, please set valid REST API path" % invalid_uri) + + def get_role(self): + """ + Checks if the role exists for specific command-directory-name. 
+ + :return: + True if role found + False if role is not found + :rtype: bool + """ + if self.use_rest: + return self.get_role_rest() + options = {'vserver': self.parameters['vserver'], + 'role-name': self.parameters['name'], + 'command-directory-name': self.parameters['command_directory_name']} + security_login_role_get_iter = netapp_utils.zapi.NaElement( + 'security-login-role-get-iter') + query_details = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-role-info', **options) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(query_details) + security_login_role_get_iter.add_child_elem(query) + + try: + result = self.server.invoke_successfully( + security_login_role_get_iter, enable_tunneling=False) + except netapp_utils.zapi.NaApiError as e: + # Error 16031 denotes a role not being found. + if to_native(e.code) == "16031": + return None + # Error 16039 denotes command directory not found. + elif to_native(e.code) == "16039": + return None + else: + self.module.fail_json(msg='Error getting role %s: %s' % (self.parameters['name'], to_native(e)), + exception=traceback.format_exc()) + if (result.get_child_by_name('num-records') and + int(result.get_child_content('num-records')) >= 1): + role_info = result.get_child_by_name('attributes-list').get_child_by_name('security-login-role-info') + result = { + 'name': role_info['role-name'], + 'access_level': role_info['access-level'], + 'command_directory_name': role_info['command-directory-name'], + 'query': role_info['role-query'] + } + return result + return None + + def create_role(self): + if self.use_rest: + return self.create_role_rest() + options = {'vserver': self.parameters['vserver'], + 'role-name': self.parameters['name'], + 'command-directory-name': self.parameters['command_directory_name'], + 'access-level': self.parameters['access_level']} + if self.parameters.get('query'): + options['role-query'] = self.parameters['query'] + role_create = 
netapp_utils.zapi.NaElement.create_node_with_children('security-login-role-create', **options) + + try: + self.server.invoke_successfully(role_create, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating role %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_role(self): + if self.use_rest: + return self.delete_role_rest() + role_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'security-login-role-delete', **{'vserver': self.parameters['vserver'], + 'role-name': self.parameters['name'], + 'command-directory-name': + self.parameters['command_directory_name']}) + + try: + self.server.invoke_successfully(role_delete, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error removing role %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_role(self, modify): + if self.use_rest: + return self.modify_role_rest(modify) + options = {'vserver': self.parameters['vserver'], + 'role-name': self.parameters['name'], + 'command-directory-name': self.parameters['command_directory_name']} + if 'access_level' in modify.keys(): + options['access-level'] = self.parameters['access_level'] + if 'query' in modify.keys(): + options['role-query'] = self.parameters['query'] + + role_modify = netapp_utils.zapi.NaElement.create_node_with_children('security-login-role-modify', **options) + + try: + self.server.invoke_successfully(role_modify, + enable_tunneling=False) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying role %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def get_role_rest(self): + api = 'security/roles' + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 11, 1): + fields = 'name,owner,privileges.path,privileges.access,privileges.query' + 
else: + fields = 'name,owner,privileges.path,privileges.access' + params = {'name': self.parameters['name'], + 'fields': fields} + if self.parameters.get('vserver'): + params['owner.name'] = self.parameters['vserver'] + else: + params['scope'] = 'cluster' + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg="Error getting role %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + return self.format_record(record) + + def format_record(self, record): + if not record: + return None + for each in self.na_helper.safe_get(record, ['privileges']): + if each['path'] == 'DEFAULT': + record['privileges'].remove(each) + for each in self.na_helper.safe_get(record, ['privileges']): + if each.get('_links'): + each.pop('_links') + return_record = { + 'name': self.na_helper.safe_get(record, ['name']), + 'privileges': self.na_helper.safe_get(record, ['privileges']), + } + self.owner_uuid = self.na_helper.safe_get(record, ['owner', 'uuid']) + return return_record + + def create_role_rest(self): + api = 'security/roles' + body = {'name': self.parameters['name']} + if self.parameters.get('vserver'): + body['owner.name'] = self.parameters['vserver'] + body['privileges'] = self.parameters['privileges'] + dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120) + if error: + self.module.fail_json(msg='Error creating role %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_role_rest(self): + api = 'security/roles' + uuids = '%s/%s' % (self.owner_uuid, self.parameters['name']) + dummy, error = rest_generic.delete_async(self.rest_api, api, uuids, job_timeout=120) + if error: + self.module.fail_json(msg='Error deleting role %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def modify_role_rest(self, modify): + # there is no direct modify for role. 
+ privileges = self.get_role_privileges_rest() + modify_privilege = [] + for privilege in modify['privileges']: + path = privilege['path'] + modify_privilege.append(path) + # if the path is not in privilege then it need to be added + if path not in privileges: + self.create_role_privilege(privilege) + elif privilege.get('query'): + if not privileges[path].get('query'): + self.modify_role_privilege(privilege, path) + elif privilege['query'] != privileges[path]['query']: + self.modify_role_privilege(privilege, path) + elif privilege.get('access') and privilege['access'] != privileges[path]['access']: + self.modify_role_privilege(privilege, path) + for privilege_path in privileges: + if privilege_path not in modify_privilege: + self.delete_role_privilege(privilege_path) + + def get_role_privileges_rest(self): + api = 'security/roles/%s/%s/privileges' % (self.owner_uuid, self.parameters['name']) + records, error = rest_generic.get_0_or_more_records(self.rest_api, api, {}) + if error: + self.module.fail_json(msg="Error getting role privileges for role %s: %s" % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + return self.format_privileges(records) + + def format_privileges(self, records): + return_dict = {} + for record in records: + return_dict[record['path']] = record + return return_dict + + def create_role_privilege(self, privilege): + api = 'security/roles/%s/%s/privileges' % (self.owner_uuid, self.parameters['name']) + body = {'path': privilege['path'], 'access': privilege['access']} + dummy, error = rest_generic.post_async(self.rest_api, api, body, job_timeout=120) + if error: + self.module.fail_json(msg='Error creating role privilege %s: %s' % (privilege['path'], to_native(error)), + exception=traceback.format_exc()) + + def modify_role_privilege(self, privilege, path): + path = path.replace('/', '%2F') + api = 'security/roles/%s/%s/privileges' % (self.owner_uuid, self.parameters['name']) + body = {} + if 
privilege.get('access'): + body['access'] = privilege['access'] + if privilege.get('query'): + body['query'] = privilege['query'] + dummy, error = rest_generic.patch_async(self.rest_api, api, path, body) + if error: + self.module.fail_json(msg='Error modifying privileges for path %s: %s' % (path, to_native(error)), + exception=traceback.format_exc()) + + def delete_role_privilege(self, path): + path = path.replace('/', '%2F') + api = 'security/roles/%s/%s/privileges' % (self.owner_uuid, self.parameters['name']) + dummy, error = rest_generic.delete_async(self.rest_api, api, path, job_timeout=120) + if error: + # removing one of relevant commands will also remove all other commands in group. + # skip if entry does not exist error occurs. + if "entry doesn't exist" in error and "'target': 'path'" in error: + return + self.module.fail_json(msg='Error deleting role privileges %s: %s' % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def convert_parameters(self): + if self.parameters.get('privileges') is not None: + return + if not self.parameters.get('command_directory_name'): + self.module.fail_json(msg="Error: either path or command_directory_name is required in REST.") + self.parameters['privileges'] = [] + temp_dict = { + 'path': self.parameters['command_directory_name'], + 'access': self.parameters['access_level'] + } + self.parameters.pop('command_directory_name') + self.parameters.pop('access_level') + if self.parameters.get('query'): + temp_dict['query'] = self.parameters['query'] + self.parameters.pop('query') + self.parameters['privileges'] = [temp_dict] + + def validate_create_modify_required(self, current, modify): + new_current = self.get_role() + new_cd_action = self.na_helper.get_cd_action(new_current, self.parameters) + new_modify = None if new_cd_action else self.na_helper.get_modified_attributes(new_current, self.parameters) + msg = '' + if current is None and new_modify: + msg = "Create operation also affected 
additional related commands: %s" % new_current['privileges'] + elif modify and new_cd_action == 'create': + msg = """Create role is required, desired is: %s but it's a subset of relevant commands/command directory configured in current: %s, + deleting one of the commands will remove all the commands in the relevant group""" % (self.parameters['privileges'], current['privileges']) + elif modify and new_modify: + msg = "modify is required, desired: %s and new current: %s" % (self.parameters['privileges'], new_current['privileges']) + if msg: + self.module.warn(msg) + + def apply(self): + if self.use_rest: + # if rest convert parameters to rest format if zapi format is used + self.convert_parameters() + current = self.get_role() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + # if desired state specify empty quote query and current query is None, set desired query to None. + # otherwise na_helper.get_modified_attributes will detect a change. + # for REST, query is part of a tuple in privileges list. 
+ if not self.use_rest and self.parameters.get('query') == '' and current is not None and current['query'] is None: + self.parameters['query'] = None + + modify = None if cd_action else self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_role() + elif cd_action == 'delete': + self.delete_role() + elif modify: + self.modify_role(modify) + if self.use_rest: + self.validate_create_modify_required(current, modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + obj = NetAppOntapUserRole() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py new file mode 100644 index 000000000..7ca007c29 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume.py @@ -0,0 +1,2902 @@ +#!/usr/bin/python + +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_volume +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' + +module: na_ontap_volume + +short_description: NetApp ONTAP manage volumes. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - Create or destroy or modify volumes on NetApp ONTAP. + +options: + + state: + description: + - Whether the specified volume should exist or not. + choices: ['present', 'absent'] + type: str + default: 'present' + + name: + description: + - The name of the volume to manage. + type: str + required: true + + vserver: + description: + - Name of the vserver to use. 
+ type: str + required: true + + from_name: + description: + - Name of the existing volume to be renamed to name. + type: str + version_added: 2.7.0 + + is_infinite: + type: bool + description: + - Set True if the volume is an Infinite Volume. + - Deleting an infinite volume is asynchronous. + default: false + + is_online: + type: bool + description: + - Whether the specified volume is online, or not. + default: True + + aggregate_name: + description: + - The name of the aggregate the flexvol should exist on. + - Cannot be set when using the na_application_template option. + type: str + + tags: + description: + - Tags are an optional way to track the uses of a resource. + - Tag values must be formatted as key:value strings, example ["team:csi", "environment:test"] + type: list + elements: str + version_added: 22.6.0 + + nas_application_template: + description: + - additional options when using the application/applications REST API to create a volume. + - the module is using ZAPI by default, and switches to REST if any suboption is present. + - create a FlexVol by default. + - create a FlexGroup if C(auto_provision_as) is set and C(FlexCache) option is not present. + - create a FlexCache if C(flexcache) option is present. + type: dict + version_added: 20.12.0 + suboptions: + flexcache: + description: whether to create a flexcache. If absent, a FlexVol or FlexGroup is created. + type: dict + suboptions: + dr_cache: + description: + - whether to use the same flexgroup msid as the origin. + - requires ONTAP 9.9 and REST. + - create only option, ignored if the flexcache already exists. + type: bool + version_added: 21.3.0 + origin_svm_name: + description: the remote SVM for the flexcache. + type: str + required: true + origin_component_name: + description: the remote component for the flexcache. + type: str + required: true + cifs_access: + description: + - The list of CIFS access controls. You must provide I(user_or_group) or I(access) to enable CIFS access. 
+ type: list + elements: dict + suboptions: + access: + description: The CIFS access granted to the user or group. Default is full_control. + type: str + choices: [change, full_control, no_access, read] + user_or_group: + description: The name of the CIFS user or group that will be granted access. Default is Everyone. + type: str + nfs_access: + description: + - The list of NFS access controls. You must provide I(host) or I(access) to enable NFS access. + - Mutually exclusive with export_policy option. + type: list + elements: dict + suboptions: + access: + description: The NFS access granted. Default is rw. + type: str + choices: [none, ro, rw] + host: + description: The name of the NFS entity granted access. Default is 0.0.0.0/0. + type: str + storage_service: + description: + - The performance service level (PSL) for this volume + type: str + choices: ['value', 'performance', 'extreme'] + tiering: + description: + - Cloud tiering policy (see C(tiering_policy) for a more complete description). + type: dict + suboptions: + control: + description: Storage tiering placement rules for the container. + choices: ['required', 'best_effort', 'disallowed'] + type: str + policy: + description: + - Cloud tiering policy (see C(tiering_policy)). + - Must match C(tiering_policy) if both are present. + choices: ['all', 'auto', 'none', 'snapshot-only'] + type: str + object_stores: + description: list of object store names for tiering. + type: list + elements: str + exclude_aggregates: + description: + - The list of aggregate names to exclude when creating a volume. + - Requires ONTAP 9.9.1 GA or later. + type: list + elements: str + version_added: 21.7.0 + use_nas_application: + description: + - Whether to use the application/applications REST/API to create a volume. + - This will default to true if any other suboption is present. + type: bool + default: true + + size: + description: + - The size of the volume in (size_unit). Required when C(state=present). 
+ type: int + + size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + type: str + default: 'gb' + + size_change_threshold: + description: + - Percentage in size change to trigger a resize. + - When this parameter is greater than 0, a difference in size between what is expected and what is configured is ignored if it is below the threshold. + - For instance, the nas application allocates a larger size than specified to account for overhead. + - Set this to 0 for an exact match. + type: int + default: 10 + version_added: 20.12.0 + + sizing_method: + description: + - Represents the method to modify the size of a FlexGroup. + - use_existing_resources - Increases or decreases the size of the FlexGroup by increasing or decreasing the size of the current FlexGroup resources. + - add_new_resources - Increases the size of the FlexGroup by adding new resources. This is limited to two new resources per available aggregate. + - This is only supported if REST is enabled (ONTAP 9.6 or later) and only for FlexGroups. ONTAP defaults to use_existing_resources. + type: str + choices: ['add_new_resources', 'use_existing_resources'] + version_added: 20.12.0 + + type: + description: + - The volume type, either read-write (RW) or data-protection (DP). + type: str + + export_policy: + description: + - Name of the export policy. + - Mutually exclusive with nfs_access suboption in nas_application_template. + type: str + aliases: ['policy'] + + junction_path: + description: + - Junction path of the volume. + - To unmount, use junction path C(''). + type: str + + space_guarantee: + description: + - Space guarantee style for the volume. + - The file setting is no longer supported. + choices: ['none', 'file', 'volume'] + type: str + + percent_snapshot_space: + description: + - Amount of space reserved for snapshot copies of the volume. 
+ type: int + + volume_security_style: + description: + - The security style associated with this volume. + choices: ['mixed', 'ntfs', 'unified', 'unix'] + type: str + + encrypt: + type: bool + description: + - Whether or not to enable Volume Encryption. + - If not present, ONTAP defaults to false at volume creation. + - Changing encrypt value after creation requires ONTAP 9.3 or later. + version_added: 2.7.0 + + efficiency_policy: + description: + - Allows a storage efficiency policy to be set on volume creation. + type: str + version_added: 2.7.0 + + unix_permissions: + description: + - Unix permission bits in octal or symbolic format. + - For example, 0 is equivalent to ------------, 777 is equivalent to ---rwxrwxrwx,both formats are accepted. + - The valid octal value ranges between 0 and 777 inclusive. + type: str + version_added: 2.8.0 + + group_id: + description: + - The UNIX group ID for the volume. The default value is 0 ('root'). + type: int + version_added: '20.1.0' + + user_id: + description: + - The UNIX user ID for the volume. The default value is 0 ('root'). + type: int + version_added: '20.1.0' + + snapshot_policy: + description: + - The name of the snapshot policy. + - The default policy name is 'default'. + - If present, this will set the protection_type when using C(nas_application_template). + type: str + version_added: 2.8.0 + + aggr_list: + description: + - an array of names of aggregates to be used for FlexGroup constituents. + type: list + elements: str + version_added: 2.8.0 + + aggr_list_multiplier: + description: + - The number of times to iterate over the aggregates listed with the aggr_list parameter when creating a FlexGroup. + type: int + version_added: 2.8.0 + + auto_provision_as: + description: + - Automatically provision a FlexGroup volume. + version_added: 2.8.0 + choices: ['flexgroup'] + type: str + + snapdir_access: + description: + - This is an advanced option, the default is False. 
+ - Enable the visible '.snapshot' directory that is normally present at system internal mount points. + - This value also turns on access to all other '.snapshot' directories in the volume. + type: bool + version_added: 2.8.0 + + atime_update: + description: + - This is an advanced option, the default is True. + - If false, prevent the update of inode access times when a file is read. + - This value is useful for volumes with extremely high read traffic, + since it prevents writes to the inode file for the volume from contending with reads from other files. + - This field should be used carefully. + - That is, use this field when you know in advance that the correct access time for inodes will not be needed for files on that volume. + type: bool + version_added: 2.8.0 + + wait_for_completion: + description: + - Set this parameter to 'true' for synchronous execution during create (wait until volume status is online) + - Set this parameter to 'false' for asynchronous execution + - For asynchronous, execution exits as soon as the request is sent, without checking volume status + type: bool + default: false + version_added: 2.8.0 + + time_out: + description: + - With ZAPI - time to wait for Flexgroup creation, modification, or deletion in seconds. + - With REST - time to wait for any volume creation, modification, or deletion in seconds. + - Error out if task is not completed in defined time. + - With ZAPI - if 0, the request is asynchronous. + - Default is set to 3 minutes. + - Use C(max_wait_time) and C(wait_for_completion) for volume move and encryption operations. + default: 180 + type: int + version_added: 2.8.0 + + max_wait_time: + description: + - Volume move and encryption operations might take longer time to complete. + - With C(wait_for_completion) set, module will wait for time set in this option for volume move and encryption to complete. + - If time exipres, module exit and the operation may still running. + - Default is set to 10 minutes. 
+ default: 600 + type: int + version_added: 22.0.0 + + language: + description: + - Language to use for Volume + - Default uses SVM language + - Possible values Language + - c POSIX + - ar Arabic + - cs Czech + - da Danish + - de German + - en English + - en_us English (US) + - es Spanish + - fi Finnish + - fr French + - he Hebrew + - hr Croatian + - hu Hungarian + - it Italian + - ja Japanese euc-j + - ja_v1 Japanese euc-j + - ja_jp.pck Japanese PCK (sjis) + - ja_jp.932 Japanese cp932 + - ja_jp.pck_v2 Japanese PCK (sjis) + - ko Korean + - no Norwegian + - nl Dutch + - pl Polish + - pt Portuguese + - ro Romanian + - ru Russian + - sk Slovak + - sl Slovenian + - sv Swedish + - tr Turkish + - zh Simplified Chinese + - zh.gbk Simplified Chinese (GBK) + - zh_tw Traditional Chinese euc-tw + - zh_tw.big5 Traditional Chinese Big 5 + - To use UTF-8 as the NFS character set, append '.UTF-8' to the language code + type: str + version_added: 2.8.0 + + qos_policy_group: + description: + - Specifies a QoS policy group to be set on volume. + type: str + version_added: 2.9.0 + + qos_adaptive_policy_group: + description: + - Specifies a QoS adaptive policy group to be set on volume. + type: str + version_added: 2.9.0 + + tiering_policy: + description: + - The tiering policy that is to be associated with the volume. + - This policy decides whether the blocks of a volume will be tiered to the capacity tier. + - snapshot-only policy allows tiering of only the volume snapshot copies not associated with the active file system. + - auto policy allows tiering of both snapshot and active file system user data to the capacity tier. + - backup policy on DP volumes allows all transferred user data blocks to start in the capacity tier. + - all is the REST equivalent for backup. + - When set to none, the Volume blocks will not be tiered to the capacity tier. + - If no value specified, the volume is assigned snapshot only by default. + - Requires ONTAP 9.4 or later. 
+ choices: ['snapshot-only', 'auto', 'backup', 'none', 'all'] + type: str + version_added: 2.9.0 + + space_slo: + description: + - Specifies the space SLO type for the volume. The space SLO type is the Service Level Objective for space management for the volume. + - The space SLO value is used to enforce existing volume settings so that sufficient space is set aside on the aggregate to meet the space SLO. + - This parameter is not supported on Infinite Volumes. + choices: ['none', 'thick', 'semi-thick'] + type: str + version_added: 2.9.0 + + nvfail_enabled: + description: + - If true, the controller performs additional work at boot and takeover times if it finds that there has been any potential data loss in the volume's + constituents due to an NVRAM failure. + - The volume's constituents would be put in a special state called 'in-nvfailed-state' such that protocol access is blocked. + - This will cause the client applications to crash and thus prevent access to stale data. + - To get out of this situation, the admin needs to manually clear the 'in-nvfailed-state' on the volume's constituents. + type: bool + version_added: 2.9.0 + + vserver_dr_protection: + description: + - Specifies the protection type for the volume in a Vserver DR setup. + choices: ['protected', 'unprotected'] + type: str + version_added: 2.9.0 + + comment: + description: + - Sets a comment associated with the volume. + type: str + version_added: 2.9.0 + + snapshot_auto_delete: + description: + - A dictionary for the auto delete options and values. + - Supported options include 'state', 'commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete', + 'prefix', 'destroy_list'. + - Option 'state' determines if the snapshot autodelete is currently enabled for the volume. Possible values are 'on' and 'off'. + - Option 'commitment' determines the snapshots which snapshot autodelete is allowed to delete to get back space. + Possible values are 'try', 'disrupt' and 'destroy'. 
+ - Option 'trigger' determines the condition which starts the automatic deletion of snapshots. + Possible values are 'volume', 'snap_reserve' and DEPRECATED 'space_reserve'. + - Option 'target_free_space' determines when snapshot autodelete should stop deleting snapshots. Depending on the trigger, + snapshots are deleted till we reach the target free space percentage. Accepts int type. + - Option 'delete_order' determines if the oldest or newest snapshot is deleted first. Possible values are 'newest_first' and 'oldest_first'. + - Option 'defer_delete' determines which kind of snapshots to delete in the end. Possible values are 'scheduled', 'user_created', + 'prefix' and 'none'. + - Option 'prefix' can be set to provide the prefix string for the 'prefix' value of the 'defer_delete' option. + The prefix string length can be 15 char long. + - Option 'destroy_list' is a comma seperated list of services which can be destroyed if the snapshot backing that service is deleted. + For 7-mode, the possible values for this option are a combination of 'lun_clone', 'vol_clone', 'cifs_share', 'file_clone' or 'none'. + For cluster-mode, the possible values for this option are a combination of 'lun_clone,file_clone' (for LUN clone and/or file clone), + 'lun_clone,sfsr' (for LUN clone and/or sfsr), 'vol_clone', 'cifs_share', or 'none'. + type: dict + version_added: '20.4.0' + + cutover_action: + description: + - Specifies the action to be taken for cutover. + - Possible values are 'abort_on_failure', 'defer_on_failure', 'force' and 'wait'. Default is 'defer_on_failure'. + choices: ['abort_on_failure', 'defer_on_failure', 'force', 'wait'] + type: str + version_added: '20.5.0' + + check_interval: + description: + - The amount of time in seconds to wait between checks of a volume to see if it has moved successfully. + default: 30 + type: int + version_added: '20.6.0' + + from_vserver: + description: + - The source vserver of the volume is rehosted. 
+ type: str + version_added: '20.6.0' + + auto_remap_luns: + description: + - Flag to control automatic map of LUNs. + type: bool + version_added: '20.6.0' + + force_unmap_luns: + description: + - Flag to control automatic unmap of LUNs. + type: bool + version_added: '20.6.0' + + force_restore: + description: + - If this field is set to "true", the Snapshot copy is restored even if the volume has one or more newer Snapshot + copies which are currently used as reference Snapshot copy by SnapMirror. If a restore is done in this + situation, this will cause future SnapMirror transfers to fail. + - Option should only be used along with snapshot_restore. + type: bool + version_added: '20.6.0' + + preserve_lun_ids: + description: + - If this field is set to "true", LUNs in the volume being restored will remain mapped and their identities + preserved such that host connectivity will not be disrupted during the restore operation. I/O's to the LUN will + be fenced during the restore operation by placing the LUNs in an unavailable state. Once the restore operation + has completed, hosts will be able to resume I/O access to the LUNs. + - Option should only be used along with snapshot_restore. + type: bool + version_added: '20.6.0' + + snapshot_restore: + description: + - Name of snapshot to restore from. + - Not supported on Infinite Volume. + type: str + version_added: '20.6.0' + + compression: + description: + - Whether to enable compression for the volume (HDD and Flash Pool aggregates). + - If this option is not present, it is automatically set to true if inline_compression is true. + type: bool + version_added: '20.12.0' + + inline_compression: + description: + - Whether to enable inline compression for the volume (HDD and Flash Pool aggregates, AFF platforms). 
+ type: bool + version_added: '20.12.0' + + tiering_minimum_cooling_days: + description: + - Determines how many days must pass before inactive data in a volume using the Auto or Snapshot-Only policy is + considered cold and eligible for tiering. + - This option is only supported in REST 9.8 or later. + type: int + version_added: '20.16.0' + + logical_space_enforcement: + description: + - This optionally specifies whether to perform logical space accounting on the volume. When space is enforced + logically, ONTAP enforces volume settings such that all the physical space saved by the storage efficiency + features will be calculated as used. + - This is only supported with REST. + type: bool + version_added: '20.16.0' + + logical_space_reporting: + description: + - This optionally specifies whether to report space logically on the volume. When space is reported logically, + ONTAP reports the volume space such that all the physical space saved by the storage efficiency features are also + reported as used. + - This is only supported with REST. + type: bool + version_added: '20.16.0' + + snaplock: + description: + - Starting with ONTAP 9.10.1, snaplock.type is set at the volume level. + - The other suboptions can be set or modified when using REST on earlier versions of ONTAP. + - This option and suboptions are only supported with REST. + type: dict + version_added: 21.18.0 + suboptions: + append_mode_enabled: + description: + - when enabled, all the files created with write permissions on the volume are, by default, + WORM appendable files. The user can append the data to a WORM appendable file but cannot modify + the existing contents of the file nor delete the file until it expires. + type: bool + autocommit_period: + description: + - autocommit period for SnapLock volume. All files which are not modified for a period greater than + the autocommit period of the volume are committed to the WORM state. 
+ - duration is in the ISO-8601 duration format (eg PY, PM, PD, PTH, PTM). + - examples P30M, P10Y, PT1H, "none". A duration that combines different periods is not supported. + type: str + privileged_delete: + description: + - privileged-delete attribute of a SnapLock volume. + - On a SnapLock Enterprise (SLE) volume, a designated privileged user can selectively delete files irrespective of the retention time of the file. + - On a SnapLock Compliance (SLC) volume, it is always permanently_disabled. + type: str + choices: [disabled, enabled, permanently_disabled] + retention: + description: + - default, maximum, and minumum retention periods for files committed to the WORM state on the volume. + - durations are in the ISO-8601 duration format, see autocommit_period. + type: dict + suboptions: + default: + description: + - default retention period that is applied to files while committing them to the WORM state without an associated retention period. + type: str + maximum: + description: + - maximum allowed retention period for files committed to the WORM state on the volume. + type: str + minimum: + description: + - minimum allowed retention period for files committed to the WORM state on the volume. + type: str + type: + description: + - The SnapLock type of the volume. + - compliance - A SnapLock Compliance (SLC) volume provides the highest level of WORM protection and + an administrator cannot destroy a SLC volume if it contains unexpired WORM files. + - enterprise - An administrator can delete a SnapLock Enterprise (SLE) volume. + - non_snaplock - Indicates the volume is non-snaplock. + type: str + choices: [compliance, enterprise, non_snaplock] + + max_files: + description: + - The maximum number of files (inodes) for user-visible data allowed on the volume. + - Note - ONTAP allocates a slightly different value, for instance 3990 when asking for 4000. + Tp preserve idempotency, small variations in size are ignored. 
+ type: int + version_added: '20.18.0' + + analytics: + description: + - Set file system analytics state of the volume. + - Only supported with REST and requires ONTAP 9.8 or later version. + - Cannot enable analytics for volume that contains luns. + type: str + version_added: '22.0.0' + choices: ['on', 'off'] + +notes: + - supports REST and ZAPI. REST requires ONTAP 9.6 or later. Efficiency with REST requires ONTAP 9.7 or later. + - REST is enabled when C(use_rest) is set to always. + - The feature_flag C(warn_or_fail_on_fabricpool_backend_change) controls whether an error is reported when + tiering control would require or disallow FabricPool for an existing volume with a different backend. + Allowed values are fail, warn, and ignore, and the default is set to fail. + - snapshot_restore is not idempotent, it always restores. + +''' + +EXAMPLES = """ + + - name: Create FlexVol + netapp.ontap.na_ontap_volume: + state: present + name: ansibleVolume12 + is_infinite: False + aggregate_name: ansible_aggr + size: 100 + size_unit: mb + user_id: 1001 + group_id: 2002 + space_guarantee: none + tiering_policy: auto + export_policy: default + percent_snapshot_space: 60 + qos_policy_group: max_performance_gold + vserver: ansibleVServer + wait_for_completion: True + space_slo: none + nvfail_enabled: False + comment: ansible created volume + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Volume Delete + netapp.ontap.na_ontap_volume: + state: absent + name: ansibleVolume12 + aggregate_name: ansible_aggr + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Make FlexVol offline + netapp.ontap.na_ontap_volume: + state: present + name: ansibleVolume + is_infinite: False + is_online: False + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + 
+ - name: Create Flexgroup volume manually + netapp.ontap.na_ontap_volume: + state: present + name: ansibleVolume + is_infinite: False + aggr_list: "{{ aggr_list }}" + aggr_list_multiplier: 2 + size: 200 + size_unit: mb + space_guarantee: none + export_policy: default + vserver: "{{ vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: False + unix_permissions: 777 + snapshot_policy: default + time_out: 0 + + - name: Create Flexgroup volume auto provsion as flex group + netapp.ontap.na_ontap_volume: + state: present + name: ansibleVolume + is_infinite: False + auto_provision_as: flexgroup + size: 200 + size_unit: mb + space_guarantee: none + export_policy: default + vserver: "{{ vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: False + unix_permissions: 777 + snapshot_policy: default + time_out: 0 + + - name: Create FlexVol with QoS adaptive + netapp.ontap.na_ontap_volume: + state: present + name: ansibleVolume15 + is_infinite: False + aggregate_name: ansible_aggr + size: 100 + size_unit: gb + space_guarantee: none + export_policy: default + percent_snapshot_space: 10 + qos_adaptive_policy_group: extreme + vserver: ansibleVServer + wait_for_completion: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify volume dr protection (vserver of the volume must be in a snapmirror relationship) + netapp.ontap.na_ontap_volume: + state: present + name: ansibleVolume + vserver_dr_protection: protected + vserver: "{{ vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: False + + - name: Modify volume with snapshot auto delete options + netapp.ontap.na_ontap_volume: + state: present + name: vol_auto_delete + snapshot_auto_delete: + state: "on" + commitment: try + defer_delete: 
scheduled + target_free_space: 30 + destroy_list: lun_clone,vol_clone + delete_order: newest_first + aggregate_name: "{{ aggr }}" + vserver: "{{ vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: False + + - name: Move volume with force cutover action + netapp.ontap.na_ontap_volume: + name: ansible_vol + aggregate_name: aggr_ansible + cutover_action: force + vserver: "{{ vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: false + + - name: Rehost volume to another vserver auto remap luns + netapp.ontap.na_ontap_volume: + name: ansible_vol + from_vserver: ansible + auto_remap_luns: true + vserver: "{{ vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: false + + - name: Rehost volume to another vserver force unmap luns + netapp.ontap.na_ontap_volume: + name: ansible_vol + from_vserver: ansible + force_unmap_luns: true + vserver: "{{ vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: false + + - name: Snapshot restore volume + netapp.ontap.na_ontap_volume: + name: ansible_vol + vserver: ansible + snapshot_restore: 2020-05-24-weekly + force_restore: true + preserve_lun_ids: true + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + + - name: Volume create using application/applications nas template + netapp.ontap.na_ontap_volume: + state: present + name: ansibleVolume12 + vserver: ansibleSVM + size: 100000000 + size_unit: b + space_guarantee: none + language: es + percent_snapshot_space: 60 + unix_permissions: ---rwxrwxrwx + snapshot_policy: default + efficiency_policy: default + comment: testing + nas_application_template: + nfs_access: # the mere presence of a suboption 
is enough to enable this new feature + - access: ro + - access: rw + host: 10.0.0.0/8 + exclude_aggregates: aggr0 + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: false + + # requires Ontap collection version - 21.24.0 to use iso filter plugin. + - name: volume create with snaplock set. + netapp.ontap.na_ontap_volume: + state: present + name: "{{ snaplock_volume }}" + aggregate_name: "{{ aggregate }}" + size: 20 + size_unit: mb + space_guarantee: none + policy: default + type: rw + snaplock: + type: enterprise + retention: + default: "{{ 60 | netapp.ontap.iso8601_duration_from_seconds }}" + +""" + +RETURN = """ +""" + +import time +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.rest_application import RestApplication +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver + + +class NetAppOntapVolume: + '''Class with volume operations''' + + def __init__(self): + '''Initialize module parameters''' + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + vserver=dict(required=True, type='str'), + from_name=dict(required=False, type='str'), + is_infinite=dict(required=False, type='bool', default=False), + is_online=dict(required=False, type='bool', default=True), + size=dict(type='int', default=None), + size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 
'yb'], type='str'), + sizing_method=dict(choices=['add_new_resources', 'use_existing_resources'], type='str'), + aggregate_name=dict(type='str', default=None), + type=dict(type='str', default=None), + export_policy=dict(type='str', default=None, aliases=['policy']), + junction_path=dict(type='str', default=None), + space_guarantee=dict(choices=['none', 'file', 'volume'], default=None), + percent_snapshot_space=dict(type='int', default=None), + volume_security_style=dict(choices=['mixed', 'ntfs', 'unified', 'unix']), + encrypt=dict(required=False, type='bool'), + efficiency_policy=dict(required=False, type='str'), + unix_permissions=dict(required=False, type='str'), + group_id=dict(required=False, type='int'), + user_id=dict(required=False, type='int'), + snapshot_policy=dict(required=False, type='str'), + aggr_list=dict(required=False, type='list', elements='str'), + aggr_list_multiplier=dict(required=False, type='int'), + snapdir_access=dict(required=False, type='bool'), + atime_update=dict(required=False, type='bool'), + auto_provision_as=dict(choices=['flexgroup'], required=False, type='str'), + wait_for_completion=dict(required=False, type='bool', default=False), + time_out=dict(required=False, type='int', default=180), + max_wait_time=dict(required=False, type='int', default=600), + language=dict(type='str', required=False), + qos_policy_group=dict(required=False, type='str'), + qos_adaptive_policy_group=dict(required=False, type='str'), + nvfail_enabled=dict(type='bool', required=False), + space_slo=dict(type='str', required=False, choices=['none', 'thick', 'semi-thick']), + tiering_policy=dict(type='str', required=False, choices=['snapshot-only', 'auto', 'backup', 'none', 'all']), + vserver_dr_protection=dict(type='str', required=False, choices=['protected', 'unprotected']), + comment=dict(type='str', required=False), + snapshot_auto_delete=dict(type='dict', required=False), + cutover_action=dict(required=False, type='str', choices=['abort_on_failure', 
'defer_on_failure', 'force', 'wait']), + check_interval=dict(required=False, type='int', default=30), + from_vserver=dict(required=False, type='str'), + auto_remap_luns=dict(required=False, type='bool'), + force_unmap_luns=dict(required=False, type='bool'), + force_restore=dict(required=False, type='bool'), + compression=dict(required=False, type='bool'), + inline_compression=dict(required=False, type='bool'), + preserve_lun_ids=dict(required=False, type='bool'), + snapshot_restore=dict(required=False, type='str'), + nas_application_template=dict(type='dict', options=dict( + use_nas_application=dict(type='bool', default=True), + exclude_aggregates=dict(type='list', elements='str'), + flexcache=dict(type='dict', options=dict( + dr_cache=dict(type='bool'), + origin_svm_name=dict(required=True, type='str'), + origin_component_name=dict(required=True, type='str') + )), + cifs_access=dict(type='list', elements='dict', options=dict( + access=dict(type='str', choices=['change', 'full_control', 'no_access', 'read']), + user_or_group=dict(type='str') + )), + nfs_access=dict(type='list', elements='dict', options=dict( + access=dict(type='str', choices=['none', 'ro', 'rw']), + host=dict(type='str') + )), + storage_service=dict(type='str', choices=['value', 'performance', 'extreme']), + tiering=dict(type='dict', options=dict( + control=dict(type='str', choices=['required', 'best_effort', 'disallowed']), + policy=dict(type='str', choices=['all', 'auto', 'none', 'snapshot-only']), + object_stores=dict(type='list', elements='str') # create only + )) + )), + size_change_threshold=dict(type='int', default=10), + tiering_minimum_cooling_days=dict(required=False, type='int'), + logical_space_enforcement=dict(required=False, type='bool'), + logical_space_reporting=dict(required=False, type='bool'), + snaplock=dict(type='dict', options=dict( + append_mode_enabled=dict(required=False, type='bool'), + autocommit_period=dict(required=False, type='str'), + 
privileged_delete=dict(required=False, type='str', choices=['disabled', 'enabled', 'permanently_disabled']), + retention=dict(type='dict', options=dict( + default=dict(required=False, type='str'), + maximum=dict(required=False, type='str'), + minimum=dict(required=False, type='str') + )), + type=dict(required=False, type='str', choices=['compliance', 'enterprise', 'non_snaplock']) + )), + max_files=dict(required=False, type='int'), + analytics=dict(required=False, type='str', choices=['on', 'off']), + tags=dict(required=False, type='list', elements='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + mutually_exclusive=[ + ['space_guarantee', 'space_slo'], ['auto_remap_luns', 'force_unmap_luns'] + ], + supports_check_mode=True + ) + self.na_helper = NetAppModule(self) + self.parameters = self.na_helper.check_and_set_parameters(self.module) + self.volume_style = None + self.volume_created = False + self.issues = [] + self.sis_keys2zapi_get = dict( + efficiency_policy='policy', + compression='is-compression-enabled', + inline_compression='is-inline-compression-enabled') + self.sis_keys2zapi_set = dict( + efficiency_policy='policy-name', + compression='enable-compression', + inline_compression='enable-inline-compression') + + if self.parameters.get('size'): + self.parameters['size'] = self.parameters['size'] * \ + netapp_utils.POW2_BYTE_MAP[self.parameters['size_unit']] + self.validate_snapshot_auto_delete() + self.rest_api = netapp_utils.OntapRestAPI(self.module) + unsupported_rest_properties = ['atime_update', + 'cutover_action', + 'encrypt-destination', + 'force_restore', + 'nvfail_enabled', + 'preserve_lun_ids', + 'snapdir_access', + 'snapshot_auto_delete', + 'space_slo', + 'vserver_dr_protection'] + partially_supported_rest_properties = [['efficiency_policy', (9, 7)], ['tiering_minimum_cooling_days', (9, 8)], ['analytics', (9, 8)], + ['tags', (9, 13, 1)]] + self.unsupported_zapi_properties = ['sizing_method', 
'logical_space_enforcement', 'logical_space_reporting', 'snaplock', 'analytics', 'tags'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties) + + if not self.use_rest: + self.setup_zapi() + if self.use_rest: + self.rest_errors() + + # REST API for application/applications if needed - will report an error when REST is not supported + self.rest_app = self.setup_rest_application() + + def setup_zapi(self): + if netapp_utils.has_netapp_lib() is False: + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + + for unsupported_zapi_property in self.unsupported_zapi_properties: + if self.parameters.get(unsupported_zapi_property) is not None: + msg = "Error: %s option is not supported with ZAPI. It can only be used with REST." % unsupported_zapi_property + msg += ' use_rest: %s.' % self.parameters['use_rest'] + if self.rest_api.fallback_to_zapi_reason: + msg += ' Conflict %s.' % self.rest_api.fallback_to_zapi_reason + self.module.fail_json(msg=msg) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def validate_snapshot_auto_delete(self): + if 'snapshot_auto_delete' in self.parameters: + for key in self.parameters['snapshot_auto_delete']: + if key not in ['commitment', 'trigger', 'target_free_space', 'delete_order', 'defer_delete', + 'prefix', 'destroy_list', 'state']: + self.module.fail_json(msg="snapshot_auto_delete option '%s' is not valid." % key) + + def setup_rest_application(self): + rest_app = None + if self.na_helper.safe_get(self.parameters, ['nas_application_template', 'use_nas_application']): + if not self.use_rest: + msg = 'Error: nas_application_template requires REST support.' + msg += ' use_rest: %s.' % self.parameters['use_rest'] + if self.rest_api.fallback_to_zapi_reason: + msg += ' Conflict %s.' 
% self.rest_api.fallback_to_zapi_reason + self.module.fail_json(msg=msg) + # consistency checks + # tiering policy is duplicated, make sure values are matching + tiering_policy_nas = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering', 'policy']) + tiering_policy = self.na_helper.safe_get(self.parameters, ['tiering_policy']) + if tiering_policy_nas is not None and tiering_policy is not None and tiering_policy_nas != tiering_policy: + msg = 'Conflict: if tiering_policy and nas_application_template tiering policy are both set, they must match.' + msg += ' Found "%s" and "%s".' % (tiering_policy, tiering_policy_nas) + self.module.fail_json(msg=msg) + # aggregate_name will force a move if present + if self.parameters.get('aggregate_name') is not None: + msg = 'Conflict: aggregate_name is not supported when application template is enabled.'\ + ' Found: aggregate_name: %s' % self.parameters['aggregate_name'] + self.module.fail_json(msg=msg) + nfs_access = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'nfs_access']) + if nfs_access is not None and self.na_helper.safe_get(self.parameters, ['export_policy']) is not None: + msg = 'Conflict: export_policy option and nfs_access suboption in nas_application_template are mutually exclusive.' 
+ self.module.fail_json(msg=msg) + rest_app = RestApplication(self.rest_api, self.parameters['vserver'], self.parameters['name']) + return rest_app + + def volume_get_iter(self, vol_name=None): + """ + Return volume-get-iter query results + :param vol_name: name of the volume + :return: NaElement + """ + volume_info = netapp_utils.zapi.NaElement('volume-get-iter') + volume_attributes = netapp_utils.zapi.NaElement('volume-attributes') + volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes') + volume_id_attributes.add_new_child('name', vol_name) + volume_id_attributes.add_new_child('vserver', self.parameters['vserver']) + volume_attributes.add_child_elem(volume_id_attributes) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(volume_attributes) + volume_info.add_child_elem(query) + + try: + result = self.server.invoke_successfully(volume_info, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching volume %s : %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + return result + + def get_application(self): + if self.rest_app: + app, error = self.rest_app.get_application_details('nas') + self.na_helper.fail_on_error(error) + # flatten component list + comps = self.na_helper.safe_get(app, ['nas', 'application_components']) + if comps: + comp = comps[0] + app['nas'].pop('application_components') + app['nas'].update(comp) + return app['nas'] + return None + + def get_volume_attributes(self, volume_attributes, result): + # extract values from volume record + attrs = dict( + # The keys are used to index a result dictionary, values are read from a ZAPI object indexed by key_list. + # If required is True, an error is reported if a key in key_list is not found. 
+ # We may have observed cases where the record is incomplete as the volume is being created, so it may be better to ignore missing keys + # I'm not sure there is much value in omitnone, but it preserves backward compatibility + # If omitnone is absent or False, a None value is recorded, if True, the key is not set + encrypt=dict(key_list=['encrypt'], convert_to=bool, omitnone=True), + tiering_policy=dict(key_list=['volume-comp-aggr-attributes', 'tiering-policy'], omitnone=True), + export_policy=dict(key_list=['volume-export-attributes', 'policy']), + aggregate_name=dict(key_list=['volume-id-attributes', 'containing-aggregate-name']), + flexgroup_uuid=dict(key_list=['volume-id-attributes', 'flexgroup-uuid']), + instance_uuid=dict(key_list=['volume-id-attributes', 'instance-uuid']), + junction_path=dict(key_list=['volume-id-attributes', 'junction-path'], default=''), + style_extended=dict(key_list=['volume-id-attributes', 'style-extended']), + type=dict(key_list=['volume-id-attributes', 'type'], omitnone=True), + comment=dict(key_list=['volume-id-attributes', 'comment']), + max_files=dict(key_list=['volume-inode-attributes', 'files-total'], convert_to=int), + atime_update=dict(key_list=['volume-performance-attributes', 'is-atime-update-enabled'], convert_to=bool), + qos_policy_group=dict(key_list=['volume-qos-attributes', 'policy-group-name']), + qos_adaptive_policy_group=dict(key_list=['volume-qos-attributes', 'adaptive-policy-group-name']), + # style is not present if the volume is still offline or of type: dp + volume_security_style=dict(key_list=['volume-security-attributes', 'style'], omitnone=True), + group_id=dict(key_list=['volume-security-attributes', 'volume-security-unix-attributes', 'group-id'], convert_to=int, omitnone=True), + unix_permissions=dict(key_list=['volume-security-attributes', 'volume-security-unix-attributes', 'permissions'], required=True), + user_id=dict(key_list=['volume-security-attributes', 'volume-security-unix-attributes', 
'user-id'], convert_to=int, omitnone=True), + snapdir_access=dict(key_list=['volume-snapshot-attributes', 'snapdir-access-enabled'], convert_to=bool), + snapshot_policy=dict(key_list=['volume-snapshot-attributes', 'snapshot-policy'], omitnone=True), + percent_snapshot_space=dict(key_list=['volume-space-attributes', 'percentage-snapshot-reserve'], convert_to=int, omitnone=True), + size=dict(key_list=['volume-space-attributes', 'size'], convert_to=int), + space_guarantee=dict(key_list=['volume-space-attributes', 'space-guarantee']), + space_slo=dict(key_list=['volume-space-attributes', 'space-slo']), + nvfail_enabled=dict(key_list=['volume-state-attributes', 'is-nvfail-enabled'], convert_to=bool), + is_online=dict(key_list=['volume-state-attributes', 'state'], convert_to='bool_online', omitnone=True), + vserver_dr_protection=dict(key_list=['volume-vserver-dr-protection-attributes', 'vserver-dr-protection']), + ) + + self.na_helper.zapi_get_attrs(volume_attributes, attrs, result) + + def get_snapshot_auto_delete_attributes(self, volume_attributes, result): + attrs = dict( + commitment=dict(key_list=['volume-snapshot-autodelete-attributes', 'commitment']), + defer_delete=dict(key_list=['volume-snapshot-autodelete-attributes', 'defer-delete']), + delete_order=dict(key_list=['volume-snapshot-autodelete-attributes', 'delete-order']), + destroy_list=dict(key_list=['volume-snapshot-autodelete-attributes', 'destroy-list']), + is_autodelete_enabled=dict(key_list=['volume-snapshot-autodelete-attributes', 'is-autodelete-enabled'], convert_to=bool), + prefix=dict(key_list=['volume-snapshot-autodelete-attributes', 'prefix']), + target_free_space=dict(key_list=['volume-snapshot-autodelete-attributes', 'target-free-space'], convert_to=int), + trigger=dict(key_list=['volume-snapshot-autodelete-attributes', 'trigger']), + ) + self.na_helper.zapi_get_attrs(volume_attributes, attrs, result) + if result['is_autodelete_enabled'] is not None: + result['state'] = 'on' if 
result['is_autodelete_enabled'] else 'off' + del result['is_autodelete_enabled'] + + def get_volume(self, vol_name=None): + """ + Return details about the volume + :param: + name : Name of the volume + :return: Details about the volume. None if not found. + :rtype: dict + """ + result = None + if vol_name is None: + vol_name = self.parameters['name'] + if self.use_rest: + return self.get_volume_rest(vol_name) + volume_info = self.volume_get_iter(vol_name) + if self.na_helper.zapi_get_value(volume_info, ['num-records'], convert_to=int, default=0) > 0: + result = self.get_volume_record_from_zapi(volume_info, vol_name) + return result + + def get_volume_record_from_zapi(self, volume_info, vol_name): + volume_attributes = self.na_helper.zapi_get_value(volume_info, ['attributes-list', 'volume-attributes'], required=True) + result = dict(name=vol_name) + self.get_volume_attributes(volume_attributes, result) + result['uuid'] = (result['instance_uuid'] if result['style_extended'] == 'flexvol' + else result['flexgroup_uuid'] if result['style_extended'] is not None and result['style_extended'].startswith('flexgroup') + else None) + + # snapshot_auto_delete options + auto_delete = {} + self. get_snapshot_auto_delete_attributes(volume_attributes, auto_delete) + result['snapshot_auto_delete'] = auto_delete + + self.get_efficiency_info(result) + + return result + + def wrap_fail_json(self, msg, exception=None): + for issue in self.issues: + self.module.warn(issue) + if self.volume_created: + msg = 'Volume created with success, with missing attributes: %s' % msg + self.module.fail_json(msg=msg, exception=exception) + + def create_nas_application_component(self): + '''Create application component for nas template''' + required_options = ('name', 'size') + for option in required_options: + if self.parameters.get(option) is None: + self.module.fail_json(msg='Error: "%s" is required to create nas application.' 
% option) + + application_component = dict( + name=self.parameters['name'], + total_size=self.parameters['size'], + share_count=1, # 1 is the maximum value for nas + scale_out=(self.volume_style == 'flexgroup'), + ) + name = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'storage_service']) + if name is not None: + application_component['storage_service'] = dict(name=name) + + flexcache = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache']) + if flexcache is not None: + application_component['flexcache'] = dict( + origin=dict( + svm=dict(name=flexcache['origin_svm_name']), + component=dict(name=flexcache['origin_component_name']) + ) + ) + # scale_out should be absent or set to True for FlexCache + del application_component['scale_out'] + dr_cache = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'flexcache', 'dr_cache']) + if dr_cache is not None: + application_component['flexcache']['dr_cache'] = dr_cache + + tiering = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering']) + if tiering is not None or self.parameters.get('tiering_policy') is not None: + application_component['tiering'] = {} + if tiering is None: + tiering = {} + if 'policy' not in tiering: + tiering['policy'] = self.parameters.get('tiering_policy') + for attr in ('control', 'policy', 'object_stores'): + value = tiering.get(attr) + if attr == 'object_stores' and value is not None: + value = [dict(name=x) for x in value] + if value is not None: + application_component['tiering'][attr] = value + + if self.get_qos_policy_group() is not None: + application_component['qos'] = { + "policy": { + "name": self.get_qos_policy_group(), + } + } + if self.parameters.get('export_policy') is not None: + application_component['export_policy'] = { + "name": self.parameters['export_policy'], + } + return application_component + + def create_volume_body(self): + '''Create body for nas template''' + nas = 
dict(application_components=[self.create_nas_application_component()]) + value = self.na_helper.safe_get(self.parameters, ['snapshot_policy']) + if value is not None: + nas['protection_type'] = {'local_policy': value} + for attr in ('nfs_access', 'cifs_access'): + value = self.na_helper.safe_get(self.parameters, ['nas_application_template', attr]) + if value is not None: + # we expect value to be a list of dicts, with maybe some empty entries + value = self.na_helper.filter_out_none_entries(value) + if value: + nas[attr] = value + for attr in ('exclude_aggregates',): + values = self.na_helper.safe_get(self.parameters, ['nas_application_template', attr]) + if values: + nas[attr] = [dict(name=name) for name in values] + return self.rest_app.create_application_body("nas", nas, smart_container=True) + + def create_nas_application(self): + '''Use REST application/applications nas template to create a volume''' + body, error = self.create_volume_body() + self.na_helper.fail_on_error(error) + response, error = self.rest_app.create_application(body) + self.na_helper.fail_on_error(error) + return response + + def create_volume(self): + '''Create ONTAP volume''' + if self.rest_app: + return self.create_nas_application() + if self.use_rest: + return self.create_volume_rest() + if self.volume_style == 'flexgroup': + return self.create_volume_async() + + options = self.create_volume_options() + volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create', **options) + try: + self.server.invoke_successfully(volume_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else '' + self.module.fail_json(msg='Error provisioning volume %s%s: %s' + % (self.parameters['name'], size_msg, to_native(error)), + exception=traceback.format_exc()) + + if self.parameters.get('wait_for_completion'): + # round off time_out + retries = 
(self.parameters['time_out'] + 5) // 10 + is_online = None + errors = [] + while not is_online and retries > 0: + try: + current = self.get_volume() + is_online = None if current is None else current['is_online'] + except KeyError as err: + # get_volume may receive incomplete data as the volume is being created + errors.append(repr(err)) + if not is_online: + time.sleep(10) + retries -= 1 + if not is_online: + errors.append("Timeout after %s seconds" % self.parameters['time_out']) + self.module.fail_json(msg='Error waiting for volume %s to come online: %s' + % (self.parameters['name'], str(errors))) + return None + + def create_volume_async(self): + ''' + create volume async. + ''' + options = self.create_volume_options() + volume_create = netapp_utils.zapi.NaElement.create_node_with_children('volume-create-async', **options) + if self.parameters.get('aggr_list'): + aggr_list_obj = netapp_utils.zapi.NaElement('aggr-list') + volume_create.add_child_elem(aggr_list_obj) + for aggr in self.parameters['aggr_list']: + aggr_list_obj.add_new_child('aggr-name', aggr) + try: + result = self.server.invoke_successfully(volume_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + size_msg = ' of size %s' % self.parameters['size'] if self.parameters.get('size') is not None else '' + self.module.fail_json(msg='Error provisioning volume %s%s: %s' + % (self.parameters['name'], size_msg, to_native(error)), + exception=traceback.format_exc()) + self.check_invoke_result(result, 'create') + return None + + def create_volume_options(self): + '''Set volume options for create operation''' + options = {} + if self.volume_style == 'flexgroup': + options['volume-name'] = self.parameters['name'] + if self.parameters.get('aggr_list_multiplier') is not None: + options['aggr-list-multiplier'] = str(self.parameters['aggr_list_multiplier']) + if self.parameters.get('auto_provision_as') is not None: + options['auto-provision-as'] = self.parameters['auto_provision_as'] + 
if self.parameters.get('space_guarantee') is not None: + options['space-guarantee'] = self.parameters['space_guarantee'] + else: + options['volume'] = self.parameters['name'] + if self.parameters.get('aggregate_name') is None: + self.module.fail_json(msg='Error provisioning volume %s: aggregate_name is required' + % self.parameters['name']) + options['containing-aggr-name'] = self.parameters['aggregate_name'] + if self.parameters.get('space_guarantee') is not None: + options['space-reserve'] = self.parameters['space_guarantee'] + + if self.parameters.get('size') is not None: + options['size'] = str(self.parameters['size']) + if self.parameters.get('snapshot_policy') is not None: + options['snapshot-policy'] = self.parameters['snapshot_policy'] + if self.parameters.get('unix_permissions') is not None: + options['unix-permissions'] = self.parameters['unix_permissions'] + if self.parameters.get('group_id') is not None: + options['group-id'] = str(self.parameters['group_id']) + if self.parameters.get('user_id') is not None: + options['user-id'] = str(self.parameters['user_id']) + if self.parameters.get('volume_security_style') is not None: + options['volume-security-style'] = self.parameters['volume_security_style'] + if self.parameters.get('export_policy') is not None: + options['export-policy'] = self.parameters['export_policy'] + if self.parameters.get('junction_path') is not None: + options['junction-path'] = self.parameters['junction_path'] + if self.parameters.get('comment') is not None: + options['volume-comment'] = self.parameters['comment'] + if self.parameters.get('type') is not None: + options['volume-type'] = self.parameters['type'] + if self.parameters.get('percent_snapshot_space') is not None: + options['percentage-snapshot-reserve'] = str(self.parameters['percent_snapshot_space']) + if self.parameters.get('language') is not None: + options['language-code'] = self.parameters['language'] + if self.parameters.get('qos_policy_group') is not None: + 
options['qos-policy-group-name'] = self.parameters['qos_policy_group'] + if self.parameters.get('qos_adaptive_policy_group') is not None: + options['qos-adaptive-policy-group-name'] = self.parameters['qos_adaptive_policy_group'] + if self.parameters.get('nvfail_enabled') is not None: + options['is-nvfail-enabled'] = str(self.parameters['nvfail_enabled']) + if self.parameters.get('space_slo') is not None: + options['space-slo'] = self.parameters['space_slo'] + if self.parameters.get('tiering_policy') is not None: + options['tiering-policy'] = self.parameters['tiering_policy'] + if self.parameters.get('encrypt') is not None: + options['encrypt'] = self.na_helper.get_value_for_bool(False, self.parameters['encrypt'], 'encrypt') + if self.parameters.get('vserver_dr_protection') is not None: + options['vserver-dr-protection'] = self.parameters['vserver_dr_protection'] + if self.parameters['is_online']: + options['volume-state'] = 'online' + else: + options['volume-state'] = 'offline' + return options + + def rest_delete_volume(self, current): + """ + Delete the volume using REST DELETE method (it scrubs better than ZAPI). + """ + uuid = self.parameters['uuid'] + if uuid is None: + self.module.fail_json(msg='Could not read UUID for volume %s in delete.' 
% self.parameters['name']) + unmount_error = self.volume_unmount_rest(fail_on_error=False) if current.get('junction_path') else None + dummy, error = rest_generic.delete_async(self.rest_api, 'storage/volumes', uuid, job_timeout=self.parameters['time_out']) + self.na_helper.fail_on_error(error, previous_errors=(['Error unmounting volume: %s' % unmount_error] if unmount_error else None)) + if unmount_error: + self.module.warn('Volume was successfully deleted though unmount failed with: %s' % unmount_error) + + def delete_volume_async(self, current): + '''Delete ONTAP volume for infinite or flexgroup types ''' + errors = None + if current['is_online']: + dummy, errors = self.change_volume_state(call_from_delete_vol=True) + volume_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-destroy-async', **{'volume-name': self.parameters['name']}) + try: + result = self.server.invoke_successfully(volume_delete, enable_tunneling=True) + self.check_invoke_result(result, 'delete') + except netapp_utils.zapi.NaApiError as error: + msg = 'Error deleting volume %s: %s.' 
% (self.parameters['name'], to_native(error)) + if errors: + msg += ' Previous errors when offlining/unmounting volume: %s' % ' - '.join(errors) + self.module.fail_json(msg=msg) + + def delete_volume_sync(self, current, unmount_offline): + '''Delete ONTAP volume for flexvol types ''' + options = {'name': self.parameters['name']} + if unmount_offline: + options['unmount-and-offline'] = 'true' + volume_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-destroy', **options) + try: + self.server.invoke_successfully(volume_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + return error + return None + + def delete_volume(self, current): + '''Delete ONTAP volume''' + if self.use_rest and self.parameters['uuid'] is not None: + return self.rest_delete_volume(current) + if self.parameters.get('is_infinite') or self.volume_style == 'flexgroup': + return self.delete_volume_async(current) + errors = [] + error = self.delete_volume_sync(current, True) + if error: + errors.append('volume delete failed with unmount-and-offline option: %s' % to_native(error)) + error = self.delete_volume_sync(current, False) + if error: + errors.append('volume delete failed without unmount-and-offline option: %s' % to_native(error)) + if errors: + self.module.fail_json(msg='Error deleting volume %s: %s' + % (self.parameters['name'], ' - '.join(errors)), + exception=traceback.format_exc()) + + def move_volume(self, encrypt_destination=None): + '''Move volume from source aggregate to destination aggregate''' + if self.use_rest: + return self.move_volume_rest(encrypt_destination) + volume_move = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-move-start', **{'source-volume': self.parameters['name'], + 'vserver': self.parameters['vserver'], + 'dest-aggr': self.parameters['aggregate_name']}) + if self.parameters.get('cutover_action'): + volume_move.add_new_child('cutover-action', self.parameters['cutover_action']) + if 
encrypt_destination is not None: + volume_move.add_new_child('encrypt-destination', self.na_helper.get_value_for_bool(False, encrypt_destination)) + try: + self.cluster.invoke_successfully(volume_move, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + rest_error = self.move_volume_with_rest_passthrough(encrypt_destination) + if rest_error is not None: + self.module.fail_json(msg='Error moving volume %s: %s - Retry failed with REST error: %s' + % (self.parameters['name'], to_native(error), rest_error), + exception=traceback.format_exc()) + if self.parameters.get('wait_for_completion'): + self.wait_for_volume_move() + + def move_volume_with_rest_passthrough(self, encrypt_destination=None): + # MDV volume will fail on a move, but will work using the REST CLI pass through + # vol move start -volume MDV_CRS_d6b0b313ff5611e9837100a098544e51_A -destination-aggregate data_a3 -vserver wmc66-a + # if REST isn't available fail with the original error + if not self.use_rest: + return False + # if REST exists let's try moving using the passthrough CLI + api = 'private/cli/volume/move/start' + body = {'destination-aggregate': self.parameters['aggregate_name'], + } + if encrypt_destination is not None: + body['encrypt-destination'] = encrypt_destination + query = {'volume': self.parameters['name'], + 'vserver': self.parameters['vserver'] + } + dummy, error = self.rest_api.patch(api, body, query) + return error + + def check_volume_move_state(self, result): + if self.use_rest: + volume_move_status = self.na_helper.safe_get(result, ['movement', 'state']) + else: + volume_move_status = result.get_child_by_name('attributes-list').get_child_by_name('volume-move-info').get_child_content('state') + # We have 5 states that can be returned. + # warning and healthy are state where the move is still going so we don't need to do anything for thouse. + # success - volume move is completed in REST. 
+ if volume_move_status in ['success', 'done']: + return False + # ZAPI returns failed or alert, REST returns failed or aborted. + if volume_move_status in ['failed', 'alert', 'aborted']: + self.module.fail_json(msg='Error moving volume %s: %s' % + (self.parameters['name'], result.get_child_by_name('attributes-list').get_child_by_name('volume-move-info') + .get_child_content('details'))) + return True + + def wait_for_volume_move(self): + volume_move_iter = netapp_utils.zapi.NaElement('volume-move-get-iter') + volume_move_info = netapp_utils.zapi.NaElement('volume-move-info') + volume_move_info.add_new_child('volume', self.parameters['name']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(volume_move_info) + volume_move_iter.add_child_elem(query) + error = self.wait_for_task_completion(volume_move_iter, self.check_volume_move_state) + if error: + self.module.fail_json(msg='Error getting volume move status: %s' % (to_native(error)), + exception=traceback.format_exc()) + + def wait_for_volume_move_rest(self): + api = "storage/volumes" + query = { + 'name': self.parameters['name'], + 'movement.destination_aggregate.name': self.parameters['aggregate_name'], + 'fields': 'movement.state' + } + error = self.wait_for_task_completion_rest(api, query, self.check_volume_move_state) + if error: + self.module.fail_json(msg='Error getting volume move status: %s' % (to_native(error)), + exception=traceback.format_exc()) + + def check_volume_encryption_conversion_state(self, result): + if self.use_rest: + volume_encryption_conversion_status = self.na_helper.safe_get(result, ['encryption', 'status', 'message']) + else: + volume_encryption_conversion_status = result.get_child_by_name('attributes-list').get_child_by_name('volume-encryption-conversion-info')\ + .get_child_content('status') + # REST returns running or initializing, ZAPI returns running if encryption in progress. 
+ if volume_encryption_conversion_status in ['running', 'initializing']: + return True + # If encryprion is completed, REST do have encryption status message. + if volume_encryption_conversion_status in ['Not currently going on.', None]: + return False + self.module.fail_json(msg='Error converting encryption for volume %s: %s' % + (self.parameters['name'], volume_encryption_conversion_status)) + + def wait_for_volume_encryption_conversion(self): + if self.use_rest: + return self.wait_for_volume_encryption_conversion_rest() + volume_encryption_conversion_iter = netapp_utils.zapi.NaElement('volume-encryption-conversion-get-iter') + volume_encryption_conversion_info = netapp_utils.zapi.NaElement('volume-encryption-conversion-info') + volume_encryption_conversion_info.add_new_child('volume', self.parameters['name']) + volume_encryption_conversion_info.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(volume_encryption_conversion_info) + volume_encryption_conversion_iter.add_child_elem(query) + error = self.wait_for_task_completion(volume_encryption_conversion_iter, self.check_volume_encryption_conversion_state) + if error: + self.module.fail_json(msg='Error getting volume encryption_conversion status: %s' % (to_native(error)), + exception=traceback.format_exc()) + + def wait_for_volume_encryption_conversion_rest(self): + api = "storage/volumes" + query = { + 'name': self.parameters['name'], + 'fields': 'encryption' + } + error = self.wait_for_task_completion_rest(api, query, self.check_volume_encryption_conversion_state) + if error: + self.module.fail_json(msg='Error getting volume encryption_conversion status: %s' % (to_native(error)), + exception=traceback.format_exc()) + + def wait_for_task_completion(self, zapi_iter, check_state): + retries = self.parameters['max_wait_time'] // (self.parameters['check_interval'] + 1) + fail_count = 0 + while retries > 0: + try: + result = 
self.cluster.invoke_successfully(zapi_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if fail_count < 3: + fail_count += 1 + retries -= 1 + time.sleep(self.parameters['check_interval']) + continue + return error + if int(result.get_child_content('num-records')) == 0: + return None + # reset fail count to 0 + fail_count = 0 + retry_required = check_state(result) + if not retry_required: + return None + time.sleep(self.parameters['check_interval']) + retries -= 1 + + def wait_for_task_completion_rest(self, api, query, check_state): + retries = self.parameters['max_wait_time'] // (self.parameters['check_interval'] + 1) + fail_count = 0 + while retries > 0: + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + if fail_count < 3: + fail_count += 1 + retries -= 1 + time.sleep(self.parameters['check_interval']) + continue + return error + if record is None: + return None + # reset fail count to 0 + fail_count = 0 + retry_required = check_state(record) + if not retry_required: + return None + time.sleep(self.parameters['check_interval']) + retries -= 1 + + def rename_volume(self): + """ + Rename the volume. + + Note: 'is_infinite' needs to be set to True in order to rename an + Infinite Volume. Use time_out parameter to set wait time for rename completion. 
+ """ + if self.use_rest: + return self.rename_volume_rest() + vol_rename_zapi, vol_name_zapi = ['volume-rename-async', 'volume-name'] if self.parameters['is_infinite']\ + else ['volume-rename', 'volume'] + volume_rename = netapp_utils.zapi.NaElement.create_node_with_children( + vol_rename_zapi, **{vol_name_zapi: self.parameters['from_name'], + 'new-volume-name': str(self.parameters['name'])}) + try: + result = self.server.invoke_successfully(volume_rename, enable_tunneling=True) + if vol_rename_zapi == 'volume-rename-async': + self.check_invoke_result(result, 'rename') + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error renaming volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def resize_volume(self): + """ + Re-size the volume. + + Note: 'is_infinite' needs to be set to True in order to resize an + Infinite Volume. + """ + if self.use_rest: + return self.resize_volume_rest() + + vol_size_zapi, vol_name_zapi = ['volume-size-async', 'volume-name']\ + if (self.parameters['is_infinite'] or self.volume_style == 'flexgroup')\ + else ['volume-size', 'volume'] + volume_resize = netapp_utils.zapi.NaElement.create_node_with_children( + vol_size_zapi, **{vol_name_zapi: self.parameters['name'], + 'new-size': str(self.parameters['size'])}) + try: + result = self.server.invoke_successfully(volume_resize, enable_tunneling=True) + if vol_size_zapi == 'volume-size-async': + self.check_invoke_result(result, 'resize') + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error re-sizing volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + return None + + def start_encryption_conversion(self, encrypt_destination): + if encrypt_destination: + if self.use_rest: + return self.encryption_conversion_rest() + zapi = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-encryption-conversion-start', **{'volume': 
self.parameters['name']}) + try: + self.server.invoke_successfully(zapi, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error enabling encryption for volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + if self.parameters.get('wait_for_completion'): + self.wait_for_volume_encryption_conversion() + else: + self.module.warn('disabling encryption requires cluster admin permissions.') + self.move_volume(encrypt_destination) + + def change_volume_state(self, call_from_delete_vol=False): + """ + Change volume's state (offline/online). + """ + if self.use_rest: + return self.change_volume_state_rest() + if self.parameters['is_online'] and not call_from_delete_vol: # Desired state is online, setup zapi APIs respectively + vol_state_zapi, vol_name_zapi, action = ['volume-online-async', 'volume-name', 'online']\ + if (self.parameters['is_infinite'] or self.volume_style == 'flexgroup')\ + else ['volume-online', 'name', 'online'] + else: # Desired state is offline, setup zapi APIs respectively + vol_state_zapi, vol_name_zapi, action = ['volume-offline-async', 'volume-name', 'offline']\ + if (self.parameters['is_infinite'] or self.volume_style == 'flexgroup')\ + else ['volume-offline', 'name', 'offline'] + volume_unmount = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-unmount', **{'volume-name': self.parameters['name']}) + volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children( + vol_state_zapi, **{vol_name_zapi: self.parameters['name']}) + + errors = [] + if not self.parameters['is_online'] or call_from_delete_vol: # Unmount before offline + try: + self.server.invoke_successfully(volume_unmount, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + errors.append('Error unmounting volume %s: %s' % (self.parameters['name'], to_native(error))) + state = "online" if self.parameters['is_online'] and not call_from_delete_vol 
else "offline" + try: + result = self.server.invoke_successfully(volume_change_state, enable_tunneling=True) + if self.volume_style == 'flexgroup' or self.parameters['is_infinite']: + self.check_invoke_result(result, action) + except netapp_utils.zapi.NaApiError as error: + errors.append('Error changing the state of volume %s to %s: %s' % (self.parameters['name'], state, to_native(error))) + if errors and not call_from_delete_vol: + self.module.fail_json(msg=', '.join(errors), exception=traceback.format_exc()) + return state, errors + + def create_volume_attribute(self, zapi_object, parent_attribute, attribute, option_name, convert_from=None): + """ + + :param parent_attribute: + :param child_attribute: + :param value: + :return: + """ + value = self.parameters.get(option_name) + if value is None: + return + if convert_from == int: + value = str(value) + elif convert_from == bool: + value = self.na_helper.get_value_for_bool(False, value, option_name) + + if zapi_object is None: + parent_attribute.add_new_child(attribute, value) + return + if isinstance(zapi_object, str): + # retrieve existing in parent, or create a new one + element = parent_attribute.get_child_by_name(zapi_object) + zapi_object = netapp_utils.zapi.NaElement(zapi_object) if element is None else element + zapi_object.add_new_child(attribute, value) + parent_attribute.add_child_elem(zapi_object) + + def build_zapi_volume_modify_iter(self, params): + vol_mod_iter = netapp_utils.zapi.NaElement('volume-modify-iter-async' if self.volume_style == 'flexgroup' or self.parameters['is_infinite'] + else 'volume-modify-iter') + + attributes = netapp_utils.zapi.NaElement('attributes') + vol_mod_attributes = netapp_utils.zapi.NaElement('volume-attributes') + # Volume-attributes is split in to 25 sub categories + # volume-inode-attributes + vol_inode_attributes = netapp_utils.zapi.NaElement('volume-inode-attributes') + self.create_volume_attribute(vol_inode_attributes, vol_mod_attributes, 'files-total', 
'max_files', int) + # volume-space-attributes + vol_space_attributes = netapp_utils.zapi.NaElement('volume-space-attributes') + self.create_volume_attribute(vol_space_attributes, vol_mod_attributes, 'space-guarantee', 'space_guarantee') + self.create_volume_attribute(vol_space_attributes, vol_mod_attributes, 'percentage-snapshot-reserve', 'percent_snapshot_space', int) + self.create_volume_attribute(vol_space_attributes, vol_mod_attributes, 'space-slo', 'space_slo') + # volume-snapshot-attributes + vol_snapshot_attributes = netapp_utils.zapi.NaElement('volume-snapshot-attributes') + self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes, 'snapshot-policy', 'snapshot_policy') + self.create_volume_attribute(vol_snapshot_attributes, vol_mod_attributes, 'snapdir-access-enabled', 'snapdir_access', bool) + # volume-export-attributes + self.create_volume_attribute('volume-export-attributes', vol_mod_attributes, 'policy', 'export_policy') + # volume-security-attributes + if self.parameters.get('unix_permissions') is not None or self.parameters.get('group_id') is not None or self.parameters.get('user_id') is not None: + vol_security_attributes = netapp_utils.zapi.NaElement('volume-security-attributes') + vol_security_unix_attributes = netapp_utils.zapi.NaElement('volume-security-unix-attributes') + self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes, 'permissions', 'unix_permissions') + self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes, 'group-id', 'group_id', int) + self.create_volume_attribute(vol_security_unix_attributes, vol_security_attributes, 'user-id', 'user_id', int) + vol_mod_attributes.add_child_elem(vol_security_attributes) + if params and params.get('volume_security_style') is not None: + self.create_volume_attribute('volume-security-attributes', vol_mod_attributes, 'style', 'volume_security_style') + + # volume-performance-attributes + 
self.create_volume_attribute('volume-performance-attributes', vol_mod_attributes, 'is-atime-update-enabled', 'atime_update', bool) + # volume-qos-attributes + self.create_volume_attribute('volume-qos-attributes', vol_mod_attributes, 'policy-group-name', 'qos_policy_group') + self.create_volume_attribute('volume-qos-attributes', vol_mod_attributes, 'adaptive-policy-group-name', 'qos_adaptive_policy_group') + # volume-comp-aggr-attributes + if params and params.get('tiering_policy') is not None: + self.create_volume_attribute('volume-comp-aggr-attributes', vol_mod_attributes, 'tiering-policy', 'tiering_policy') + # volume-state-attributes + self.create_volume_attribute('volume-state-attributes', vol_mod_attributes, 'is-nvfail-enabled', 'nvfail_enabled', bool) + # volume-dr-protection-attributes + self.create_volume_attribute('volume-vserver-dr-protection-attributes', vol_mod_attributes, 'vserver-dr-protection', 'vserver_dr_protection') + # volume-id-attributes + self.create_volume_attribute('volume-id-attributes', vol_mod_attributes, 'comment', 'comment') + # End of Volume-attributes sub attributes + attributes.add_child_elem(vol_mod_attributes) + + query = netapp_utils.zapi.NaElement('query') + vol_query_attributes = netapp_utils.zapi.NaElement('volume-attributes') + self.create_volume_attribute('volume-id-attributes', vol_query_attributes, 'name', 'name') + query.add_child_elem(vol_query_attributes) + vol_mod_iter.add_child_elem(attributes) + vol_mod_iter.add_child_elem(query) + return vol_mod_iter + + def volume_modify_attributes(self, params): + """ + modify volume parameter 'export_policy','unix_permissions','snapshot_policy','space_guarantee', 'percent_snapshot_space', + 'qos_policy_group', 'qos_adaptive_policy_group' + """ + if self.use_rest: + return self.volume_modify_attributes_rest(params) + vol_mod_iter = self.build_zapi_volume_modify_iter(params) + try: + result = self.server.invoke_successfully(vol_mod_iter, enable_tunneling=True) + except 
netapp_utils.zapi.NaApiError as error: + error_msg = to_native(error) + if 'volume-comp-aggr-attributes' in error_msg: + error_msg += ". Added info: tiering option requires 9.4 or later." + self.wrap_fail_json(msg='Error modifying volume %s: %s' + % (self.parameters['name'], error_msg), + exception=traceback.format_exc()) + + failures = result.get_child_by_name('failure-list') + # handle error if modify space, policy, or unix-permissions parameter fails + if failures is not None: + error_msgs = [ + failures.get_child_by_name(return_info).get_child_content( + 'error-message' + ) + for return_info in ( + 'volume-modify-iter-info', + 'volume-modify-iter-async-info', + ) + if failures.get_child_by_name(return_info) is not None + ] + if error_msgs and any(x is not None for x in error_msgs): + self.wrap_fail_json(msg="Error modifying volume %s: %s" + % (self.parameters['name'], ' --- '.join(error_msgs)), + exception=traceback.format_exc()) + if self.volume_style == 'flexgroup' or self.parameters['is_infinite']: + success = self.na_helper.safe_get(result, ['success-list', 'volume-modify-iter-async-info']) + results = {} + for key in ('status', 'jobid'): + if success and success.get_child_by_name(key): + results[key] = success[key] + + status = results.get('status') + if status == 'in_progress' and 'jobid' in results: + if self.parameters['time_out'] == 0: + return + error = self.check_job_status(results['jobid']) + if error is None: + return + self.wrap_fail_json(msg='Error when modifying volume: %s' % error) + self.wrap_fail_json(msg='Unexpected error when modifying volume: result is: %s' % str(result.to_string())) + + def volume_mount(self): + """ + Mount an existing volume in specified junction_path + :return: None + """ + if self.use_rest: + return self.volume_mount_rest() + vol_mount = netapp_utils.zapi.NaElement('volume-mount') + vol_mount.add_new_child('volume-name', self.parameters['name']) + vol_mount.add_new_child('junction-path', 
self.parameters['junction_path']) + try: + self.server.invoke_successfully(vol_mount, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error mounting volume %s on path %s: %s' + % (self.parameters['name'], self.parameters['junction_path'], + to_native(error)), exception=traceback.format_exc()) + + def volume_unmount(self): + """ + Unmount an existing volume + :return: None + """ + if self.use_rest: + return self.volume_unmount_rest() + vol_unmount = netapp_utils.zapi.NaElement.create_node_with_children( + 'volume-unmount', **{'volume-name': self.parameters['name']}) + try: + self.server.invoke_successfully(vol_unmount, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error unmounting volume %s: %s' + % (self.parameters['name'], to_native(error)), exception=traceback.format_exc()) + + def modify_volume(self, modify): + '''Modify volume action''' + # snaplock requires volume in unmount state. + if modify.get('junction_path') == '': + self.volume_unmount() + attributes = modify.keys() + for attribute in attributes: + if attribute in ['space_guarantee', 'export_policy', 'unix_permissions', 'group_id', 'user_id', 'tiering_policy', + 'snapshot_policy', 'percent_snapshot_space', 'snapdir_access', 'atime_update', 'volume_security_style', + 'nvfail_enabled', 'space_slo', 'qos_policy_group', 'qos_adaptive_policy_group', 'vserver_dr_protection', + 'comment', 'logical_space_enforcement', 'logical_space_reporting', 'tiering_minimum_cooling_days', + 'snaplock', 'max_files', 'analytics', 'tags']: + self.volume_modify_attributes(modify) + break + if 'snapshot_auto_delete' in attributes and not self.use_rest: + # Rest doesn't support any snapshot_auto_delete option other than is_autodelete_enabled. 
For now i've completely + # disabled this in rest + self.set_snapshot_auto_delete() + # don't mount or unmount when offline + if modify.get('junction_path'): + self.volume_mount() + if 'size' in attributes: + self.resize_volume() + if 'aggregate_name' in attributes: + # keep it last, as it may take some time + # handle change in encryption as part of the move + # allow for encrypt/decrypt only if encrypt present in attributes. + self.move_volume(modify.get('encrypt')) + elif 'encrypt' in attributes: + self.start_encryption_conversion(self.parameters['encrypt']) + + def get_volume_style(self, current): + '''Get volume style, infinite or standard flexvol''' + if current is not None: + return current.get('style_extended') + if self.parameters.get('aggr_list') or self.parameters.get('aggr_list_multiplier') or self.parameters.get('auto_provision_as'): + if self.use_rest and self.parameters.get('auto_provision_as') and self.parameters.get('aggr_list_multiplier') is None: + self.parameters['aggr_list_multiplier'] = 1 + return 'flexgroup' + return None + + def get_job(self, jobid, server): + """ + Get job details by id + """ + job_get = netapp_utils.zapi.NaElement('job-get') + job_get.add_new_child('job-id', jobid) + try: + result = server.invoke_successfully(job_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + if to_native(error.code) == "15661": + # Not found + return None + self.wrap_fail_json(msg='Error fetching job info: %s' % to_native(error), + exception=traceback.format_exc()) + job_info = result.get_child_by_name('attributes').get_child_by_name('job-info') + return { + 'job-progress': job_info['job-progress'], + 'job-state': job_info['job-state'], + 'job-completion': job_info['job-completion'] if job_info.get_child_by_name('job-completion') is not None else None + } + + def check_job_status(self, jobid): + """ + Loop until job is complete + """ + server = self.server + sleep_time = 5 + time_out = self.parameters['time_out'] + error = 
'timeout' + + if time_out <= 0: + results = self.get_job(jobid, server) + + while time_out > 0: + results = self.get_job(jobid, server) + # If running as cluster admin, the job is owned by cluster vserver + # rather than the target vserver. + if results is None and server == self.server: + results = netapp_utils.get_cserver(self.server) + server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results) + continue + if results is None: + error = 'cannot locate job with id: %d' % int(jobid) + break + if results['job-state'] in ('queued', 'running'): + time.sleep(sleep_time) + time_out -= sleep_time + continue + if results['job-state'] in ('success', 'failure'): + break + else: + self.wrap_fail_json(msg='Unexpected job status in: %s' % repr(results)) + + if results is not None: + if results['job-state'] == 'success': + error = None + elif results['job-state'] in ('queued', 'running'): + error = 'job completion exceeded expected timer of: %s seconds' % \ + self.parameters['time_out'] + elif results['job-completion'] is not None: + error = results['job-completion'] + else: + error = results['job-progress'] + return error + + def check_invoke_result(self, result, action): + ''' + check invoked api call back result. + ''' + results = {} + for key in ('result-status', 'result-jobid'): + if result.get_child_by_name(key): + results[key] = result[key] + + status = results.get('result-status') + if status == 'in_progress' and 'result-jobid' in results: + if self.parameters['time_out'] == 0: + return + error = self.check_job_status(results['result-jobid']) + if error is None: + return + else: + self.wrap_fail_json(msg='Error when %s volume: %s' % (action, error)) + if status == 'failed': + self.wrap_fail_json(msg='Operation failed when %s volume.' 
% action) + + def set_efficiency_attributes(self, options): + for key, attr in self.sis_keys2zapi_set.items(): + value = self.parameters.get(key) + if value is not None: + if self.argument_spec[key]['type'] == 'bool': + value = self.na_helper.get_value_for_bool(False, value) + options[attr] = value + # ZAPI requires compression to be set for inline-compression + if options.get('enable-inline-compression') == 'true' and 'enable-compression' not in options: + options['enable-compression'] = 'true' + + def set_efficiency_config(self): + '''Set efficiency policy and compression attributes''' + options = {'path': '/vol/' + self.parameters['name']} + efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable', **options) + try: + self.server.invoke_successfully(efficiency_enable, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + # Error 40043 denotes an Operation has already been enabled. + if to_native(error.code) != "40043": + self.wrap_fail_json(msg='Error enable efficiency on volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + self.set_efficiency_attributes(options) + efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config', **options) + try: + self.server.invoke_successfully(efficiency_start, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.wrap_fail_json(msg='Error setting up efficiency attributes on volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def set_efficiency_config_async(self): + """Set efficiency policy and compression attributes in asynchronous mode""" + options = {'volume-name': self.parameters['name']} + efficiency_enable = netapp_utils.zapi.NaElement.create_node_with_children('sis-enable-async', **options) + try: + result = self.server.invoke_successfully(efficiency_enable, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as 
error: + self.wrap_fail_json(msg='Error enable efficiency on volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + self.check_invoke_result(result, 'enable efficiency on') + + self.set_efficiency_attributes(options) + efficiency_start = netapp_utils.zapi.NaElement.create_node_with_children('sis-set-config-async', **options) + try: + result = self.server.invoke_successfully(efficiency_start, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.wrap_fail_json(msg='Error setting up efficiency attributes on volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + self.check_invoke_result(result, 'set efficiency policy on') + + def get_efficiency_info(self, return_value): + """ + get the name of the efficiency policy assigned to volume, as well as compression values + if attribute does not exist, set its value to None + :return: update return_value dict. + """ + sis_info = netapp_utils.zapi.NaElement('sis-get-iter') + sis_status_info = netapp_utils.zapi.NaElement('sis-status-info') + sis_status_info.add_new_child('path', '/vol/' + self.parameters['name']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(sis_status_info) + sis_info.add_child_elem(query) + try: + result = self.server.invoke_successfully(sis_info, True) + except netapp_utils.zapi.NaApiError as error: + # Don't error out if efficiency settings cannot be read. We'll fail if they need to be set. 
+ if error.message.startswith('Insufficient privileges: user ') and error.message.endswith(' does not have read access to this resource'): + self.issues.append('cannot read volume efficiency options (as expected when running as vserver): %s' % to_native(error)) + return + self.wrap_fail_json(msg='Error fetching efficiency policy for volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + for key in self.sis_keys2zapi_get: + return_value[key] = None + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + sis_attributes = result.get_child_by_name('attributes-list'). get_child_by_name('sis-status-info') + for key, attr in self.sis_keys2zapi_get.items(): + value = sis_attributes.get_child_content(attr) + if self.argument_spec[key]['type'] == 'bool': + value = self.na_helper.get_value_for_bool(True, value) + return_value[key] = value + + def modify_volume_efficiency_config(self, efficiency_config_modify_value): + if self.use_rest: + return self.set_efficiency_rest() + if efficiency_config_modify_value == 'async': + self.set_efficiency_config_async() + else: + self.set_efficiency_config() + + def set_snapshot_auto_delete(self): + options = {'volume': self.parameters['name']} + desired_options = self.parameters['snapshot_auto_delete'] + for key, value in desired_options.items(): + options['option-name'] = key + options['option-value'] = str(value) + snapshot_auto_delete = netapp_utils.zapi.NaElement.create_node_with_children('snapshot-autodelete-set-option', **options) + try: + self.server.invoke_successfully(snapshot_auto_delete, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.wrap_fail_json(msg='Error setting snapshot auto delete options for volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def rehost_volume(self): + volume_rehost = netapp_utils.zapi.NaElement.create_node_with_children( + 
'volume-rehost', **{'vserver': self.parameters['from_vserver'], + 'destination-vserver': self.parameters['vserver'], + 'volume': self.parameters['name']}) + if self.parameters.get('auto_remap_luns') is not None: + volume_rehost.add_new_child('auto-remap-luns', str(self.parameters['auto_remap_luns'])) + if self.parameters.get('force_unmap_luns') is not None: + volume_rehost.add_new_child('force-unmap-luns', str(self.parameters['force_unmap_luns'])) + try: + self.cluster.invoke_successfully(volume_rehost, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error rehosting volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def snapshot_restore_volume(self): + if self.use_rest: + return self.snapshot_restore_volume_rest() + snapshot_restore = netapp_utils.zapi.NaElement.create_node_with_children( + 'snapshot-restore-volume', **{'snapshot': self.parameters['snapshot_restore'], + 'volume': self.parameters['name']}) + if self.parameters.get('force_restore') is not None: + snapshot_restore.add_new_child('force', str(self.parameters['force_restore'])) + if self.parameters.get('preserve_lun_ids') is not None: + snapshot_restore.add_new_child('preserve-lun-ids', str(self.parameters['preserve_lun_ids'])) + try: + self.server.invoke_successfully(snapshot_restore, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error restoring volume %s: %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def ignore_small_change(self, current, attribute, threshold): + if attribute in current and current[attribute] != 0 and self.parameters.get(attribute) is not None: + # ignore a less than XX% difference + change = abs(current[attribute] - self.parameters[attribute]) * 100.0 / current[attribute] + if change < threshold: + self.parameters[attribute] = current[attribute] + if change > 0.1: + 
self.module.warn('resize request for %s ignored: %.1f%% is below the threshold: %.1f%%' % (attribute, change, threshold)) + + def adjust_sizes(self, current, after_create): + """ + ignore small change in size by resetting expectations + """ + if after_create: + # ignore change in size immediately after a create: + self.parameters['size'] = current['size'] + # inodes are not set in create + return + self.ignore_small_change(current, 'size', self.parameters['size_change_threshold']) + self.ignore_small_change(current, 'max_files', netapp_utils.get_feature(self.module, 'max_files_change_threshold')) + + def validate_snaplock_changes(self, current, modify=None, after_create=False): + if not self.use_rest: + return + msg = None + if modify: + # prechecks when computing modify + if 'type' in modify['snaplock']: + msg = "Error: volume snaplock type was not set properly at creation time." if after_create else \ + "Error: changing a volume snaplock type after creation is not allowed." + msg += ' Current: %s, desired: %s.' % (current['snaplock']['type'], self.parameters['snaplock']['type']) + elif self.parameters['state'] == 'present': + # prechecks before computing modify + sl_dict = self.na_helper.filter_out_none_entries(self.parameters.get('snaplock', {})) + sl_type = sl_dict.pop('type', 'non_snaplock') + # verify type is the only option when not enabling snaplock compliance or enterprise + if sl_dict and ( + (current is None and sl_type == 'non_snaplock') or (current and current['snaplock']['type'] == 'non_snaplock')): + msg = "Error: snaplock options are not supported for non_snaplock volume, found: %s." 
% sl_dict + # verify type is not used before 9.10.1, or allow non_snaplock as this is the default + if not self.rest_api.meets_rest_minimum_version(True, 9, 10, 1): + if sl_type == 'non_snaplock': + self.parameters.pop('snaplock', None) + else: + msg = "Error: %s" % self.rest_api.options_require_ontap_version('snaplock type', '9.10.1', True) + if msg: + self.module.fail_json(msg=msg) + + def set_modify_dict(self, current, after_create=False): + '''Fill modify dict with changes''' + octal_value = current.get('unix_permissions') if current else None + if self.parameters.get('unix_permissions') is not None and self.na_helper.compare_chmod_value(octal_value, self.parameters['unix_permissions']): + # don't change if the values are the same + # can't change permissions if not online + del self.parameters['unix_permissions'] + # snapshot_auto_delete's value is a dict, get_modified_attributes function doesn't support dict as value. + auto_delete_info = current.pop('snapshot_auto_delete', None) + # ignore small changes in volume size or inode maximum by adjusting self.parameters['size'] or self.parameters['max_files'] + self.adjust_sizes(current, after_create) + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if modify is not None and 'type' in modify: + msg = "Error: volume type was not set properly at creation time." if after_create else \ + "Error: changing a volume from one type to another is not allowed." + msg += ' Current: %s, desired: %s.' % (current['type'], self.parameters['type']) + self.module.fail_json(msg=msg) + if modify is not None and 'snaplock' in modify: + self.validate_snaplock_changes(current, modify, after_create) + desired_style = self.get_volume_style(None) + if desired_style is not None and desired_style != self.volume_style: + msg = "Error: volume backend was not set properly at creation time." if after_create else \ + "Error: changing a volume from one backend to another is not allowed." 
+ msg += ' Current: %s, desired: %s.' % (self.volume_style, desired_style) + self.module.fail_json(msg=msg) + desired_tcontrol = self.na_helper.safe_get(self.parameters, ['nas_application_template', 'tiering', 'control']) + if desired_tcontrol in ('required', 'disallowed'): + warn_or_fail = netapp_utils.get_feature(self.module, 'warn_or_fail_on_fabricpool_backend_change') + if warn_or_fail in ('warn', 'fail'): + current_tcontrol = self.tiering_control(current) + if desired_tcontrol != current_tcontrol: + msg = "Error: volume tiering control was not set properly at creation time." if after_create else \ + "Error: changing a volume from one backend to another is not allowed." + msg += ' Current tiering control: %s, desired: %s.' % (current_tcontrol, desired_tcontrol) + if warn_or_fail == 'fail': + self.module.fail_json(msg=msg) + self.module.warn("Ignored " + msg) + elif warn_or_fail not in (None, 'ignore'): + self.module.warn("Unexpected value '%s' for warn_or_fail_on_fabricpool_backend_change, expecting: None, 'ignore', 'fail', 'warn'" + % warn_or_fail) + if self.parameters.get('snapshot_auto_delete') is not None: + auto_delete_modify = self.na_helper.get_modified_attributes(auto_delete_info, + self.parameters['snapshot_auto_delete']) + if len(auto_delete_modify) > 0: + modify['snapshot_auto_delete'] = auto_delete_modify + return modify + + def take_modify_actions(self, modify): + self.modify_volume(modify) + + if any(modify.get(key) is not None for key in self.sis_keys2zapi_get): + if self.parameters.get('is_infinite') or self.volume_style == 'flexgroup': + efficiency_config_modify = 'async' + else: + efficiency_config_modify = 'sync' + self.modify_volume_efficiency_config(efficiency_config_modify) + + # offline volume last + if modify.get('is_online') is False: + self.change_volume_state() + + """ MAPPING OF VOLUME FIELDS FROM ZAPI TO REST + ZAPI = REST + encrypt = encryption.enabled + volume-comp-aggr-attributes.tiering-policy = tiering.policy + 
'volume-export-attributes.policy' = nas.export_policy.name + 'volume-id-attributes.containing-aggregate-name' = aggregates.name + 'volume-id-attributes.flexgroup-uuid' = uuid (Only for FlexGroup volumes) + 'volume-id-attributes.instance-uuid' = uuid (Only for FlexVols) + 'volume-id-attributes.junction-path' = nas.path + 'volume-id-attributes.style-extended' = style + 'volume-id-attributes.type' = type + 'volume-id-attributes.comment' = comment + 'volume-performance-attributes.is-atime-update-enabled' == NO REST VERSION + volume-qos-attributes.policy-group-name' = qos.policy.name + 'volume-qos-attributes.adaptive-policy-group-name' = qos.policy.name + 'volume-security-attributes.style = nas.security_style + volume-security-attributes.volume-security-unix-attributes.group-id' = nas.gid + 'volume-security-attributes.volume-security-unix-attributes.permissions' = nas.unix_permissions + 'volume-security-attributes.volume-security-unix-attributes.user-id' = nas.uid + 'volume-snapshot-attributes.snapdir-access-enabled' == NO REST VERSION + 'volume-snapshot-attributes,snapshot-policy' = snapshot_policy + volume-space-attributes.percentage-snapshot-reserve = space.snapshot.reserve_percent + volume-space-attributes.size' = space.size + 'volume-space-attributes.space-guarantee' = guarantee.type + volume-space-attributes.space-slo' == NO REST VERSION + 'volume-state-attributes.is-nvfail-enabled' == NO REST Version + 'volume-state-attributes.state' = state + 'volume-vserver-dr-protection-attributes.vserver-dr-protection' = == NO REST Version + volume-snapshot-autodelete-attributes.* None exist other than space.snapshot.autodelete_enabled + From get_efficiency_info function + efficiency_policy = efficiency.policy.name + compression = efficiency.compression + inline_compression = efficiency.compression + """ + + def get_volume_rest(self, vol_name): + """ + This covers the zapi functions + get_volume + - volume_get_iter + - get_efficiency_info + """ + api = 'storage/volumes' + 
params = {'name': vol_name, + 'svm.name': self.parameters['vserver'], + 'fields': 'encryption.enabled,' + 'tiering.policy,' + 'nas.export_policy.name,' + 'aggregates.name,' + 'aggregates.uuid,' + 'uuid,' + 'nas.path,' + 'style,' + 'type,' + 'comment,' + 'qos.policy.name,' + 'nas.security_style,' + 'nas.gid,' + 'nas.unix_permissions,' + 'nas.uid,' + 'snapshot_policy,' + 'space.snapshot.reserve_percent,' + 'space.size,' + 'guarantee.type,' + 'state,' + 'efficiency.compression,' + 'snaplock,' + 'files.maximum,' + 'space.logical_space.enforcement,' + 'space.logical_space.reporting,'} + if self.parameters.get('efficiency_policy'): + params['fields'] += 'efficiency.policy.name,' + if self.parameters.get('tiering_minimum_cooling_days'): + params['fields'] += 'tiering.min_cooling_days,' + if self.parameters.get('analytics'): + params['fields'] += 'analytics,' + if self.parameters.get('tags'): + params['fields'] += '_tags,' + + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg=error) + return self.format_get_volume_rest(record) if record else None + + def rename_volume_rest(self): + # volume-rename-async and volume-rename are the same in rest + # Zapi you had to give the old and new name to change a volume. 
def create_volume_body_rest(self):
    """Build the POST body for creating a volume through the REST API.

    Only options the user actually supplied are included; ZAPI option names
    are mapped onto their REST field equivalents.
    """
    body = {
        'name': self.parameters['name'],
        'svm.name': self.parameters['vserver']
    }
    # ZAPI's space-guarantee and space-reserve are the same thing in REST.
    if self.parameters.get('space_guarantee') is not None:
        body['guarantee.type'] = self.parameters['space_guarantee']
    # TODO: Check to see if there a difference in rest between flexgroup or not. might need to throw error
    body = self.aggregates_rest(body)
    # Simple one-to-one option -> REST field mappings (set only when provided).
    for option, key in (
            ('tags', '_tags'),
            ('size', 'size'),
            ('snapshot_policy', 'snapshot_policy.name'),
            ('unix_permissions', 'nas.unix_permissions'),
            ('group_id', 'nas.gid'),
            ('user_id', 'nas.uid'),
            ('volume_security_style', 'nas.security_style'),
            ('export_policy', 'nas.export_policy.name'),
            ('junction_path', 'nas.path'),
            ('comment', 'comment'),
            ('type', 'type'),
            ('percent_snapshot_space', 'space.snapshot.reserve_percent'),
            ('language', 'language')):
        value = self.parameters.get(option)
        if value is not None:
            body[key] = value
    # qos_policy_group and qos_adaptive_policy_group are merged in REST.
    qos_policy = self.get_qos_policy_group()
    if qos_policy is not None:
        body['qos.policy.name'] = qos_policy
    for option, key in (
            ('tiering_policy', 'tiering.policy'),
            ('encrypt', 'encryption.enabled'),
            ('logical_space_enforcement', 'space.logical_space.enforcement'),
            ('logical_space_reporting', 'space.logical_space.reporting'),
            ('tiering_minimum_cooling_days', 'tiering.min_cooling_days')):
        value = self.parameters.get(option)
        if value is not None:
            body[key] = value
    if self.parameters.get('snaplock') is not None:
        body['snaplock'] = self.na_helper.filter_out_none_entries(self.parameters['snaplock'])
    if self.volume_style:
        body['style'] = self.volume_style
    if self.parameters.get('efficiency_policy') is not None:
        body['efficiency.policy.name'] = self.parameters['efficiency_policy']
    compression = self.get_compression()
    if compression:
        body['efficiency.compression'] = compression
    # NOTE: truthiness check (not `is not None`) matches the original behavior.
    if self.parameters.get('analytics'):
        body['analytics.state'] = self.parameters['analytics']
    body['state'] = self.bool_to_online(self.parameters['is_online'])
    return body
def change_volume_state_rest(self):
    """PATCH the volume's administrative state (online/offline).

    Returns a (state, None) tuple; the second element mirrors the ZAPI
    variant's return shape.
    """
    desired_state = self.bool_to_online(self.parameters['is_online'])
    dummy, error = self.volume_rest_patch({'state': desired_state})
    if error:
        self.module.fail_json(msg='Error changing state of volume %s: %s' % (self.parameters['name'],
                                                                             to_native(error)),
                              exception=traceback.format_exc())
    return desired_state, None
def set_efficiency_rest(self):
    """Apply efficiency settings (policy name and/or compression) via PATCH.

    Silently returns when neither efficiency option was requested, so it is
    safe to call unconditionally.
    """
    body = {}
    policy = self.parameters.get('efficiency_policy')
    if policy is not None:
        body['efficiency.policy.name'] = policy
    compression = self.get_compression()
    if compression:
        body['efficiency.compression'] = compression
    if not body:
        # nothing to change - avoid an empty PATCH
        return
    dummy, error = self.volume_rest_patch(body)
    if error:
        self.module.fail_json(msg='Error setting efficiency for volume %s: %s' % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
def resize_volume_rest(self):
    """Resize the volume via PATCH.

    When sizing_method is provided it is passed as a query parameter
    (REST only supports it on the resize PATCH).
    """
    query = None
    sizing_method = self.parameters.get('sizing_method')
    if sizing_method is not None:
        query = {'sizing_method': sizing_method}
    dummy, error = self.volume_rest_patch({'size': self.parameters['size']}, query)
    if error:
        self.module.fail_json(msg='Error resizing volume %s: %s' % (self.parameters['name'], to_native(error)),
                              exception=traceback.format_exc())
def get_compression(self):
    """Map the compression/inline_compression booleans onto the single REST
    efficiency.compression value.

    Returns 'both', 'background', 'inline', 'none' (both explicitly False),
    or None when neither option was requested.
    """
    background = self.parameters.get('compression')
    inline = self.parameters.get('inline_compression')
    if background and inline:
        return 'both'
    if background:
        return 'background'
    if inline:
        return 'inline'
    # both must be explicitly False (not merely absent) to disable compression
    if background is False and inline is False:
        return 'none'
    return None
+ # For now i'm going to hard code this, but we need a way to show all aggrs + aggregates = record.get('aggregates', None) + aggr_name = aggregates[0].get('name', None) if aggregates else None + rest_compression = self.na_helper.safe_get(record, ['efficiency', 'compression']) + junction_path = self.na_helper.safe_get(record, ['nas', 'path']) + if junction_path is None: + junction_path = '' + # if analytics.state is initializing it will be ON once completed. + state = self.na_helper.safe_get(record, ['analytics', 'state']) + analytics = 'on' if state == 'initializing' else state + return { + 'tags': record.get('_tags', []), + 'name': record.get('name', None), + 'analytics': analytics, + 'encrypt': self.na_helper.safe_get(record, ['encryption', 'enabled']), + 'tiering_policy': self.na_helper.safe_get(record, ['tiering', 'policy']), + 'export_policy': self.na_helper.safe_get(record, ['nas', 'export_policy', 'name']), + 'aggregate_name': aggr_name, + 'aggregates': aggregates, + 'flexgroup_uuid': record.get('uuid', None), # this might need some additional logic + 'instance_uuid': record.get('uuid', None), # this might need some additional logic + 'junction_path': junction_path, + 'style_extended': record.get('style', None), + 'type': record.get('type', None), + 'comment': record.get('comment', None), + 'qos_policy_group': self.na_helper.safe_get(record, ['qos', 'policy', 'name']), + 'qos_adaptive_policy_group': self.na_helper.safe_get(record, ['qos', 'policy', 'name']), + 'volume_security_style': self.na_helper.safe_get(record, ['nas', 'security_style']), + 'group_id': self.na_helper.safe_get(record, ['nas', 'gid']), + # Rest return an Int while Zapi return a string, force Rest to be an String + 'unix_permissions': str(self.na_helper.safe_get(record, ['nas', 'unix_permissions'])), + 'user_id': self.na_helper.safe_get(record, ['nas', 'uid']), + 'snapshot_policy': self.na_helper.safe_get(record, ['snapshot_policy', 'name']), + 'percent_snapshot_space': 
def tiering_control(self, current):
    """Classify FabricPool support across the volume's aggregates.

    Returns 'required' when every aggregate is backed by a FabricPool,
    'disallowed' when none are, 'best_effort' for a mix, and None when no
    aggregate information is available.
    """
    in_fabricpool = [
        self.is_fabricpool(aggregate['name'], aggregate['uuid'])
        for aggregate in current.get('aggregates', [])
    ]
    if not in_fabricpool:
        return None
    if all(in_fabricpool):
        return 'required'
    return 'best_effort' if any(in_fabricpool) else 'disallowed'
self.volume_style = self.get_volume_style(current) + if self.volume_style == 'flexgroup' and self.parameters.get('aggregate_name') is not None: + self.module.fail_json(msg='Error: aggregate_name option cannot be used with FlexGroups.') + + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action == 'delete' or self.parameters['state'] == 'absent': + return ['delete'] if cd_action == 'delete' else [], current, modify + if cd_action == 'create': + # report an error if the vserver does not exist (it can be also be a cluster or node vserver with REST) + if self.use_rest: + rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + actions = ['create'] + if self.parameters.get('from_name'): + # create by renaming + current = self.get_volume(self.parameters['from_name']) + rename = self.na_helper.is_rename_action(current, None) + if rename is None: + self.module.fail_json(msg="Error renaming volume: cannot find %s" % self.parameters['from_name']) + if rename: + cd_action = None + actions = ['rename'] + elif self.parameters.get('from_vserver'): + # create by rehosting + if self.use_rest: + self.module.fail_json(msg='Error: ONTAP REST API does not support Rehosting Volumes') + actions = ['rehost'] + self.na_helper.changed = True + if self.parameters.get('snapshot_restore'): + # update by restoring + if 'create' in actions: + self.module.fail_json(msg="Error restoring volume: cannot find parent: %s" % self.parameters['name']) + # let's allow restoring after a rename or rehost + actions.append('snapshot_restore') + self.na_helper.changed = True + self.validate_snaplock_changes(current) + if cd_action is None and 'rehost' not in actions: + # Ignoring modify after a rehost, as we can't read the volume properties on the remote volume + # or maybe we could, using a cluster ZAPI, but since ZAPI is going away, is it worth it? 
+ modify = self.set_modify_dict(current) + if modify: + # ZAPI decrypts volume using volume move api and aggregate name is required. + if not self.use_rest and modify.get('encrypt') is False and not self.parameters.get('aggregate_name'): + self.parameters['aggregate_name'] = current['aggregate_name'] + if self.use_rest and modify.get('encrypt') is False and not modify.get('aggregate_name'): + self.module.fail_json(msg="Error: unencrypting volume is only supported when moving the volume to another aggregate in REST.") + actions.append('modify') + if self.parameters.get('nas_application_template') is not None: + application = self.get_application() + changed = self.na_helper.changed + app_component = self.create_nas_application_component() if self.parameters['state'] == 'present' else None + modify_app = self.na_helper.get_modified_attributes(application, app_component) + # restore current change state, as we ignore this + if modify_app: + self.na_helper.changed = changed + self.module.warn('Modifying an app is not supported at present: ignoring: %s' % str(modify_app)) + return actions, current, modify + + def apply(self): + '''Call create/modify/delete operations''' + actions, current, modify = self.set_actions() + is_online = current.get('is_online') if current else None + response = None + + # rehost, snapshot_restore and modify actions requires volume state to be online. + online_modify_options = [x for x in actions if x in ['rehost', 'snapshot_restore', 'modify']] + # ignore options that requires volume shoule be online. 
+ if not modify.get('is_online') and is_online is False and online_modify_options: + modify_keys = [] + if 'modify' in online_modify_options: + online_modify_options.remove('modify') + modify_keys = [key for key in modify if key != 'is_online'] + action_msg = 'perform action(s): %s' % online_modify_options if online_modify_options else '' + modify_msg = ' and modify: %s' % modify_keys if action_msg else 'modify: %s' % modify_keys + self.module.warn("Cannot %s%s when volume is offline." % (action_msg, modify_msg)) + modify, actions = {}, [] + if 'rename' in actions: + # rename can be done if volume is offline. + actions = ['rename'] + else: + self.na_helper.changed = False + + if self.na_helper.changed and not self.module.check_mode: + # always online volume first before other changes. + # rehost, snapshot_restore and modify requires volume in online state. + if modify.get('is_online'): + self.parameters['uuid'] = current['uuid'] + # when moving to online, include parameters that get does not return when volume is offline + for field in ['volume_security_style', 'group_id', 'user_id', 'percent_snapshot_space']: + if self.parameters.get(field) is not None: + modify[field] = self.parameters[field] + self.change_volume_state() + if 'rename' in actions: + self.rename_volume() + if 'rehost' in actions: + # REST DOES NOT have a volume-rehost equivalent + self.rehost_volume() + if 'snapshot_restore' in actions: + self.snapshot_restore_volume() + if 'create' in actions: + response = self.create_volume() + # if we create using ZAPI and modify only options are set (snapdir_access or atime_update), we need to run a modify. + # The modify also takes care of efficiency (sis) parameters and snapshot_auto_delete. + # If we create using REST application, some options are not available, we may need to run a modify. + # volume should be online for modify. 
+ current = self.get_volume() + if current: + self.volume_created = True + modify = self.set_modify_dict(current, after_create=True) + is_online = current.get('is_online') + if modify: + if is_online: + actions.append('modify') + else: + self.module.warn("Cannot perform actions: modify when volume is offline.") + # restore this, as set_modify_dict could set it to False + self.na_helper.changed = True + if 'delete' in actions: + self.parameters['uuid'] = current['uuid'] + self.delete_volume(current) + if 'modify' in actions: + self.parameters['uuid'] = current['uuid'] + self.take_modify_actions(modify) + + result = netapp_utils.generate_result(self.na_helper.changed, actions, modify, response) + self.module.exit_json(**result) + + +def main(): + '''Apply volume operations from playbook''' + obj = NetAppOntapVolume() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py new file mode 100644 index 000000000..0b40c5d45 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_autosize.py @@ -0,0 +1,353 @@ +#!/usr/bin/python + +# (c) 2019-2022, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_volume_autosize +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: na_ontap_volume_autosize +short_description: NetApp ONTAP manage volume autosize +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Modify Volume AutoSize +options: + volume: + description: + - The name of the flexible volume for which we want to set autosize. 
+ type: str + required: true + + mode: + description: + - Specify the flexible volume's autosize mode of operation. + type: str + choices: ['grow', 'grow_shrink', 'off'] + + vserver: + description: + - Name of the vserver to use. + required: true + type: str + + grow_threshold_percent: + description: + - Specifies the percentage of the flexible volume's capacity at which autogrow is initiated. + - The default grow threshold varies from 85% to 98%, depending on the volume size. + - It is an error for the grow threshold to be less than or equal to the shrink threshold. + - Range between 0 and 100 + type: int + + increment_size: + description: + - Specify the flexible volume's increment size using the following format < number > [k|m|g|t] + - The amount is the absolute size to set. + - The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively). + type: str + + maximum_size: + description: + - Specify the flexible volume's maximum allowed size using the following format < number > [k|m|g|t] + - The amount is the absolute size to set. + - The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively). + - The default value is 20% greater than the volume size at the time autosize was enabled. + - It is an error for the maximum volume size to be less than the current volume size. + - It is also an error for the maximum size to be less than or equal to the minimum size. + type: str + + minimum_size: + description: + - Specify the flexible volume's minimum allowed size using the following format < number > [k|m|g|t] The amount is the absolute size to set. + - The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively). + - The default value is the size of the volume at the time the 'grow_shrink' mode was enabled. 
+ - It is an error for the minimum size to be greater than or equal to the maximum size. + type: str + + reset: + description: + - "Sets the values of maximum_size, increment_size, minimum_size, grow_threshold_percent, shrink_threshold_percent and mode to their defaults" + - If the reset parameter is present, the system will always perform the reset action, so idempotency is not supported. + type: bool + + shrink_threshold_percent: + description: + - Specifies the percentage of the flexible volume's capacity at which autoshrink is initiated. + - The default shrink threshold is 50%. It is an error for the shrink threshold to be greater than or equal to the grow threshold. + - Range between 0 and 100 + type: int +''' + +EXAMPLES = """ + - name: Modify volume autosize + netapp.ontap.na_ontap_volume_autosize: + hostname: 10.193.79.189 + username: admin + password: netapp1!
def __init__(self):
    """Set up the argument spec, pick the REST or ZAPI transport, and reject
    options the selected transport does not support."""
    self.use_rest = False
    # volume-autosize reports sizes in KB, not bytes like the volume APIs,
    # so every unit multiplier is shifted down by one power of 1024
    self._size_unit_map = {
        'k': 1,
        'm': 1024,
        'g': 1024 ** 2,
        't': 1024 ** 3,
    }
    self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
    self.argument_spec.update(dict(
        volume=dict(required=True, type="str"),
        mode=dict(required=False, choices=['grow', 'grow_shrink', 'off']),
        vserver=dict(required=True, type='str'),
        grow_threshold_percent=dict(required=False, type='int'),
        increment_size=dict(required=False, type='str'),
        maximum_size=dict(required=False, type='str'),
        minimum_size=dict(required=False, type='str'),
        reset=dict(required=False, type='bool'),
        shrink_threshold_percent=dict(required=False, type='int')
    ))
    # 'reset' restores defaults, so combining it with any explicit setting
    # would be contradictory
    self.module = AnsibleModule(
        argument_spec=self.argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['reset', option] for option in (
            'maximum_size', 'increment_size', 'minimum_size',
            'grow_threshold_percent', 'shrink_threshold_percent', 'mode')]
    )
    self.na_helper = NetAppModule()
    self.parameters = self.na_helper.set_parameters(self.module.params)
    # REST is used for ONTAP 9.6 or higher, ZAPI for lower versions
    self.rest_api = OntapRestAPI(self.module)
    if self.rest_api.is_rest():
        self.use_rest = True
        # these two options have no REST equivalent
        if self.parameters.get('increment_size'):
            self.module.fail_json(msg="Rest API does not support increment size, please switch to ZAPI")
        if self.parameters.get('reset'):
            self.module.fail_json(msg="Rest API does not support reset, please switch to ZAPI")
    else:
        if not netapp_utils.has_netapp_lib():
            self.module.fail_json(msg=netapp_utils.netapp_lib_is_required())
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
% (self.parameters['volume'], to_native(error)), + exception=traceback.format_exc()) + return self._create_get_volume_return(result) + + def _create_get_volume_return(self, results, uuid=None): + """ + Create a return value from volume-autosize-get info file + :param results: + :return: + """ + return_value = {} + if self.use_rest: + return_value['uuid'] = uuid + if 'mode' in results: + return_value['mode'] = results['mode'] + if 'grow_threshold' in results: + return_value['grow_threshold_percent'] = results['grow_threshold'] + if 'maximum' in results: + return_value['maximum_size'] = results['maximum'] + if 'minimum' in results: + return_value['minimum_size'] = results['minimum'] + if 'shrink_threshold' in results: + return_value['shrink_threshold_percent'] = results['shrink_threshold'] + else: + if results.get_child_by_name('mode'): + return_value['mode'] = results.get_child_content('mode') + if results.get_child_by_name('grow-threshold-percent'): + return_value['grow_threshold_percent'] = int(results.get_child_content('grow-threshold-percent')) + if results.get_child_by_name('increment-size'): + return_value['increment_size'] = results.get_child_content('increment-size') + if results.get_child_by_name('maximum-size'): + return_value['maximum_size'] = results.get_child_content('maximum-size') + if results.get_child_by_name('minimum-size'): + return_value['minimum_size'] = results.get_child_content('minimum-size') + if results.get_child_by_name('shrink-threshold-percent'): + return_value['shrink_threshold_percent'] = int(results.get_child_content('shrink-threshold-percent')) + if not return_value: + return_value = None + return return_value + + def modify_volume_autosize(self, uuid): + """ + Modify a Volumes autosize + :return: + """ + if self.use_rest: + autosize = {} + if self.parameters.get('mode'): + autosize['mode'] = self.parameters['mode'] + if self.parameters.get('grow_threshold_percent'): + autosize['grow_threshold'] = 
self.parameters['grow_threshold_percent'] + if self.parameters.get('maximum_size'): + autosize['maximum'] = self.parameters['maximum_size'] + if self.parameters.get('minimum_size'): + autosize['minimum'] = self.parameters['minimum_size'] + if self.parameters.get('shrink_threshold_percent'): + autosize['shrink_threshold'] = self.parameters['shrink_threshold_percent'] + if not autosize: + return + api = 'storage/volumes' + body = {'autosize': autosize} + dummy, error = rest_generic.patch_async(self.rest_api, api, uuid, body) + if error is not None: + self.module.fail_json(msg="Error modifying volume autosize for %s: %s" % (self.parameters["volume"], error)) + + else: + volume_autosize_info = netapp_utils.zapi.NaElement('volume-autosize-set') + volume_autosize_info.add_new_child('volume', self.parameters['volume']) + if self.parameters.get('mode'): + volume_autosize_info.add_new_child('mode', self.parameters['mode']) + if self.parameters.get('grow_threshold_percent'): + volume_autosize_info.add_new_child('grow-threshold-percent', str(self.parameters['grow_threshold_percent'])) + if self.parameters.get('increment_size'): + volume_autosize_info.add_new_child('increment-size', self.parameters['increment_size']) + if self.parameters.get('reset') is not None: + volume_autosize_info.add_new_child('reset', str(self.parameters['reset'])) + if self.parameters.get('maximum_size'): + volume_autosize_info.add_new_child('maximum-size', self.parameters['maximum_size']) + if self.parameters.get('minimum_size'): + volume_autosize_info.add_new_child('minimum-size', self.parameters['minimum_size']) + if self.parameters.get('shrink_threshold_percent'): + volume_autosize_info.add_new_child('shrink-threshold-percent', str(self.parameters['shrink_threshold_percent'])) + try: + self.server.invoke_successfully(volume_autosize_info, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error modifying volume autosize for %s: %s." 
% (self.parameters["volume"], to_native(error)), + exception=traceback.format_exc()) + + def modify_to_kb(self, converted_parameters): + """ + Save a coverted parameter + :param converted_parameters: Dic of all parameters + :return: + """ + for attr in ['maximum_size', 'minimum_size', 'increment_size']: + if converted_parameters.get(attr) is not None: + if self.use_rest: + converted_parameters[attr] = self.convert_to_byte(attr, converted_parameters) + else: + converted_parameters[attr] = str(self.convert_to_kb(attr, converted_parameters)) + return converted_parameters + + def convert_to_kb(self, variable, converted_parameters): + """ + Convert a number 10m in to its correct KB size + :param variable: the Parameter we are going to covert + :param converted_parameters: Dic of all parameters + :return: + """ + value = converted_parameters.get(variable) + if len(value) < 2: + self.module.fail_json(msg="%s must start with a number, and must end with a k, m, g or t, found '%s'." % (variable, value)) + if value[-1] not in ['k', 'm', 'g', 't']: + self.module.fail_json(msg="%s must end with a k, m, g or t, found %s in %s." % (variable, value[-1], value)) + try: + digits = int(value[:-1]) + except ValueError: + self.module.fail_json(msg="%s must start with a number, found %s in %s." 
% (variable, value[:-1], value)) + return self._size_unit_map[value[-1]] * digits + + def convert_to_byte(self, variable, converted_parameters): + return self.convert_to_kb(variable, converted_parameters) * 1024 + + def apply(self): + current = self.get_volume_autosize() + converted_parameters = copy.deepcopy(self.parameters) + converted_parameters = self.modify_to_kb(converted_parameters) + self.na_helper.get_modified_attributes(current, converted_parameters) + if self.parameters.get('reset') is True: + self.na_helper.changed = True + if self.na_helper.changed and not self.module.check_mode: + uuid = current.get('uuid') if current else None + self.modify_volume_autosize(uuid=uuid) + + self.module.exit_json(changed=self.na_helper.changed) + + +def main(): + """ + Apply volume autosize operations from playbook + :return: + """ + obj = NetAppOntapVolumeAutosize() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py new file mode 100644 index 000000000..a2b40e0b2 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_clone.py @@ -0,0 +1,355 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_volume_clone +short_description: NetApp ONTAP manage volume clones. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.6.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create NetApp ONTAP volume clones. +- A FlexClone License is required to use this module +options: + state: + description: + - Whether volume clone should be created. 
+ choices: ['present'] + type: str + default: 'present' + parent_volume: + description: + - The parent volume of the volume clone being created. + required: true + type: str + name: + description: + - The name of the volume clone being created. + required: true + type: str + aliases: + - volume + vserver: + description: + - Vserver in which the volume clone should be created. + required: true + type: str + parent_snapshot: + description: + - Parent snapshot in which volume clone is created off. + type: str + parent_vserver: + description: + - Vserver of parent volume in which clone is created off. + type: str + qos_policy_group_name: + description: + - The qos-policy-group-name which should be set for volume clone. + type: str + space_reserve: + description: + - The space_reserve setting which should be used for the volume clone. + choices: ['volume', 'none'] + type: str + volume_type: + description: + - The volume-type setting which should be used for the volume clone. + choices: ['rw', 'dp'] + type: str + junction_path: + version_added: 2.8.0 + description: + - Junction path of the volume. + type: str + uid: + version_added: 2.9.0 + description: + - The UNIX user ID for the clone volume. + type: int + gid: + version_added: 2.9.0 + description: + - The UNIX group ID for the clone volume. + type: int + split: + version_added: '20.2.0' + description: + - Split clone volume from parent volume. 
+ type: bool +''' + +EXAMPLES = """ + - name: create volume clone + na_ontap_volume_clone: + state: present + username: "{{ netapp username }}" + password: "{{ netapp password }}" + hostname: "{{ netapp hostname }}" + vserver: vs_hack + parent_volume: normal_volume + name: clone_volume_7 + space_reserve: none + parent_snapshot: backup1 + junction_path: /clone_volume_7 + uid: 1 + gid: 1 +""" + +RETURN = """ +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic +import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh + + +class NetAppONTAPVolumeClone: + """ + Creates a volume clone + """ + + def __init__(self): + """ + Initialize the NetAppOntapVolumeClone class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present'], default='present'), + parent_volume=dict(required=True, type='str'), + name=dict(required=True, type='str', aliases=["volume"]), + vserver=dict(required=True, type='str'), + parent_snapshot=dict(required=False, type='str', default=None), + parent_vserver=dict(required=False, type='str', default=None), + qos_policy_group_name=dict(required=False, type='str', default=None), + space_reserve=dict(required=False, type='str', choices=['volume', 'none'], default=None), + volume_type=dict(required=False, type='str', choices=['rw', 'dp']), + junction_path=dict(required=False, type='str', default=None), + uid=dict(required=False, type='int'), + gid=dict(required=False, type='int'), + split=dict(required=False, type='bool', 
default=None), + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + required_together=[ + ['uid', 'gid'] + ], + mutually_exclusive=[ + ('junction_path', 'parent_vserver'), + ('uid', 'parent_vserver'), + ('gid', 'parent_vserver') + ] + ) + + self.uuid = None # UUID if the FlexClone if it exists, or after creation + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = ['space_reserve'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + if self.parameters.get('parent_vserver'): + # use cluster ZAPI, as vserver ZAPI does not support parent-vserser for create + self.create_server = netapp_utils.setup_na_ontap_zapi(module=self.module) + # keep vserver for ems log and clone-get + self.vserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + else: + self.vserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + self.create_server = self.vserver + + def create_volume_clone(self): + """ + Creates a new volume clone + """ + if self.use_rest: + return self.create_volume_clone_rest() + clone_obj = netapp_utils.zapi.NaElement('volume-clone-create') + clone_obj.add_new_child("parent-volume", self.parameters['parent_volume']) + clone_obj.add_new_child("volume", self.parameters['name']) + if self.parameters.get('qos_policy_group_name'): + clone_obj.add_new_child("qos-policy-group-name", self.parameters['qos_policy_group_name']) + if self.parameters.get('space_reserve'): + clone_obj.add_new_child("space-reserve", self.parameters['space_reserve']) + if self.parameters.get('parent_snapshot'): + clone_obj.add_new_child("parent-snapshot", 
self.parameters['parent_snapshot']) + if self.parameters.get('parent_vserver'): + clone_obj.add_new_child("parent-vserver", self.parameters['parent_vserver']) + clone_obj.add_new_child("vserver", self.parameters['vserver']) + if self.parameters.get('volume_type'): + clone_obj.add_new_child("volume-type", self.parameters['volume_type']) + if self.parameters.get('junction_path'): + clone_obj.add_new_child("junction-path", self.parameters['junction_path']) + if self.parameters.get('uid'): + clone_obj.add_new_child("uid", str(self.parameters['uid'])) + clone_obj.add_new_child("gid", str(self.parameters['gid'])) + try: + self.create_server.invoke_successfully(clone_obj, True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error creating volume clone: %s: %s' % (self.parameters['name'], to_native(exc))) + + def modify_volume_clone(self): + """ + Modify an existing volume clone + """ + if 'split' in self.parameters and self.parameters['split']: + self.start_volume_clone_split() + + def start_volume_clone_split(self): + """ + Starts a volume clone split + """ + if self.use_rest: + return self.start_volume_clone_split_rest() + clone_obj = netapp_utils.zapi.NaElement('volume-clone-split-start') + clone_obj.add_new_child("volume", self.parameters['name']) + try: + self.vserver.invoke_successfully(clone_obj, True) + except netapp_utils.zapi.NaApiError as exc: + self.module.fail_json(msg='Error starting volume clone split: %s: %s' % (self.parameters['name'], to_native(exc))) + + def get_volume_clone(self): + if self.use_rest: + return self.get_volume_clone_rest() + clone_obj = netapp_utils.zapi.NaElement('volume-clone-get') + clone_obj.add_new_child("volume", self.parameters['name']) + try: + results = self.vserver.invoke_successfully(clone_obj, True) + except netapp_utils.zapi.NaApiError as error: + # Error 15661 denotes a volume clone not being found. 
+ if to_native(error.code) == "15661": + return None + self.module.fail_json(msg='Error fetching volume clone information %s: %s' % (self.parameters['name'], to_native(error))) + current = None + if results.get_child_by_name('attributes'): + attributes = results.get_child_by_name('attributes') + info = attributes.get_child_by_name('volume-clone-info') + # Check if clone is currently splitting. Whilst a split is in + # progress, these attributes are present in 'volume-clone-info': + # block-percentage-complete, blocks-scanned & blocks-updated. + current = { + 'split': bool( + info.get_child_by_name('block-percentage-complete') + or info.get_child_by_name('blocks-scanned') + or info.get_child_by_name('blocks-updated') + ) + } + return current + + def get_volume_clone_rest(self): + api = 'storage/volumes' + params = {'name': self.parameters['name'], + 'svm.name': self.parameters['vserver'], + 'fields': 'clone.is_flexclone,uuid'} + record, error = rest_generic.get_one_record(self.rest_api, api, params) + if error: + self.module.fail_json(msg='Error getting volume clone %s: %s' % (self.parameters['name'], to_native(error))) + if record: + return self.format_get_volume_clone_rest(record) + return record + + def format_get_volume_clone_rest(self, record): + return { + 'name': record.get('name', None), + 'uuid': record.get('uuid', None), + 'is_clone': self.na_helper.safe_get(record, ['clone', 'is_flexclone']), + # if it is a FlexClone, it is not split. + # if it is not a FlexClone, it can be either the result of a split, or a plain volume. We mark it as split, + # as it cannot be split again. 
+ 'split': self.na_helper.safe_get(record, ['clone', 'is_flexclone']) is not True + } + + def create_volume_clone_rest(self): + api = 'storage/volumes' + body = {'name': self.parameters['name'], + 'clone.parent_volume.name': self.parameters['parent_volume'], + "clone.is_flexclone": True, + "svm.name": self.parameters['vserver']} + if self.parameters.get('qos_policy_group_name'): + body['qos.policy.name'] = self.parameters['qos_policy_group_name'] + if self.parameters.get('parent_snapshot'): + body['clone.parent_snapshot.name'] = self.parameters['parent_snapshot'] + if self.parameters.get('parent_vserver'): + body['clone.parent_svm.name'] = self.parameters['parent_vserver'] + if self.parameters.get('volume_type'): + body['type'] = self.parameters['volume_type'] + if self.parameters.get('junction_path'): + body['nas.path'] = self.parameters['junction_path'] + if self.parameters.get('uid'): + body['nas.uid'] = self.parameters['uid'] + if self.parameters.get('gid'): + body['nas.gid'] = self.parameters['gid'] + query = {'return_records': 'true'} # in order to capture UUID + response, error = rest_generic.post_async(self.rest_api, api, body, query, job_timeout=120) + if error: + self.module.fail_json( + msg='Error creating volume clone %s: %s' % (self.parameters['name'], to_native(error))) + if response: + record, error = rrh.check_for_0_or_1_records(api, response, error, query) + if not error and record and 'uuid' not in record: + error = 'uuid key not present in %s:' % record + if error: + self.module.fail_json(msg='Error: failed to parse create clone response: %s' % error) + if record: + self.uuid = record['uuid'] + + def start_volume_clone_split_rest(self): + if self.uuid is None: + self.module.fail_json(msg='Error starting volume clone split %s: %s' % (self.parameters['name'], + 'clone UUID is not set')) + api = 'storage/volumes' + body = {'clone.split_initiated': True} + dummy, error = rest_generic.patch_async(self.rest_api, api, self.uuid, body, job_timeout=120) + 
if error: + self.module.fail_json(msg='Error starting volume clone split %s: %s' % (self.parameters['name'], + to_native(error))) + + def apply(self): + """ + Run Module based on playbook + """ + current = self.get_volume_clone() + if self.use_rest and current: + self.uuid = current['uuid'] + if self.use_rest and current and not current['is_clone'] and not self.parameters.get('split'): + self.module.fail_json( + msg="Error: a volume %s which is not a FlexClone already exists, and split not requested." % self.parameters['name']) + modify = None + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + # the only thing that is supported is split + current_split = {'split': current.get('split')} if current else None + modify = self.na_helper.get_modified_attributes(current_split, self.parameters) + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_volume_clone() + if self.parameters.get('split'): + self.modify_volume_clone() + if modify: + self.modify_volume_clone() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap Volume Clone object and runs the correct play task + """ + obj = NetAppONTAPVolumeClone() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_efficiency.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_efficiency.py new file mode 100644 index 000000000..9da58b0a9 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_efficiency.py @@ -0,0 +1,715 @@ +#!/usr/bin/python + +# (c) 2021-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + 
+DOCUMENTATION = """ +module: na_ontap_volume_efficiency +short_description: NetApp ONTAP enables, disables or modifies volume efficiency +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '21.2.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Enable, modify or disable volume efficiency. + - Either path or volume_name is required. + - Only admin user can modify volume efficiency. +options: + state: + description: + - Whether the specified volume efficiency should be enabled or not. + choices: ['present', 'absent'] + default: present + type: str + + vserver: + description: + - Specifies the vserver for the volume. + required: true + type: str + + path: + description: + - Specifies the path for the volume. + - Either C(path) or C(volume_name) is required. + - Requires ONTAP 9.9.1 or later with REST. + type: str + + volume_name: + description: + - Specifies the volume name. + version_added: 22.3.0 + type: str + + schedule: + description: + - Specifies the storage efficiency schedule. + - Only supported with ZAPI. + type: str + + policy: + description: + - Specifies the storage efficiency policy to use. + - By default, the following names are available 'auto', 'default', 'inline-only', '-'. + - Requires ONTAP 9.7 or later with REST. + type: str + + enable_compression: + description: + - Specifies if compression is to be enabled. + type: bool + + enable_inline_compression: + description: + - Specifies if in-line compression is to be enabled. + type: bool + + enable_inline_dedupe: + description: + - Specifies if in-line deduplication is to be enabled, only supported on AFF systems or hybrid aggregates. + type: bool + + enable_data_compaction: + description: + - Specifies if compaction is to be enabled. + type: bool + + enable_cross_volume_inline_dedupe: + description: + - Specifies if in-line cross volume inline deduplication is to be enabled, this can only be enabled when inline deduplication is enabled. 
+ type: bool + + enable_cross_volume_background_dedupe: + description: + - Specifies if cross volume background deduplication is to be enabled, this can only be enabled when inline deduplication is enabled. + type: bool + + volume_efficiency: + description: + - Start or Stop a volume efficiency operation on a given volume path. + - Requires ONTAP 9.11.1 or later with REST. + choices: ['start', 'stop'] + version_added: '21.4.0' + type: str + + start_ve_scan_all: + description: + - Specifies the scanner to scan the entire volume without applying share block optimization. + - Only supported with ZAPI. + version_added: '21.4.0' + type: bool + + start_ve_build_metadata: + description: + - Specifies the scanner to scan the entire volume and generate the fingerprint database without attempting the sharing. + - Only supported with ZAPI. + version_added: '21.4.0' + type: bool + + start_ve_delete_checkpoint: + description: + - Specifies the scanner to delete the existing checkpoint and start the operation from the beginning. + - Only supported with ZAPI. + version_added: '21.4.0' + type: bool + + start_ve_queue_operation: + description: + - Specifies the operation to queue if an existing operation is already running on the volume and in the fingerprint verification phase. + - Only supported with ZAPI. + version_added: '21.4.0' + type: bool + + start_ve_scan_old_data: + description: + - Specifies the operation to scan the file system to process all the existing data. + - Requires ONTAP 9.11.1 or later with REST. + version_added: '21.4.0' + type: bool + + start_ve_qos_policy: + description: + - Specifies the QoS policy for the operation. + - Default is best-effort in ZAPI. + - Only supported with ZAPI. + choices: ['background', 'best-effort'] + version_added: '21.4.0' + type: str + + stop_ve_all_operations: + description: + - Specifies that all running and queued operations are to be stopped. + - Only supported with ZAPI.
+ version_added: '21.4.0' + type: bool + + storage_efficiency_mode: + description: + - Storage efficiency mode used by volume. This parameter is only supported on AFF platforms. + - Requires ONTAP 9.10.1 or later. + choices: ['default', 'efficient'] + type: str + version_added: '21.14.0' + +notes: + - supports ZAPI and REST. REST requires ONTAP 9.6 or later. + - supports check mode. +""" + +EXAMPLES = """ + - name: Enable Volume efficiency + netapp.ontap.na_ontap_volume_efficiency: + state: present + vserver: "TESTSVM" + path: "/vol/test_sis" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Disable Volume efficiency test + netapp.ontap.na_ontap_volume_efficiency: + state: absent + vserver: "TESTSVM" + path: "/vol/test_sis" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Modify storage efficiency schedule with ZAPI. + netapp.ontap.na_ontap_volume_efficiency: + state: present + vserver: "TESTSVM" + path: "/vol/test_sis" + schedule: "mon-sun@0,1,23" + enable_compression: true + enable_inline_compression: true + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Start volume efficiency + netapp.ontap.na_ontap_volume_efficiency: + state: present + vserver: "TESTSVM" + path: "/vol/test_sis" + volume_efficiency: "start" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: Stop volume efficiency + netapp.ontap.na_ontap_volume_efficiency: + state: present + vserver: "TESTSVM" + path: "/vol/test_sis" + volume_efficiency: "stop" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + + - name: modify volume efficiency with volume name in REST. 
+ netapp.ontap.na_ontap_volume_efficiency: + state: present + vserver: "TESTSVM" + volume_name: "test_sis" + volume_efficiency: "stop" + enable_compression: True + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false + +""" + +RETURN = """ + +""" + +import copy +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppOntapVolumeEfficiency(object): + """ + Creates, Modifies and Disables a Volume Efficiency + """ + def __init__(self): + """ + Initialize the ONTAP Volume Efficiency class + """ + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + path=dict(required=False, type='str'), + volume_name=dict(required=False, type='str'), + schedule=dict(required=False, type='str'), + policy=dict(required=False, type='str'), + enable_inline_compression=dict(required=False, type='bool'), + enable_compression=dict(required=False, type='bool'), + enable_inline_dedupe=dict(required=False, type='bool'), + enable_data_compaction=dict(required=False, type='bool'), + enable_cross_volume_inline_dedupe=dict(required=False, type='bool'), + enable_cross_volume_background_dedupe=dict(required=False, type='bool'), + storage_efficiency_mode=dict(required=False, choices=['default', 'efficient'], type='str'), + volume_efficiency=dict(required=False, choices=['start', 'stop'], type='str'), + start_ve_scan_all=dict(required=False, type='bool'), + start_ve_build_metadata=dict(required=False, type='bool'), + 
start_ve_delete_checkpoint=dict(required=False, type='bool'), + start_ve_queue_operation=dict(required=False, type='bool'), + start_ve_scan_old_data=dict(required=False, type='bool'), + start_ve_qos_policy=dict(required=False, choices=['background', 'best-effort'], type='str'), + stop_ve_all_operations=dict(required=False, type='bool') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + required_if=[('start_ve_scan_all', True, ['start_ve_scan_old_data'])], + required_one_of=[('path', 'volume_name')], + mutually_exclusive=[('policy', 'schedule'), ('path', 'volume_name')] + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + if self.parameters['state'] == 'present': + self.parameters['enabled'] = 'enabled' + else: + self.parameters['enabled'] = 'disabled' + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + partially_supported_rest_properties = [ + ['policy', (9, 7)], ['storage_efficiency_mode', (9, 10, 1)], ['path', (9, 9, 1)], + # make op_state active/idle is supported from 9.11.1 or later with REST. 
+ ['volume_efficiency', (9, 11, 1)], ['start_ve_scan_old_data', (9, 11, 1)] + ] + unsupported_rest_properties = [ + 'schedule', 'start_ve_scan_all', 'start_ve_build_metadata', 'start_ve_delete_checkpoint', + 'start_ve_queue_operation', 'start_ve_qos_policy', 'stop_ve_all_operations' + ] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties, partially_supported_rest_properties) + self.volume_uuid = None + if 'volume_efficiency' in self.parameters: + if self.parameters['volume_efficiency'] == 'start': + self.parameters['status'] = 'running' if not self.use_rest else 'active' + else: + self.parameters['status'] = 'idle' + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.validate_and_configure_zapi() + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def validate_and_configure_zapi(self): + if self.parameters.get('storage_efficiency_mode'): + self.module.fail_json(msg="Error: cannot set storage_efficiency_mode in ZAPI") + # set default value for ZAPI like before as REST currently not support this option. 
+ if not self.parameters.get('start_ve_qos_policy'): + self.parameters['start_ve_qos_policy'] = 'best-effort' + if self.parameters.get('volume_name'): + self.parameters['path'] = '/vol/' + self.parameters['volume_name'] + self.module.warn("ZAPI requires '/vol/' present in the volume path, updated path: %s" % self.parameters['path']) + + def get_volume_efficiency(self): + """ + get the storage efficiency for a given path + :return: dict of sis if exist, None if not + """ + + return_value = None + + if self.use_rest: + api = 'storage/volumes' + query = {'svm.name': self.parameters['vserver'], 'fields': 'uuid,efficiency'} + if self.parameters.get('path'): + query['efficiency.volume_path'] = self.parameters['path'] + else: + query['name'] = self.parameters['volume_name'] + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + path_or_volume = self.parameters.get('path') or self.parameters.get('volume_name') + self.module.fail_json(msg='Error getting volume efficiency for path %s on vserver %s: %s' % ( + path_or_volume, self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + if record: + return_value = self.format_rest_record(record) + return return_value + + else: + + sis_get_iter = netapp_utils.zapi.NaElement('sis-get-iter') + sis_status_info = netapp_utils.zapi.NaElement('sis-status-info') + sis_status_info.add_new_child('path', self.parameters['path']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(sis_status_info) + sis_get_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(sis_get_iter, True) + if result.get_child_by_name('attributes-list'): + sis_status_attributes = result['attributes-list']['sis-status-info'] + return_value = { + 'path': sis_status_attributes['path'], + 'enabled': sis_status_attributes['state'], + 'status': sis_status_attributes['status'], + 'schedule': sis_status_attributes['schedule'], + 'enable_inline_compression': 
self.na_helper.get_value_for_bool( + True, sis_status_attributes.get_child_content('is-inline-compression-enabled') + ), + 'enable_compression': self.na_helper.get_value_for_bool(True, sis_status_attributes.get_child_content('is-compression-enabled')), + 'enable_inline_dedupe': self.na_helper.get_value_for_bool(True, sis_status_attributes.get_child_content('is-inline-dedupe-enabled')), + 'enable_data_compaction': self.na_helper.get_value_for_bool( + True, sis_status_attributes.get_child_content('is-data-compaction-enabled') + ), + 'enable_cross_volume_inline_dedupe': self.na_helper.get_value_for_bool( + True, sis_status_attributes.get_child_content('is-cross-volume-inline-dedupe-enabled') + ), + 'enable_cross_volume_background_dedupe': self.na_helper.get_value_for_bool( + True, sis_status_attributes.get_child_content('is-cross-volume-background-dedupe-enabled') + ) + } + + if sis_status_attributes.get_child_by_name('policy'): + return_value['policy'] = sis_status_attributes['policy'] + else: + return_value['policy'] = '-' + + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting volume efficiency for path %s on vserver %s: %s' % ( + self.parameters['path'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc() + ) + return return_value + + def enable_volume_efficiency(self): + """ + Enables Volume efficiency for a given volume by path + """ + sis_enable = netapp_utils.zapi.NaElement("sis-enable") + sis_enable.add_new_child("path", self.parameters['path']) + + try: + self.server.invoke_successfully(sis_enable, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error enabling storage efficiency for path %s on vserver %s: %s' % (self.parameters['path'], + self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()) + + def disable_volume_efficiency(self): + """ + Disables Volume efficiency for a given volume by path + """ + sis_disable = 
netapp_utils.zapi.NaElement("sis-disable") + sis_disable.add_new_child("path", self.parameters['path']) + + try: + self.server.invoke_successfully(sis_disable, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error disabling storage efficiency for path %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def modify_volume_efficiency(self, body=None): + """ + Modifies volume efficiency settings for a given volume by path + """ + + if self.use_rest: + if not body: + return + dummy, error = rest_generic.patch_async(self.rest_api, 'storage/volumes', self.volume_uuid, body) + if error: + if 'Unexpected argument "storage_efficiency_mode".' in error: + error = "cannot modify storage_efficiency mode in non AFF platform." + if 'not authorized' in error: + error = "%s user is not authorized to modify volume efficiency" % self.parameters.get('username') + self.module.fail_json(msg='Error in volume/efficiency patch: %s' % error) + + else: + + sis_config_obj = netapp_utils.zapi.NaElement("sis-set-config") + sis_config_obj.add_new_child('path', self.parameters['path']) + if 'schedule' in self.parameters: + sis_config_obj.add_new_child('schedule', self.parameters['schedule']) + if 'policy' in self.parameters: + sis_config_obj.add_new_child('policy-name', self.parameters['policy']) + if 'enable_compression' in self.parameters: + sis_config_obj.add_new_child('enable-compression', self.na_helper.get_value_for_bool(False, self.parameters['enable_compression'])) + if 'enable_inline_compression' in self.parameters: + sis_config_obj.add_new_child('enable-inline-compression', self.na_helper.get_value_for_bool( + False, self.parameters['enable_inline_compression']) + ) + if 'enable_inline_dedupe' in self.parameters: + sis_config_obj.add_new_child('enable-inline-dedupe', self.na_helper.get_value_for_bool( + False, self.parameters['enable_inline_dedupe']) + ) + if 'enable_data_compaction' in self.parameters: + 
sis_config_obj.add_new_child('enable-data-compaction', self.na_helper.get_value_for_bool( + False, self.parameters['enable_data_compaction']) + ) + if 'enable_cross_volume_inline_dedupe' in self.parameters: + sis_config_obj.add_new_child('enable-cross-volume-inline-dedupe', self.na_helper.get_value_for_bool( + False, self.parameters['enable_cross_volume_inline_dedupe']) + ) + if 'enable_cross_volume_background_dedupe' in self.parameters: + sis_config_obj.add_new_child('enable-cross-volume-background-dedupe', self.na_helper.get_value_for_bool( + False, self.parameters['enable_cross_volume_background_dedupe']) + ) + + try: + self.server.invoke_successfully(sis_config_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying storage efficiency for path %s: %s' % (self.parameters['path'], to_native(error)), + exception=traceback.format_exc()) + + def start_volume_efficiency(self): + """ + Starts volume efficiency for a given flex volume by path + """ + + sis_start = netapp_utils.zapi.NaElement('sis-start') + sis_start.add_new_child('path', self.parameters['path']) + + if 'start_ve_scan_all' in self.parameters: + sis_start.add_new_child('scan-all', self.na_helper.get_value_for_bool( + False, self.parameters['start_ve_scan_all']) + ) + if 'start_ve_build_metadata' in self.parameters: + sis_start.add_new_child('build-metadata', self.na_helper.get_value_for_bool( + False, self.parameters['start_ve_build_metadata']) + ) + if 'start_ve_delete_checkpoint' in self.parameters: + sis_start.add_new_child('delete-checkpoint', self.na_helper.get_value_for_bool( + False, self.parameters['start_ve_delete_checkpoint']) + ) + if 'start_ve_queue_operation' in self.parameters: + sis_start.add_new_child('queue-operation', self.na_helper.get_value_for_bool( + False, self.parameters['start_ve_queue_operation']) + ) + if 'start_ve_scan_old_data' in self.parameters: + sis_start.add_new_child('scan', self.na_helper.get_value_for_bool( + False, 
self.parameters['start_ve_scan_old_data']) + ) + if 'start_ve_qos_policy' in self.parameters: + sis_start.add_new_child('qos-policy', self.parameters['start_ve_qos_policy']) + + try: + self.server.invoke_successfully(sis_start, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error starting storage efficiency for path %s on vserver %s: %s' % (self.parameters['path'], + self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()) + + def stop_volume_efficiency(self): + """ + Stops volume efficiency for a given flex volume by path + """ + sis_stop = netapp_utils.zapi.NaElement('sis-stop') + sis_stop.add_new_child('path', self.parameters['path']) + if 'stop_ve_all_operations' in self.parameters: + sis_stop.add_new_child('all-operations', self.na_helper.get_value_for_bool( + False, self.parameters['stop_ve_all_operations']) + ) + + try: + self.server.invoke_successfully(sis_stop, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error stopping storage efficiency for path %s on vserver %s: %s' % (self.parameters['path'], + self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()) + + def format_rest_record(self, record): + """ + returns current efficiency values. 
+ """ + self.volume_uuid = record['uuid'] + return_value = { + 'enabled': self.na_helper.safe_get(record, ['efficiency', 'state']), + 'status': self.na_helper.safe_get(record, ['efficiency', 'op_state']), + 'enable_compression': self.na_helper.safe_get(record, ['efficiency', 'compression']), + 'enable_inline_dedupe': self.na_helper.safe_get(record, ['efficiency', 'dedupe']), + 'enable_data_compaction': self.na_helper.safe_get(record, ['efficiency', 'compaction']), + 'enable_cross_volume_inline_dedupe': self.na_helper.safe_get(record, ['efficiency', 'cross_volume_dedupe']) + } + if not self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 9, 1): + # efficiency is enabled if dedupe is either background or both. + # it's disabled if both dedupe and compression is none. + dedupe = self.na_helper.safe_get(record, ['efficiency', 'dedupe']) + if dedupe in ['background', 'both']: + return_value['enabled'] = 'enabled' + elif dedupe == 'none' and self.na_helper.safe_get(record, ['efficiency', 'compression']) == 'none': + return_value['enabled'] = 'disabled' + if self.parameters.get('storage_efficiency_mode'): + return_value['storage_efficiency_mode'] = self.na_helper.safe_get(record, ['efficiency', 'storage_efficiency_mode']) + if self.parameters.get('policy'): + return_value['policy'] = self.na_helper.safe_get(record, ['efficiency', 'policy', 'name']) + compression, inline_compression, cross_volume_inline_dedupe, cross_volume_background_dedupe = False, False, False, False + inline_dedupe, compaction = False, False + if return_value['enable_compression'] in ['background', 'both']: + compression = True + if return_value['enable_compression'] in ['inline', 'both']: + inline_compression = True + if return_value['enable_cross_volume_inline_dedupe'] in ['inline', 'both']: + cross_volume_inline_dedupe = True + if return_value['enable_cross_volume_inline_dedupe'] in ['background', 'both']: + cross_volume_background_dedupe = True + if return_value['enable_inline_dedupe'] in 
['inline', 'both']: + inline_dedupe = True + if return_value['enable_data_compaction'] == 'inline': + compaction = True + return_value['enable_compression'] = compression + return_value['enable_inline_compression'] = inline_compression + return_value['enable_cross_volume_inline_dedupe'] = cross_volume_inline_dedupe + return_value['enable_cross_volume_background_dedupe'] = cross_volume_background_dedupe + return_value['enable_inline_dedupe'] = inline_dedupe + return_value['enable_data_compaction'] = compaction + return return_value + + def form_modify_body_rest(self, modify, current): + # disable volume efficiency requires dedupe and compression set to 'none'. + if modify.get('enabled') == 'disabled': + return {'efficiency': {'dedupe': 'none', 'compression': 'none', 'compaction': 'none', 'cross_volume_dedupe': 'none'}} + body = {} + if modify.get('enabled') == 'enabled': + body['efficiency.dedupe'] = 'background' + # there are cases where ZAPI allows setting cross_volume_background_dedupe and inline_dedupe and REST not. 
+ if 'enable_compression' in modify or 'enable_inline_compression' in modify: + body['efficiency.compression'] = self.derive_efficiency_type(modify.get('enable_compression'), modify.get('enable_inline_compression'), + current.get('enable_compression'), current.get('enable_inline_compression')) + + if 'enable_cross_volume_background_dedupe' in modify or 'enable_cross_volume_inline_dedupe' in modify: + body['efficiency.cross_volume_dedupe'] = self.derive_efficiency_type(modify.get('enable_cross_volume_background_dedupe'), + modify.get('enable_cross_volume_inline_dedupe'), + current.get('enable_cross_volume_background_dedupe'), + current.get('enable_cross_volume_inline_dedupe')) + + if modify.get('enable_data_compaction'): + body['efficiency.compaction'] = 'inline' + elif modify.get('enable_data_compaction') is False: + body['efficiency.compaction'] = 'none' + + if modify.get('enable_inline_dedupe'): + body['efficiency.dedupe'] = 'both' + elif modify.get('enable_inline_dedupe') is False: + body['efficiency.dedupe'] = 'background' + # REST changes policy to default, so use policy in params. 
+ if self.parameters.get('policy'): + body['efficiency.policy.name'] = self.parameters['policy'] + if modify.get('storage_efficiency_mode'): + body['storage_efficiency_mode'] = modify['storage_efficiency_mode'] + + # start/stop vol efficiency + if modify.get('status'): + body['efficiency.scanner.state'] = modify['status'] + if 'start_ve_scan_old_data' in self.parameters: + body['efficiency.scanner.scan_old_data'] = self.parameters['start_ve_scan_old_data'] + return body + + @staticmethod + def derive_efficiency_type(desired_background, desired_inline, current_background, current_inline): + if ((desired_background and desired_inline) or + (desired_background and desired_inline is None and current_inline) or + (desired_background is None and desired_inline and current_background)): + return 'both' + elif ((desired_background and desired_inline is False) or + (desired_background and desired_inline is None and not current_inline) or + (desired_background is None and desired_inline is False and current_background)): + return 'background' + elif ((desired_background is False and desired_inline) or + (desired_background is False and desired_inline is None and current_inline) or + (desired_background is None and desired_inline and not current_background)): + return 'inline' + elif ((desired_background is False and desired_inline is False) or + (desired_background is False and desired_inline is None and not current_inline) or + (desired_background is None and desired_inline is False and not current_background)): + return 'none' + + def validate_efficiency_compression(self, modify): + """ + validate: + - no efficiency keys are set when state is disabled. + """ + if self.parameters['enabled'] == 'disabled': + # if any of the keys are set, efficiency gets enabled, error out if any of eff keys are set and state is absent. 
+ unsupported_enable_eff_keys = [ + 'enable_compression', 'enable_inline_compression', 'enable_inline_dedupe', + 'enable_cross_volume_inline_dedupe', 'enable_cross_volume_background_dedupe', 'enable_data_compaction' + ] + used_unsupported_enable_eff_keys = [key for key in unsupported_enable_eff_keys if self.parameters.get(key)] + if used_unsupported_enable_eff_keys: + disable_str = 'when volume efficiency already disabled, retry with state: present' + if modify.get('enabled') == 'disabled': + disable_str = 'when trying to disable volume efficiency' + self.module.fail_json(msg="Error: cannot set compression keys: %s %s" % (used_unsupported_enable_eff_keys, disable_str)) + + def apply(self): + current = self.get_volume_efficiency() + ve_status = None + + # If the volume efficiency does not exist for a given path to create this current is set to disabled + # this is for ONTAP systems that do not enable efficiency by default. + if current is None: + current = {'enabled': 'disabled'} + modify = self.na_helper.get_modified_attributes(current, self.parameters) + to_modify = copy.deepcopy(modify) + self.validate_efficiency_compression(modify) + if self.na_helper.changed and not self.module.check_mode: + # enable/disable, start/stop & modify vol efficiency handled in REST PATCH. 
+ if self.use_rest: + self.modify_volume_efficiency(self.form_modify_body_rest(modify, current)) + else: + if 'enabled' in modify: + if modify['enabled'] == 'enabled': + self.enable_volume_efficiency() + # Checking to see if there are any additional parameters that need to be set after + # enabling volume efficiency required for Non-AFF systems + current = self.get_volume_efficiency() + modify = self.na_helper.get_modified_attributes(current, self.parameters) + to_modify['modify_after_enable'] = copy.deepcopy(modify) + elif modify['enabled'] == 'disabled': + self.disable_volume_efficiency() + # key may not exist anymore, if modify is refreshed at line 686 + modify.pop('enabled', None) + + if 'status' in modify: + ve_status = modify['status'] + del modify['status'] + + # Removed the enabled and volume efficiency status, + # if there is anything remaining in the modify dict we need to modify. + if modify: + self.modify_volume_efficiency() + + if ve_status == 'running': + self.start_volume_efficiency() + elif ve_status == 'idle': + self.stop_volume_efficiency() + + result = netapp_utils.generate_result(self.na_helper.changed, modify=to_modify) + self.module.exit_json(**result) + + +def main(): + """ + Enables, modifies or disables NetApp Ontap volume efficiency + """ + obj = NetAppOntapVolumeEfficiency() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py new file mode 100644 index 000000000..272d8bf92 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_volume_snaplock.py @@ -0,0 +1,227 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 
'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' + +module: na_ontap_volume_snaplock + +short_description: NetApp ONTAP manage volume snaplock retention. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +version_added: '20.2.0' +author: NetApp Ansible Team (@carchi8py) +description: +- Modifies the snaplock retention of volumes on NetApp ONTAP. +options: + name: + description: + - The name of the volume to manage. + type: str + required: true + + vserver: + description: + - Name of the vserver to use. + type: str + required: true + + default_retention_period: + description: + - Specifies the default retention period that will be applied. + - The format is " " for example "10 days", the following units are valid + - "seconds" + - "minutes" + - "hours" + - "days" + - "months" + - "years" + - If this option is specified as "max", then maximum_retention_period will be used as the default retention period. + type: str + + autocommit_period: + description: + - Specifies the autocommit-period for the snaplock volume. + - The format is " " for example "8 hours", the following units are valid + - "seconds" + - "minutes" + - "hours" + - "days" + - "months" + - "years" + type: str + + is_volume_append_mode_enabled: + description: + - Specifies if the volume append mode must be enabled or disabled. + - It can be modified only when the volume is not mounted and does not have any data or Snapshot copy. + - Volume append mode is not supported on SnapLock audit log volumes. + - When it is enabled, all files created with write permissions on the volume will be WORM appendable files by default. + - All WORM appendable files not modified for a period greater than the autocommit period of the volume are also committed to WORM read-only state. + type: bool + + maximum_retention_period: + description: + - Specifies the allowed maximum retention period that will be applied. 
+ - The format is " " for example "2 years", the following units are valid + - "seconds" + - "minutes" + - "hours" + - "days" + - "months" + - "years" + type: str + + minimum_retention_period: + description: + - Specifies the allowed minimum retention period that will be applied. + - The format is " " for example "1 days", the following units are valid + - "seconds" + - "minutes" + - "hours" + - "days" + - "months" + - "years" + type: str + +notes: + - supports ZAPI only. + - for REST, snaplock is supported in na_ontap_volume starting with 21.18.0. +''' + +EXAMPLES = """ + - name: Set volume snaplock + na_ontap_volume_snaplock: + vserver: svm + name: ansibleVolume + default_retention_period: "5 days" + minimum_retention_period: "0 years" + maximum_retention_period: "10 days" + is_volume_append_mode_enabled: False + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + +""" + +RETURN = """ +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapVolumeSnaplock(object): + '''Class with volume operations''' + + def __init__(self): + '''Initialize module parameters''' + + self.argument_spec = netapp_utils.na_ontap_zapi_only_spec() + self.argument_spec.update(dict( + name=dict(required=True, type='str'), + vserver=dict(required=True, type='str'), + default_retention_period=dict(required=False, type='str'), + maximum_retention_period=dict(required=False, type='str'), + minimum_retention_period=dict(required=False, type='str'), + autocommit_period=dict(required=False, type='str'), + is_volume_append_mode_enabled=dict(required=False, type='bool'), + )) + self.module = AnsibleModule( + 
argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.na_helper.module_replaces('na_ontap_volume', self.module) + + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_volume_snaplock_attrs(self): + """ + Return volume-get-snaplock-attrs query results + :param vol_name: name of the volume + :return: dict of the volume snaplock attrs + """ + volume_snaplock = netapp_utils.zapi.NaElement('volume-get-snaplock-attrs') + volume_snaplock.add_new_child('volume', self.parameters['name']) + + try: + result = self.server.invoke_successfully(volume_snaplock, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching snaplock attributes for volume %s : %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + return_value = None + + if result.get_child_by_name('snaplock-attrs'): + volume_snaplock_attributes = result['snaplock-attrs']['snaplock-attrs-info'] + return_value = { + 'autocommit_period': volume_snaplock_attributes['autocommit-period'], + 'default_retention_period': volume_snaplock_attributes['default-retention-period'], + 'is_volume_append_mode_enabled': self.na_helper.get_value_for_bool(True, volume_snaplock_attributes['is-volume-append-mode-enabled']), + 'maximum_retention_period': volume_snaplock_attributes['maximum-retention-period'], + 'minimum_retention_period': volume_snaplock_attributes['minimum-retention-period'], + } + return return_value + + def set_volume_snaplock_attrs(self, modify): + '''Set ONTAP volume snaplock attributes''' + volume_snaplock_obj = netapp_utils.zapi.NaElement('volume-set-snaplock-attrs') + volume_snaplock_obj.add_new_child('volume', self.parameters['name']) + if 
modify.get('autocommit_period') is not None: + volume_snaplock_obj.add_new_child('autocommit-period', self.parameters['autocommit_period']) + if modify.get('default_retention_period') is not None: + volume_snaplock_obj.add_new_child('default-retention-period', self.parameters['default_retention_period']) + if modify.get('is_volume_append_mode_enabled') is not None: + volume_snaplock_obj.add_new_child('is-volume-append-mode-enabled', + self.na_helper.get_value_for_bool(False, self.parameters['is_volume_append_mode_enabled'])) + if modify.get('maximum_retention_period') is not None: + volume_snaplock_obj.add_new_child('maximum-retention-period', self.parameters['maximum_retention_period']) + if modify.get('minimum_retention_period') is not None: + volume_snaplock_obj.add_new_child('minimum-retention-period', self.parameters['minimum_retention_period']) + try: + self.server.invoke_successfully(volume_snaplock_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error setting snaplock attributes for volume %s : %s' + % (self.parameters['name'], to_native(error)), + exception=traceback.format_exc()) + + def apply(self): + current, modify = self.get_volume_snaplock_attrs(), None + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed and not self.module.check_mode: + self.set_volume_snaplock_attrs(modify) + result = netapp_utils.generate_result(self.na_helper.changed, modify=modify) + self.module.exit_json(**result) + + +def main(): + '''Set volume snaplock attributes from playbook''' + obj = NetAppOntapVolumeSnaplock() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py new file mode 100644 index 000000000..e089d3b8a --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan.py @@ -0,0 +1,168 @@ 
+#!/usr/bin/python + +# (c) 2018-2019, NetApp Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_vscan +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +module: na_ontap_vscan +short_description: NetApp ONTAP Vscan enable/disable. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) +notes: +- on demand task, on_access_policy and scanner_pools must be set up before running this module +description: +- Enable and Disable Vscan +options: + enable: + description: + - Whether to enable to disable a Vscan + type: bool + default: True + + vserver: + description: + - the name of the data vserver to use. + required: true + type: str +''' + +EXAMPLES = """ + - name: Enable Vscan + na_ontap_vscan: + enable: True + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: trident_svm + + - name: Disable Vscan + na_ontap_vscan: + enable: False + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: trident_svm +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapVscan(object): + ''' enable/disable vscan ''' + def __init__(self): + self.use_rest = False + self.argument_spec = 
netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + enable=dict(type='bool', default=True), + vserver=dict(required=True, type='str'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + # API should be used for ONTAP 9.6 or higher, Zapi for lower version + self.rest_api = OntapRestAPI(self.module) + if self.rest_api.is_rest(): + self.use_rest = True + else: + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_vscan(self): + if self.use_rest: + params = {'fields': 'svm,enabled', + "svm.name": self.parameters['vserver']} + api = "protocols/vscan" + message, error = self.rest_api.get(api, params) + if error: + self.module.fail_json(msg=error) + return message['records'][0] + else: + vscan_status_iter = netapp_utils.zapi.NaElement('vscan-status-get-iter') + vscan_status_info = netapp_utils.zapi.NaElement('vscan-status-info') + vscan_status_info.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(vscan_status_info) + vscan_status_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(vscan_status_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error getting Vscan info for Vserver %s: %s' % + (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + return result.get_child_by_name('attributes-list').get_child_by_name('vscan-status-info') + + def enable_vscan(self, uuid=None): + if self.use_rest: + params = {"svm.name": self.parameters['vserver']} + data = 
{"enabled": self.parameters['enable']} + api = "protocols/vscan/" + uuid + dummy, error = self.rest_api.patch(api, data, params) + if error is not None: + self.module.fail_json(msg=error) + else: + vscan_status_obj = netapp_utils.zapi.NaElement("vscan-status-modify") + vscan_status_obj.add_new_child('is-vscan-enabled', str(self.parameters['enable'])) + try: + self.server.invoke_successfully(vscan_status_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg="Error Enable/Disabling Vscan: %s" % to_native(error), exception=traceback.format_exc()) + + def apply(self): + changed = False + current = self.get_vscan() + if self.use_rest: + if current['enabled'] != self.parameters['enable']: + if not self.module.check_mode: + self.enable_vscan(current['svm']['uuid']) + changed = True + else: + if current.get_child_content('is-vscan-enabled') != str(self.parameters['enable']).lower(): + if not self.module.check_mode: + self.enable_vscan() + changed = True + self.module.exit_json(changed=changed) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapVscan() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py new file mode 100644 index 000000000..08da1fe7e --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_access_policy.py @@ -0,0 +1,524 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +module: na_ontap_vscan_on_access_policy +short_description: NetApp ONTAP Vscan on access policy configuration. 
+extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) +description: + - Configure on access policy for Vscan (virus scan) +options: + state: + description: + - Whether a Vscan on Access policy is present or not + choices: ['present', 'absent'] + type: str + default: present + + vserver: + description: + - the name of the data vserver to use. + required: true + type: str + + policy_name: + description: + - The name of the policy + required: true + type: str + + file_ext_to_exclude: + description: + - File extensions for which On-Access scanning must not be performed. + type: list + elements: str + + file_ext_to_include: + description: + - File extensions for which On-Access scanning is considered. The default value is '*', which means that all files are considered for scanning except + - those which are excluded from scanning. + type: list + elements: str + + filters: + description: + - A list of filters which can be used to define the scope of the On-Access policy more precisely. The filters can be added in any order. Possible values + - scan_ro_volume Enable scans for read-only volume, + - scan_execute_access Scan only files opened with execute-access (CIFS only). + - deprecated with REST, use C(scan_readonly_volumes) or C(only_execute_access). + type: list + elements: str + + is_scan_mandatory: + description: + - Specifies whether access to a file is allowed if there are no external virus-scanning servers available for virus scanning. + - If not specified, default value is False in ZAPI. + type: bool + + max_file_size: + description: + - Max file-size (in bytes) allowed for scanning. The default value of 2147483648 (2GB) is taken if not provided at the time of creating a policy. + type: int + + paths_to_exclude: + description: + - File paths for which On-Access scanning must not be performed. 
+ type: list + elements: str + + scan_files_with_no_ext: + description: + - Specifies whether files without any extension are considered for scanning or not. + - If not specified, default value is True in ZAPI. + type: bool + + policy_status: + description: + - Status for the created policy + type: bool + version_added: 20.8.0 + + scan_readonly_volumes: + description: + - Specifies whether or not read-only volume can be scanned. + - If not specified, default value is False in creating policy. + type: bool + version_added: 21.20.0 + + only_execute_access: + description: + - Scan only files opened with execute-access. + - If not specified, default value is False in creating policy. + type: bool + version_added: 21.20.0 +''' + +EXAMPLES = """ + - name: Create Vscan On Access Policy + netapp.ontap.na_ontap_vscan_on_access_policy: + state: present + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: carchi-vsim2 + policy_name: carchi_policy + file_ext_to_exclude: ['exe', 'yml'] + - name: Create Vscan On Access Policy with Policy Status enabled + netapp.ontap.na_ontap_vscan_on_access_policy: + state: present + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: carchi-vsim2 + policy_name: carchi_policy + file_ext_to_exclude: ['exe', 'yml'] + policy_status: True + - name: modify Vscan on Access Policy + netapp.ontap.na_ontap_vscan_on_access_policy: + state: present + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: carchi-vsim2 + policy_name: carchi_policy + file_ext_to_exclude: ['exe', 'yml', 'py'] + - name: Delete On Access Policy + netapp.ontap.na_ontap_vscan_on_access_policy: + state: absent + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: carchi-vsim2 + policy_name: carchi_policy +""" + +RETURN = """ + 
+""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic, rest_vserver + + +class NetAppOntapVscanOnAccessPolicy: + """ + Create/Modify/Delete a Vscan OnAccess policy + """ + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + policy_name=dict(required=True, type='str'), + file_ext_to_exclude=dict(required=False, type='list', elements='str'), + file_ext_to_include=dict(required=False, type='list', elements='str'), + filters=dict(required=False, type='list', elements='str'), + is_scan_mandatory=dict(required=False, type='bool'), + max_file_size=dict(required=False, type="int"), + paths_to_exclude=dict(required=False, type='list', elements='str'), + scan_files_with_no_ext=dict(required=False, type='bool'), + policy_status=dict(required=False, type='bool'), + scan_readonly_volumes=dict(required=False, type='bool'), + only_execute_access=dict(required=False, type='bool') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['filters', 'scan_readonly_volumes'], + ['filters', 'only_execute_access'] + ] + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + # Set up Rest API + self.rest_api = OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + self.svm_uuid = None + + # validate list options not contains '' in it 
in REST. + if self.use_rest: + self.validate_options() + + # file_ext_to_include cannot be empty in both ZAPI and REST. + if 'file_ext_to_include' in self.parameters and len(self.parameters['file_ext_to_include']) < 1: + self.module.fail_json(msg="Error: The value for file_ext_include cannot be empty") + + # map filters options to rest equivalent options. + if self.use_rest and 'filters' in self.parameters: + self.parameters['only_execute_access'], self.parameters['scan_readonly_volumes'] = False, False + for filter in self.parameters['filters']: + if filter.lower() not in ['scan_execute_access', 'scan_ro_volume']: + self.module.fail_json(msg="Error: Invalid value %s specified for filters %s" % filter) + if filter.lower() == 'scan_execute_access': + self.parameters['only_execute_access'] = True + if filter.lower() == 'scan_ro_volume': + self.parameters['scan_readonly_volumes'] = True + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + self.set_playbook_zapi_key_map() + + # set default value for is_scan_mandatory and scan_files_with_no_ext if not set. + if self.parameters.get('is_scan_mandatory') is None: + self.parameters['is_scan_mandatory'] = False + if self.parameters.get('scan_files_with_no_ext') is None: + self.parameters['scan_files_with_no_ext'] = True + + # form filters from REST options only_execute_access and scan_readonly_volumes. 
+ filters = [] + if self.parameters.get('only_execute_access'): + filters.append('scan_execute_access') + if self.parameters.get('scan_readonly_volumes'): + filters.append('scan_ro_volume') + if filters: + self.parameters['filters'] = filters + + def validate_options(self): + list_options = ['filters', 'file_ext_to_exclude', 'file_ext_to_include', 'paths_to_exclude'] + invalid_options = [] + for option in list_options: + if option in self.parameters: + for value in self.parameters[option]: + # '' is an invalid value. + if len(value.strip()) < 1: + invalid_options.append(option) + if invalid_options: + self.module.fail_json(msg="Error: Invalid value specified for option(s): %s" % ', '.join(invalid_options)) + + def set_playbook_zapi_key_map(self): + self.na_helper.zapi_int_keys = { + 'max_file_size': 'max-file-size' + } + self.na_helper.zapi_str_keys = { + 'vserver': 'vserver', + 'policy_name': 'policy-name' + } + self.na_helper.zapi_bool_keys = { + 'is_scan_mandatory': 'is-scan-mandatory', + 'policy_status': 'is-policy-enabled', + 'scan_files_with_no_ext': 'scan-files-with-no-ext' + } + self.na_helper.zapi_list_keys = { + 'file_ext_to_exclude': 'file-ext-to-exclude', + 'file_ext_to_include': 'file-ext-to-include', + 'paths_to_exclude': 'paths-to-exclude', + 'filters': 'filters' + } + + def get_on_access_policy(self): + """ + Return a Vscan on Access Policy + :return: None if there is no access policy, return the policy if there is + """ + if self.use_rest: + return self.get_on_access_policy_rest() + access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-get-iter') + access_policy_info = netapp_utils.zapi.NaElement('vscan-on-access-policy-info') + access_policy_info.add_new_child('policy-name', self.parameters['policy_name']) + access_policy_info.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(access_policy_info) + access_policy_obj.add_child_elem(query) + try: + result = 
self.server.invoke_successfully(access_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error searching Vscan on Access Policy %s: %s' % + (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc()) + return_value = {} + if result.get_child_by_name('num-records'): + if int(result.get_child_content('num-records')) == 1: + attributes_list = result.get_child_by_name('attributes-list') + vscan_info = attributes_list.get_child_by_name('vscan-on-access-policy-info') + for option, zapi_key in self.na_helper.zapi_int_keys.items(): + return_value[option] = self.na_helper.get_value_for_int(from_zapi=True, value=vscan_info.get_child_content(zapi_key)) + for option, zapi_key in self.na_helper.zapi_bool_keys.items(): + return_value[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=vscan_info.get_child_content(zapi_key)) + for option, zapi_key in self.na_helper.zapi_list_keys.items(): + return_value[option] = self.na_helper.get_value_for_list(from_zapi=True, zapi_parent=vscan_info.get_child_by_name(zapi_key)) + for option, zapi_key in self.na_helper.zapi_str_keys.items(): + return_value[option] = vscan_info.get_child_content(zapi_key) + return return_value + elif int(result.get_child_content('num-records')) > 1: + self.module.fail_json(msg='Mutiple Vscan on Access Policy matching %s:' % self.parameters['policy_name']) + return None + + def create_on_access_policy(self): + """ + Create a Vscan on Access policy + :return: none + """ + if self.use_rest: + return self.create_on_access_policy_rest() + access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-create') + access_policy_obj.add_new_child('policy-name', self.parameters['policy_name']) + access_policy_obj.add_new_child('protocol', 'cifs') + access_policy_obj = self._fill_in_access_policy(access_policy_obj) + + try: + self.server.invoke_successfully(access_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + 
self.module.fail_json(msg='Error creating Vscan on Access Policy %s: %s' % + (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc()) + + def status_modify_on_access_policy(self): + """ + Update the status of policy + """ + if self.use_rest: + return self.modify_on_access_policy_rest({'policy_status': False}) + access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-status-modify') + access_policy_obj.add_new_child('policy-name', self.parameters['policy_name']) + access_policy_obj.add_new_child('policy-status', str(self.parameters['policy_status']).lower()) + + try: + self.server.invoke_successfully(access_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying status Vscan on Access Policy %s: %s' % + (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc()) + + def delete_on_access_policy(self): + """ + Delete a Vscan On Access Policy + :return: + """ + if self.use_rest: + return self.delete_on_access_policy_rest() + access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-delete') + access_policy_obj.add_new_child('policy-name', self.parameters['policy_name']) + try: + self.server.invoke_successfully(access_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error Deleting Vscan on Access Policy %s: %s' % + (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc()) + + def modify_on_access_policy(self, modify=None): + """ + Modify a Vscan On Access policy + :return: nothing + """ + if self.use_rest: + return self.modify_on_access_policy_rest(modify) + access_policy_obj = netapp_utils.zapi.NaElement('vscan-on-access-policy-modify') + access_policy_obj.add_new_child('policy-name', self.parameters['policy_name']) + access_policy_obj = self._fill_in_access_policy(access_policy_obj) + try: + self.server.invoke_successfully(access_policy_obj, True) + except 
netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error Modifying Vscan on Access Policy %s: %s' % + (self.parameters['policy_name'], to_native(error)), exception=traceback.format_exc()) + + def _fill_in_access_policy(self, access_policy_obj): + if self.parameters.get('is_scan_mandatory') is not None: + access_policy_obj.add_new_child('is-scan-mandatory', str(self.parameters['is_scan_mandatory']).lower()) + if self.parameters.get('max_file_size'): + access_policy_obj.add_new_child('max-file-size', str(self.parameters['max_file_size'])) + if self.parameters.get('scan_files_with_no_ext') is not None: + access_policy_obj.add_new_child('scan-files-with-no-ext', str(self.parameters['scan_files_with_no_ext'])) + if 'file_ext_to_exclude' in self.parameters: + ext_obj = netapp_utils.zapi.NaElement('file-ext-to-exclude') + access_policy_obj.add_child_elem(ext_obj) + if len(self.parameters['file_ext_to_exclude']) < 1: + ext_obj.add_new_child('file-extension', "") + else: + for extension in self.parameters['file_ext_to_exclude']: + ext_obj.add_new_child('file-extension', extension) + if 'file_ext_to_include' in self.parameters: + ext_obj = netapp_utils.zapi.NaElement('file-ext-to-include') + access_policy_obj.add_child_elem(ext_obj) + for extension in self.parameters['file_ext_to_include']: + ext_obj.add_new_child('file-extension', extension) + if 'filters' in self.parameters: + ui_filter_obj = netapp_utils.zapi.NaElement('filters') + access_policy_obj.add_child_elem(ui_filter_obj) + if len(self.parameters['filters']) < 1: + ui_filter_obj.add_new_child('vscan-on-access-policy-ui-filter', "") + else: + for filter in self.parameters['filters']: + ui_filter_obj.add_new_child('vscan-on-access-policy-ui-filter', filter) + if 'paths_to_exclude' in self.parameters: + path_obj = netapp_utils.zapi.NaElement('paths-to-exclude') + access_policy_obj.add_child_elem(path_obj) + if len(self.parameters['paths_to_exclude']) < 1: + path_obj.add_new_child('file-path', "") + 
else: + for path in self.parameters['paths_to_exclude']: + path_obj.add_new_child('file-path', path) + return access_policy_obj + + def get_on_access_policy_rest(self): + self.svm_uuid = self.get_svm_uuid() + if self.svm_uuid is None: + self.module.fail_json(msg="Error: vserver %s not found" % self.parameters['vserver']) + api = "protocols/vscan/%s/on-access-policies" % self.svm_uuid + query = {'name': self.parameters['policy_name']} + fields = 'svm,name,mandatory,scope,enabled' + record, error = rest_generic.get_one_record(self.rest_api, api, query, fields) + if error: + self.module.fail_json(msg='Error searching Vscan on Access Policy %s: %s' % + (self.parameters['policy_name'], to_native(error))) + if record: + return { + 'max_file_size': self.na_helper.safe_get(record, ['scope', 'max_file_size']), + 'vserver': self.na_helper.safe_get(record, ['svm', 'name']), + 'policy_name': record['name'], + 'is_scan_mandatory': record['mandatory'], + 'policy_status': record['enabled'], + 'scan_files_with_no_ext': self.na_helper.safe_get(record, ['scope', 'scan_without_extension']), + 'file_ext_to_exclude': self.na_helper.safe_get(record, ['scope', 'exclude_extensions']), + 'file_ext_to_include': self.na_helper.safe_get(record, ['scope', 'include_extensions']), + 'paths_to_exclude': self.na_helper.safe_get(record, ['scope', 'exclude_paths']), + 'scan_readonly_volumes': self.na_helper.safe_get(record, ['scope', 'scan_readonly_volumes']), + 'only_execute_access': self.na_helper.safe_get(record, ['scope', 'only_execute_access']) + } + return None + + def get_svm_uuid(self): + uuid, error = rest_vserver.get_vserver_uuid(self.rest_api, self.parameters['vserver'], self.module, True) + return uuid + + def create_on_access_policy_rest(self): + api = "protocols/vscan/%s/on-access-policies" % self.svm_uuid + body = {'name': self.parameters['policy_name']} + body.update(self.form_create_or_modify_body(self.parameters)) + dummy, error = rest_generic.post_async(self.rest_api, api, body) + 
if error:
            self.module.fail_json(msg='Error creating Vscan on Access Policy %s: %s' %
                                      (self.parameters['policy_name'], to_native(error)))

    def modify_on_access_policy_rest(self, modify):
        """Apply the changed attributes in *modify* to the existing policy via REST PATCH."""
        api = "protocols/vscan/%s/on-access-policies" % self.svm_uuid
        body = self.form_create_or_modify_body(modify)
        dummy, error = rest_generic.patch_async(self.rest_api, api, self.parameters['policy_name'], body)
        if error:
            self.module.fail_json(msg='Error Modifying Vscan on Access Policy %s: %s' %
                                      (self.parameters['policy_name'], to_native(error)))

    def form_create_or_modify_body(self, params):
        """Translate module options in *params* into a REST request body.

        Only options present in *params* are emitted, so the same helper serves
        both create (full parameter set) and modify (changed attributes only).
        Booleans are tested with ``is not None`` so explicit False values are
        still sent; list options are sent whenever the key exists, even empty.
        """
        body = {}
        if params.get('is_scan_mandatory') is not None:
            body['mandatory'] = params['is_scan_mandatory']
        if params.get('policy_status') is not None:
            body['enabled'] = params['policy_status']
        if params.get('max_file_size'):
            body['scope.max_file_size'] = params['max_file_size']
        if params.get('scan_files_with_no_ext') is not None:
            body['scope.scan_without_extension'] = params['scan_files_with_no_ext']
        if 'file_ext_to_exclude' in params:
            body['scope.exclude_extensions'] = params['file_ext_to_exclude']
        if 'file_ext_to_include' in params:
            body['scope.include_extensions'] = params['file_ext_to_include']
        if 'paths_to_exclude' in params:
            body['scope.exclude_paths'] = params['paths_to_exclude']
        if params.get('scan_readonly_volumes') is not None:
            body['scope.scan_readonly_volumes'] = params['scan_readonly_volumes']
        if params.get('only_execute_access') is not None:
            body['scope.only_execute_access'] = params['only_execute_access']
        return body

    def delete_on_access_policy_rest(self):
        """Delete the policy via REST; fail the module on any API error."""
        api = "protocols/vscan/%s/on-access-policies" % self.svm_uuid
        dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['policy_name'])
        if error:
            self.module.fail_json(msg='Error Deleting Vscan on Access Policy %s: %s' %
                                      (self.parameters['policy_name'], to_native(error)))

    def apply(self):
        """Decide on create/modify/delete from current state and execute it."""
        modify_policy_state, modify = None, None
current = self.get_on_access_policy() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + # enable/disable policy handled in single modify api with REST. + if not self.use_rest and modify.get('policy_status') is not None: + modify_policy_state = True + # policy cannot be deleted unless its disabled, so disable it before delete. + if cd_action == 'delete' and current['policy_status'] is True and self.parameters.get('policy_status') is False: + modify_policy_state = True + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_on_access_policy() + # by default newly created policy will be in disabled state, enable if policy_status is set in ZAPI. + # REST enable policy on create itself. + if not self.use_rest and self.parameters.get('policy_status'): + modify_policy_state = True + if modify_policy_state: + self.status_modify_on_access_policy() + if cd_action == 'delete': + self.delete_on_access_policy() + if modify: + self.modify_on_access_policy(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify, + extra_responses={'modify_policy_state': modify_policy_state}) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapVscanOnAccessPolicy() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py new file mode 100644 index 000000000..b8391fa3b --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_on_demand_task.py @@ -0,0 +1,407 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: na_ontap_vscan_on_demand_task +short_description: NetApp ONTAP Vscan on demand task configuration. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Configure on demand task for Vscan +options: + state: + description: + - Whether a Vscan on demand task is present or not + choices: ['present', 'absent'] + type: str + default: present + + vserver: + description: + - the name of the data vserver to use. + required: true + type: str + + cross_junction: + description: + - Specifies whether the On-Demand task is allowed to cross volume junctions + - This option is not supported with REST. + - This option defaults to False for ZAPI. + type: bool + + directory_recursion: + description: + - Specifies whether the On-Demand task is allowed to recursively scan through sub-directories. + - This option is not supported with REST. + - This option defaults to False for ZAPI. + type: bool + + file_ext_to_exclude: + description: + - File-Extensions for which scanning must not be performed. + - File whose extension matches with both inclusion and exclusion list is not considered for scanning. + type: list + elements: str + + file_ext_to_include: + description: + - File extensions for which scanning is considered. + - The default value is '*', which means that all files are considered for scanning except those which are excluded from scanning. + - File whose extension matches with both inclusion and exclusion list is not considered for scanning. + type: list + elements: str + + max_file_size: + description: + - Max file-size (in bytes) allowed for scanning. The default value of 10737418240 (10GB) is taken if not provided at the time of creating a task. 
+ type: int + + paths_to_exclude: + description: + - File-paths for which scanning must not be performed. + type: list + elements: str + + report_directory: + description: + - Path from the vserver root where task report is created. The path must be a directory and provided in unix-format from the root of the Vserver. + - Example /vol1/on-demand-reports. + type: str + + report_log_level: + description: + - Log level for the On-Demand report. + - This option is not supported with REST. + - This option defaults to 'error' for ZAPI. + choices: ['verbose', 'info', 'error'] + type: str + + request_timeout: + description: + - Total request-service time-limit in seconds. If the virus-scanner does not respond within the provided time, scan will be timedout. + - This option is not supported with REST. + type: str + + scan_files_with_no_ext: + description: + - Specifies whether files without any extension are considered for scanning or not. + type: bool + default: True + + scan_paths: + description: + - List of paths that need to be scanned. The path must be provided in unix-format and from the root of the Vserver. + - Example /vol1/large_files. + type: list + elements: str + + scan_priority: + description: + - Priority of the On-Demand scan requests generated by this task. + - This option is not supported with REST. + - This option default to 'low' for ZAPI + choices: ['low', 'normal'] + type: str + + schedule: + description: + - Schedule of the task. The task will be run as per the schedule. + - For running the task immediately, vscan-on-demand-task-run api must be used after creating a task. + type: str + + task_name: + description: + - Name of the task. 
+ type: str + required: True +''' + +EXAMPLES = """ + - name: Create Vscan On Demand Task + netapp.ontap.na_ontap_vscan_on_demand_task: + state: present + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: carchi-vsim2 + task_name: carchiOnDemand + scan_paths: / + report_directory: / + file_ext_to_exclude: ['py', 'yml'] + max_file_size: 10737418241 + paths_to_exclude: ['/tmp', '/var'] + report_log_level: info + request_timeout: 60 + + - name: Delete Vscan On Demand Task + netapp.ontap.na_ontap_vscan_on_demand_task: + state: absent + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: carchi-vsim2 + task_name: carchiOnDemand +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapVscanOnDemandTask: + def __init__(self): + self.svm_uuid = None + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + cross_junction=dict(required=False, type='bool'), + directory_recursion=dict(required=False, type='bool'), + file_ext_to_exclude=dict(required=False, type='list', elements='str'), + file_ext_to_include=dict(required=False, type='list', elements='str'), + max_file_size=dict(required=False, type="int"), + paths_to_exclude=dict(required=False, type='list', 
elements='str'), + report_directory=dict(required=False, type='str'), + report_log_level=dict(required=False, type='str', choices=['verbose', 'info', 'error']), + request_timeout=dict(required=False, type='str'), + scan_files_with_no_ext=dict(required=False, type='bool', default=True), + scan_paths=dict(required=False, type='list', elements='str'), + scan_priority=dict(required=False, type='str', choices=['low', 'normal']), + schedule=dict(required=False, type="str"), + task_name=dict(required=True, type="str") + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + required_if=[ + ["state", "present", ["report_directory", "scan_paths"]] + ] + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + unsupported_rest_properties = ['cross_junction', 'directory_recursion', 'report_log_level', 'request_timeout', + 'scan_priority'] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, unsupported_rest_properties) + if not self.use_rest: + if self.parameters.get('cross_junction') is None: + self.parameters['cross_junction'] = False + if self.parameters.get('directory_recursion') is None: + self.parameters['directory_recursion'] = False + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def get_demand_task(self): + """ + Get a demand task + :return: A vscan-on-demand-task-info or None + """ + if self.use_rest: + self.get_svm_uuid() + return self.get_demand_task_rest() + demand_task_iter = netapp_utils.zapi.NaElement("vscan-on-demand-task-get-iter") + demand_task_info = netapp_utils.zapi.NaElement("vscan-on-demand-task-info") + demand_task_info.add_new_child('task-name', self.parameters['task_name']) + query = 
netapp_utils.zapi.NaElement('query') + query.add_child_elem(demand_task_info) + demand_task_iter.add_child_elem(query) + try: + result = self.server.invoke_successfully(demand_task_iter, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error searching for Vscan on demand task %s: %s' % + (self.parameters['task_name'], to_native(error)), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + return result.get_child_by_name('attributes-list').get_child_by_name('vscan-on-demand-task-info') + return None + + def create_demand_task(self): + """ + Create a Demand Task + :return: None + """ + if self.use_rest: + return self.create_demand_task_rest() + demand_task_obj = netapp_utils.zapi.NaElement("vscan-on-demand-task-create") + # Required items first + demand_task_obj.add_new_child('report-directory', self.parameters['report_directory']) + demand_task_obj.add_new_child('task-name', self.parameters['task_name']) + scan_paths = netapp_utils.zapi.NaElement("scan-paths") + for scan_path in self.parameters['scan_paths']: + scan_paths.add_new_child('string', scan_path) + demand_task_obj.add_child_elem(scan_paths) + # Optional items next + if self.parameters.get('cross_junction'): + demand_task_obj.add_new_child('cross-junction', str(self.parameters['cross_junction']).lower()) + if self.parameters.get('directory_recursion'): + demand_task_obj.add_new_child('directory-recursion', str(self.parameters['directory_recursion']).lower()) + if self.parameters.get('file_ext_to_exclude'): + ext_to_exclude_obj = netapp_utils.zapi.NaElement('file-ext-to-exclude') + for exclude_file in self.parameters['file_ext_to_exclude']: + ext_to_exclude_obj.add_new_child('file-extension', exclude_file) + demand_task_obj.add_child_elem(ext_to_exclude_obj) + if self.parameters.get('file_ext_to_include'): + ext_to_include_obj = netapp_utils.zapi.NaElement('file-ext-to-include') + for 
include_file in self.parameters['file_ext_to_exclude']: + ext_to_include_obj.add_child_elem(include_file) + demand_task_obj.add_child_elem(ext_to_include_obj) + if self.parameters.get('max_file_size'): + demand_task_obj.add_new_child('max-file-size', str(self.parameters['max_file_size'])) + if self.parameters.get('paths_to_exclude'): + exclude_paths = netapp_utils.zapi.NaElement('paths-to-exclude') + for path in self.parameters['paths_to_exclude']: + exclude_paths.add_new_child('string', path) + demand_task_obj.add_child_elem(exclude_paths) + if self.parameters.get('report_log_level'): + demand_task_obj.add_new_child('report-log-level', self.parameters['report_log_level']) + if self.parameters.get('request_timeout'): + demand_task_obj.add_new_child('request-timeout', self.parameters['request_timeout']) + if self.parameters.get('scan_files_with_no_ext'): + demand_task_obj.add_new_child('scan-files-with-no-ext', + str(self.parameters['scan_files_with_no_ext']).lower()) + if self.parameters.get('scan_priority'): + demand_task_obj.add_new_child('scan-priority', self.parameters['scan_priority'].lower()) + if self.parameters.get('schedule'): + demand_task_obj.add_new_child('schedule', self.parameters['schedule']) + try: + self.server.invoke_successfully(demand_task_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating on demand task %s: %s' % + (self.parameters['task_name'], to_native(error)), + exception=traceback.format_exc()) + + def delete_demand_task(self): + """ + Delete a Demand Task" + :return: + """ + if self.use_rest: + return self.delete_demand_task_rest() + demand_task_obj = netapp_utils.zapi.NaElement('vscan-on-demand-task-delete') + demand_task_obj.add_new_child('task-name', self.parameters['task_name']) + try: + self.server.invoke_successfully(demand_task_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting on demand task, %s: %s' % + 
(self.parameters['task_name'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_svm_uuid(self):
        """Look up the vserver's UUID via REST and cache it in self.svm_uuid.

        Fails the module if the lookup errors or the vserver does not exist.
        """
        api = 'svm/svms'
        query = {'name': self.parameters['vserver']}
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg='Error fetching svm uuid: %s' % to_native(error))
        if not record:
            self.module.fail_json(msg='Could not find svm uuid for %s' % self.parameters['vserver'])
        self.svm_uuid = record['uuid']

    def get_demand_task_rest(self):
        """Fetch the on-demand policy named task_name via REST.

        :return: the record mapped to module option names, or None when the
                 policy does not exist.
        """
        api = 'protocols/vscan/%s/on-demand-policies' % self.svm_uuid
        # Request only the fields this module manages.
        query = {'name': self.parameters['task_name'],
                 'fields': 'scope.exclude_extensions,'
                           'scope.include_extensions,'
                           'scope.max_file_size,'
                           'scope.exclude_paths,'
                           'log_path,'
                           'scope.scan_without_extension,'
                           'scan_paths,'
                           'schedule.name,'
                           'name'
                 }
        record, error = rest_generic.get_one_record(self.rest_api, api, query)
        if error:
            self.module.fail_json(msg='Error fetching on demand task %s: %s' % (self.parameters['task_name'], to_native(error)))
        if record:
            return self.format_on_demand_task(record)
        return None

    def format_on_demand_task(self, record):
        """Map a REST on-demand-policy record onto this module's option names.

        Missing optional fields come back as None via safe_get.
        """
        return {
            'task_name': record['name'],
            'file_ext_to_exclude': self.na_helper.safe_get(record, ['scope', 'exclude_extensions']),
            'file_ext_to_include': self.na_helper.safe_get(record, ['scope', 'include_extensions']),
            'max_file_size': self.na_helper.safe_get(record, ['scope', 'max_file_size']),
            'paths_to_exclude': self.na_helper.safe_get(record, ['scope', 'exclude_paths']),
            'report_directory': self.na_helper.safe_get(record, ['log_path']),
            'scan_files_with_no_ext': self.na_helper.safe_get(record, ['scope', 'scan_without_extension']),
            'scan_paths': self.na_helper.safe_get(record, ['scan_paths']),
            'schedule': self.na_helper.safe_get(record, ['schedule', 'name']),
        }

    def create_demand_task_rest(self):
        """Create the on-demand policy via REST POST."""
        api = 'protocols/vscan/%s/on-demand-policies' % self.svm_uuid
        body = {
            'name':
self.parameters['task_name'], + 'log_path': self.parameters['report_directory'], + 'scan_paths': self.parameters['scan_paths'], + } + if self.parameters.get('file_ext_to_exclude'): + body['scope.exclude_extensions'] = self.parameters['file_ext_to_exclude'] + if self.parameters.get('file_ext_to_include'): + body['scope.include_extensions'] = self.parameters['file_ext_to_include'] + if self.parameters.get('max_file_size'): + body['scope.max_file_size'] = self.parameters['max_file_size'] + if self.parameters.get('paths_to_exclude'): + body['scope.exclude_paths'] = self.parameters['paths_to_exclude'] + if self.parameters.get('scan_files_with_no_ext'): + body['scope.scan_without_extension'] = self.parameters['scan_files_with_no_ext'] + if self.parameters.get('schedule'): + body['schedule.name'] = self.parameters['schedule'] + dummy, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg='Error creating on demand task %s: %s' % (self.parameters['task_name'], to_native(error))) + + def delete_demand_task_rest(self): + api = 'protocols/vscan/%s/on-demand-policies' % self.svm_uuid + dummy, error = rest_generic.delete_async(self.rest_api, api, self.parameters['task_name']) + if error: + self.module.fail_json(msg='Error deleting on demand task %s: %s' % (self.parameters['task_name'], to_native(error))) + + def apply(self): + current = self.get_demand_task() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'create': + self.create_demand_task() + elif cd_action == 'delete': + self.delete_demand_task() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapVscanOnDemandTask() + command.apply() + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py new file mode 100644 index 000000000..20e480637 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vscan_scanner_pool.py @@ -0,0 +1,297 @@ +#!/usr/bin/python + +# (c) 2018-2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_vscan_scanner_pool +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = ''' +module: na_ontap_vscan_scanner_pool +short_description: NetApp ONTAP Vscan Scanner Pools Configuration. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: 2.8.0 +author: NetApp Ansible Team (@carchi8py) +description: +- Create/Modify/Delete a Vscan Scanner Pool +options: + state: + description: + - Whether a Vscan Scanner pool is present or not + choices: ['present', 'absent'] + type: str + default: present + + vserver: + description: + - the name of the data vserver to use. + required: true + type: str + + hostnames: + description: + - List of hostnames of Vscan servers which are allowed to connect to Data ONTAP + type: list + elements: str + + privileged_users: + description: + - List of privileged usernames. 
Username must be in the form "domain-name\\user-name" + type: list + elements: str + + scanner_pool: + description: + - the name of the virus scanner pool + required: true + type: str + + scanner_policy: + description: + - The name of the Virus scanner Policy + choices: ['primary', 'secondary', 'idle'] + type: str +''' + +EXAMPLES = """ +- name: Create and enable Scanner pool + na_ontap_vscan_scanner_pool: + state: present + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: carchi-vsim2 + hostnames: ['name', 'name2'] + privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi'] + scanner_pool: Scanner1 + scanner_policy: primary + +- name: Modify scanner pool + na_ontap_vscan_scanner_pool: + state: present + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: carchi-vsim2 + hostnames: ['name', 'name2', 'name3'] + privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi', 'sim.rtp.openeng.netapp.com\\chuyic'] + scanner_pool: Scanner1 + +- name: Delete a scanner pool + na_ontap_vscan_scanner_pool: + state: absent + username: '{{ netapp_username }}' + password: '{{ netapp_password }}' + hostname: '{{ netapp_hostname }}' + vserver: carchi-vsim2 + scanner_pool: Scanner1 +""" + +RETURN = """ + +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule + +HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() + + +class NetAppOntapVscanScannerPool(object): + ''' create, modify, delete vscan scanner pool ''' + def __init__(self): + self.use_rest = False 
+ self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + hostnames=dict(required=False, type='list', elements='str'), + privileged_users=dict(required=False, type='list', elements='str'), + scanner_pool=dict(required=True, type='str'), + scanner_policy=dict(required=False, type='str', choices=['primary', 'secondary', 'idle']) + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = OntapRestAPI(self.module) + if HAS_NETAPP_LIB is False: + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def create_scanner_pool(self): + """ + Create a Vscan Scanner Pool + :return: nothing + """ + scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-create') + if self.parameters['hostnames']: + string_obj = netapp_utils.zapi.NaElement('hostnames') + scanner_pool_obj.add_child_elem(string_obj) + for hostname in self.parameters['hostnames']: + string_obj.add_new_child('string', hostname) + if self.parameters['privileged_users']: + users_obj = netapp_utils.zapi.NaElement('privileged-users') + scanner_pool_obj.add_child_elem(users_obj) + for user in self.parameters['privileged_users']: + users_obj.add_new_child('privileged-user', user) + scanner_pool_obj.add_new_child('scanner-pool', self.parameters['scanner_pool']) + try: + self.server.invoke_successfully(scanner_pool_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating Vscan Scanner Pool %s: %s' % + (self.parameters['scanner_policy'], to_native(error)), + exception=traceback.format_exc()) + + 
def apply_policy(self): + """ + Apply a Scanner policy to a Scanner pool + :return: nothing + """ + apply_policy_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-apply-policy') + apply_policy_obj.add_new_child('scanner-policy', self.parameters['scanner_policy']) + apply_policy_obj.add_new_child('scanner-pool', self.parameters['scanner_pool']) + try: + self.server.invoke_successfully(apply_policy_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error appling policy %s to pool %s: %s' % + (self.parameters['scanner_policy'], self.parameters['scanner_policy'], to_native(error)), + exception=traceback.format_exc()) + + def get_scanner_pool(self): + """ + Check to see if a scanner pool exist or not + :return: True if it exist, False if it does not + """ + return_value = None + if self.use_rest: + pass + else: + scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-get-iter') + scanner_pool_info = netapp_utils.zapi.NaElement('vscan-scanner-pool-info') + scanner_pool_info.add_new_child('scanner-pool', self.parameters['scanner_pool']) + scanner_pool_info.add_new_child('vserver', self.parameters['vserver']) + query = netapp_utils.zapi.NaElement('query') + query.add_child_elem(scanner_pool_info) + scanner_pool_obj.add_child_elem(query) + try: + result = self.server.invoke_successfully(scanner_pool_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error searching for Vscan Scanner Pool %s: %s' % + (self.parameters['scanner_pool'], to_native(error)), exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: + if result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info').get_child_content( + 'scanner-pool') == self.parameters['scanner_pool']: + scanner_pool_obj = result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info') + hostname = 
[host.get_content() for host in + scanner_pool_obj.get_child_by_name('hostnames').get_children()] + privileged_users = [user.get_content() for user in + scanner_pool_obj.get_child_by_name('privileged-users').get_children()] + return_value = { + 'hostnames': hostname, + 'enable': scanner_pool_obj.get_child_content('is-currently-active'), + 'privileged_users': privileged_users, + 'scanner_pool': scanner_pool_obj.get_child_content('scanner-pool'), + 'scanner_policy': scanner_pool_obj.get_child_content('scanner-policy') + } + return return_value + + def delete_scanner_pool(self): + """ + Delete a Scanner pool + :return: nothing + """ + scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-delete') + scanner_pool_obj.add_new_child('scanner-pool', self.parameters['scanner_pool']) + try: + self.server.invoke_successfully(scanner_pool_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error deleting Vscan Scanner Pool %s: %s' % + (self.parameters['scanner_pool'], to_native(error)), + exception=traceback.format_exc()) + + def modify_scanner_pool(self, modify): + """ + Modify a scanner pool + :return: nothing + """ + vscan_pool_modify = netapp_utils.zapi.NaElement('vscan-scanner-pool-modify') + vscan_pool_modify.add_new_child('scanner-pool', self.parameters['scanner_pool']) + for key in modify: + if key == 'privileged_users': + users_obj = netapp_utils.zapi.NaElement('privileged-users') + vscan_pool_modify.add_child_elem(users_obj) + for user in modify['privileged_users']: + users_obj.add_new_child('privileged-user', user) + elif key == 'hostnames': + string_obj = netapp_utils.zapi.NaElement('hostnames') + vscan_pool_modify.add_child_elem(string_obj) + for hostname in modify['hostnames']: + string_obj.add_new_child('string', hostname) + elif key != 'scanner_policy': + vscan_pool_modify.add_new_child(self.attribute_to_name(key), str(modify[key])) + + try: + self.server.invoke_successfully(vscan_pool_modify, True) + except 
netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error modifying Vscan Scanner Pool %s: %s' % + (self.parameters['scanner_pool'], to_native(error)), + exception=traceback.format_exc()) + + @staticmethod + def attribute_to_name(attribute): + return str.replace(attribute, '_', '-') + + def apply(self): + scanner_pool_obj = self.get_scanner_pool() + cd_action = self.na_helper.get_cd_action(scanner_pool_obj, self.parameters) + modify = None + if self.parameters['state'] == 'present' and cd_action is None: + modify = self.na_helper.get_modified_attributes(scanner_pool_obj, self.parameters) + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'create': + self.create_scanner_pool() + if self.parameters.get('scanner_policy') is not None: + self.apply_policy() + elif cd_action == 'delete': + self.delete_scanner_pool() + elif modify: + self.modify_scanner_pool(modify) + if self.parameters.get('scanner_policy') is not None: + self.apply_policy() + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppOntapVscanScannerPool() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_audit.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_audit.py new file mode 100644 index 000000000..fc3dc3bed --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_audit.py @@ -0,0 +1,373 @@ +#!/usr/bin/python + +# (c) 2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_vserver_audit +short_description: NetApp Ontap - create, delete or modify vserver audit configuration. 
+extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '22.3.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Create, delete or modify vserver audit configuration. +options: + state: + description: + - Whether the specified vserver audit configuration should exist or not. + choices: ['present', 'absent'] + default: present + type: str + vserver: + description: + - Specifies name of the Vserver. + required: true + type: str + log_path: + description: + - The audit log destination path where consolidated audit logs are stored. + type: str + guarantee: + description: + - Indicates whether there is a strict Guarantee of Auditing. + - This option requires ONTAP 9.10.1 or later. + type: bool + enabled: + description: + - Specifies whether or not auditing is enabled on the SVM. + type: bool + events: + description: + - Specifies events for which auditing is enabled on the SVM. + type: dict + suboptions: + authorization_policy: + description: + - Authorization policy change events. + type: bool + cap_staging: + description: + - Central access policy staging events. + type: bool + cifs_logon_logoff: + description: + - CIFS logon and logoff events. + type: bool + file_operations: + description: + - File operation events. + type: bool + file_share: + description: + - File share category events. + type: bool + security_group: + description: + - Local security group management events. + type: bool + user_account: + description: + - Local user account management events. + type: bool + log: + description: + - Specifies events for which auditing is enabled on the SVM. + type: dict + suboptions: + format: + description: + - This option describes the format in which the logs are generated by consolidation process. 
+ Possible values are, + - xml - Data ONTAP-specific XML log format + - evtx - Microsoft Windows EVTX log format + choices: ['xml', 'evtx'] + type: str + retention: + description: + - This option describes the count and time to retain the audit log file. + type: dict + suboptions: + count: + description: + - Determines how many audit log files to retain before rotating the oldest log file out. + - This is mutually exclusive with duration. + type: int + duration: + description: + - Specifies an ISO-8601 format date and time to retain the audit log file. + - The audit log files are deleted once they reach the specified date/time. + - This is mutually exclusive with count. + type: str + rotation: + description: + - Audit event log files are rotated when they reach a configured threshold log size or are on a configured schedule. + - When an event log file is rotated, the scheduled consolidation task first renames the active converted file to a time-stamped archive file, + and then creates a new active converted event log file. + type: dict + suboptions: + size: + description: + - Rotates logs based on log size in bytes. + - Default value is 104857600. + type: int + +notes: + - This module supports REST only. + - At least one event should be enabled. + - No other fields can be specified when enabled is specified for modify. 
+""" + +EXAMPLES = """ + + - name: Create vserver audit configuration + netapp.ontap.na_ontap_vserver_audit: + state: present + vserver: ansible + enabled: True + events: + authorization_policy: False + cap_staging: False + cifs_logon_logoff: True + file_operations: True + file_share: False + security_group: False + user_account: False + log_path: "/" + log: + format: xml + retention: + count: 4 + rotation: + size: "1048576" + guarantee: False + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify vserver audit configuration + netapp.ontap.na_ontap_vserver_audit: + state: present + vserver: ansible + enabled: True + events: + authorization_policy: True + cap_staging: True + cifs_logon_logoff: True + file_operations: True + file_share: True + security_group: True + user_account: True + log_path: "/tmp" + log: + format: evtx + retention: + count: 5 + rotation: + size: "104857600" + guarantee: True + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete vserver audit configuration + netapp.ontap.na_ontap_vserver_audit: + state: absent + vserver: ansible + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" + +import time +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPVserverAudit: + """ + Class with vserver audit configuration methods + """ + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + 
vserver=dict(required=True, type='str'), + enabled=dict(required=False, type='bool'), + guarantee=dict(required=False, type='bool'), + log_path=dict(required=False, type='str'), + log=dict(type='dict', options=dict( + format=dict(type='str', choices=['xml', 'evtx']), + retention=dict(type='dict', options=dict( + count=dict(type='int'), + duration=dict(type='str'), + )), + rotation=dict(type='dict', options=dict( + size=dict(type='int'), + )), + )), + events=dict(type='dict', options=dict( + authorization_policy=dict(type='bool'), + cap_staging=dict(type='bool'), + cifs_logon_logoff=dict(type='bool'), + file_operations=dict(type='bool'), + file_share=dict(type='bool'), + security_group=dict(type='bool'), + user_account=dict(type='bool'), + )) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.filter_out_none_entries(self.na_helper.set_parameters(self.module.params)) + + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_vserver_audit', 9, 6) + partially_supported_rest_properties = [['guarantee', (9, 10, 1)]] + self.use_rest = self.rest_api.is_rest_supported_properties(self.parameters, None, partially_supported_rest_properties) + self.svm_uuid = None + if 'events' in self.parameters and self.parameters['state'] == 'present': + if all(self.parameters['events'][value] is False for value in self.parameters['events']) is True: + self.module.fail_json(msg="Error: At least one event should be enabled") + + def get_vserver_audit_configuration_rest(self): + """ + Retrieves audit configurations. 
+ """ + api = "protocols/audit" + query = { + 'svm.name': self.parameters['vserver'], + 'fields': 'svm.uuid,enabled,events,log,log_path,' + } + if self.rest_api.meets_rest_minimum_version(self.use_rest, 9, 10, 1): + query['fields'] += 'guarantee,' + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error on fetching vserver audit configuration: %s" % error) + if record: + self.svm_uuid = self.na_helper.safe_get(record, ['svm', 'uuid']) + return { + 'enabled': self.na_helper.safe_get(record, ['enabled']), + 'events': self.na_helper.safe_get(record, ['events']), + 'log': self.na_helper.safe_get(record, ['log']), + 'log_path': self.na_helper.safe_get(record, ['log_path']), + 'guarantee': record.get('guarantee', False), + } + return record + + def create_vserver_audit_config_body_rest(self): + """ + Vserver audit config body for create and modify with rest API. + """ + body = {} + if 'events' in self.parameters: + body['events'] = self.parameters['events'] + if 'guarantee' in self.parameters: + body['guarantee'] = self.parameters['guarantee'] + if self.na_helper.safe_get(self.parameters, ['log', 'retention', 'count']): + body['log.retention.count'] = self.parameters['log']['retention']['count'] + if self.na_helper.safe_get(self.parameters, ['log', 'retention', 'duration']): + body['log.retention.duration'] = self.parameters['log']['retention']['duration'] + if self.na_helper.safe_get(self.parameters, ['log', 'rotation', 'size']): + body['log.rotation.size'] = self.parameters['log']['rotation']['size'] + if self.na_helper.safe_get(self.parameters, ['log', 'format']): + body['log.format'] = self.parameters['log']['format'] + if 'log_path' in self.parameters: + body['log_path'] = self.parameters['log_path'] + return body + + def create_vserver_audit_configuration_rest(self): + """ + Creates an audit configuration. 
+ """ + api = "protocols/audit" + body = self.create_vserver_audit_config_body_rest() + if 'vserver' in self.parameters: + body['svm.name'] = self.parameters.get('vserver') + if 'enabled' in self.parameters: + body['enabled'] = self.parameters['enabled'] + record, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error on creating vserver audit configuration: %s" % error) + + def delete_vserver_audit_configuration_rest(self, current): + """ + Deletes an audit configuration. + """ + api = "protocols/audit/%s" % self.svm_uuid + if current['enabled'] is True: + modify = {'enabled': False} + self.modify_vserver_audit_configuration_rest(modify) + current = self.get_vserver_audit_configuration_rest() + retry = 2 + while retry > 0: + record, error = rest_generic.delete_async(self.rest_api, api, None) + # Delete throws retry after sometime error during first run by default, hence retrying after sometime. + if error and '9699350' in error: + time.sleep(120) + retry -= 1 + elif error: + self.module.fail_json(msg="Error on deleting vserver audit configuration: %s" % error) + else: + return + + def modify_vserver_audit_configuration_rest(self, modify): + """ + Updates audit configuration. 
+ """ + body = {} + if 'enabled' in modify: + body['enabled'] = modify['enabled'] + else: + body = self.create_vserver_audit_config_body_rest() + api = "protocols/audit" + record, error = rest_generic.patch_async(self.rest_api, api, self.svm_uuid, body) + if error: + self.module.fail_json(msg="Error on modifying vserver audit configuration: %s" % error) + + def apply(self): + current = self.get_vserver_audit_configuration_rest() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_vserver_audit_configuration_rest() + elif cd_action == 'delete': + self.delete_vserver_audit_configuration_rest(current) + elif modify: + # No other fields can be specified when enabled is specified for modify + if 'enabled' in modify: + self.modify_vserver_audit_configuration_rest(modify) + modify.pop('enabled') + if modify: + # This method will be called to modify fields other than enabled + self.modify_vserver_audit_configuration_rest(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap vserver audit configuration object and runs the correct play task + """ + obj = NetAppONTAPVserverAudit() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py new file mode 100644 index 000000000..35eaf18c9 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_cifs_security.py @@ -0,0 +1,310 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = ''' +--- +module: na_ontap_vserver_cifs_security +short_description: NetApp ONTAP vserver CIFS security modification +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +version_added: 2.9.0 +author: NetApp Ansible Team (@carchi8py) + +description: + - modify vserver CIFS security. + +options: + + vserver: + description: + - name of the vserver. + required: true + type: str + + kerberos_clock_skew: + description: + - The clock skew in minutes is the tolerance for accepting tickets with time stamps that do not exactly match the host's system clock. + type: int + + kerberos_ticket_age: + description: + - Determine the maximum amount of time in hours that a user's ticket may be used for the purpose of Kerberos authentication. + type: int + + kerberos_renew_age: + description: + - Determine the maximum amount of time in days for which a ticket can be renewed. + type: int + + kerberos_kdc_timeout: + description: + - Determine the timeout value in seconds for KDC connections. + type: int + + is_signing_required: + description: + - Determine whether signing is required for incoming CIFS traffic. + type: bool + + is_password_complexity_required: + description: + - Determine whether password complexity is required for local users. + type: bool + + is_aes_encryption_enabled: + description: + - Determine whether AES-128 and AES-256 encryption mechanisms are enabled for Kerberos-related CIFS communication. + type: bool + + is_smb_encryption_required: + description: + - Determine whether SMB encryption is required for incoming CIFS traffic. + type: bool + + lm_compatibility_level: + description: + - Determine the LM compatibility level. 
+ choices: ['lm_ntlm_ntlmv2_krb', 'ntlm_ntlmv2_krb', 'ntlmv2_krb', 'krb'] + type: str + + referral_enabled_for_ad_ldap: + description: + - Determine whether LDAP referral chasing is enabled or not for AD LDAP connections. + type: bool + + session_security_for_ad_ldap: + description: + - Determine the level of security required for LDAP communications. + choices: ['none', 'sign', 'seal'] + type: str + + smb1_enabled_for_dc_connections: + description: + - Determine if SMB version 1 is used for connections to domain controllers. + choices: ['false', 'true', 'system_default'] + type: str + + smb2_enabled_for_dc_connections: + description: + - Determine if SMB version 2 is used for connections to domain controllers. + choices: ['false', 'true', 'system_default'] + type: str + + use_start_tls_for_ad_ldap: + description: + - Determine whether to use start_tls for AD LDAP connections. + type: bool + + encryption_required_for_dc_connections: + description: + - Specifies whether encryption is required for domain controller connections. + type: bool + version_added: 21.20.0 + + use_ldaps_for_ad_ldap: + description: + - Determine whether to use LDAPS for secure Active Directory LDAP connections. 
+ type: bool + version_added: 21.20.0 + +''' + +EXAMPLES = ''' + - name: modify cifs security + netapp.ontap.na_ontap_vserver_cifs_security: + hostname: "{{ hostname }}" + username: username + password: password + vserver: ansible + is_aes_encryption_enabled: false + lm_compatibility_level: lm_ntlm_ntlmv2_krb + smb1_enabled_for_dc_connections: system_default + smb2_enabled_for_dc_connections: system_default + use_start_tls_for_ad_ldap: false + referral_enabled_for_ad_ldap: false + session_security_for_ad_ldap: none + is_signing_required: false + is_password_complexity_required: false + encryption_required_for_dc_connections: false + use_ldaps_for_ad_ldap: false + + - name: modify cifs security is_smb_encryption_required + netapp.ontap.na_ontap_vserver_cifs_security: + hostname: "{{ hostname }}" + username: username + password: password + vserver: ansible + is_smb_encryption_required: false + + - name: modify cifs security int options + netapp.ontap.na_ontap_vserver_cifs_security: + hostname: "{{ hostname }}" + username: username + password: password + vserver: ansible + kerberos_clock_skew: 10 + kerberos_ticket_age: 10 + kerberos_renew_age: 5 + kerberos_kdc_timeout: 3 +''' + +RETURN = ''' +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule + + +class NetAppONTAPCifsSecurity(object): + ''' + modify vserver cifs security + ''' + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_zapi_only_spec() + self.argument_spec.update(dict( + vserver=dict(required=True, type='str'), + kerberos_clock_skew=dict(required=False, type='int'), + kerberos_ticket_age=dict(required=False, type='int'), + kerberos_renew_age=dict(required=False, type='int'), + kerberos_kdc_timeout=dict(required=False, type='int'), + 
is_signing_required=dict(required=False, type='bool'), + is_password_complexity_required=dict(required=False, type='bool'), + is_aes_encryption_enabled=dict(required=False, type='bool'), + is_smb_encryption_required=dict(required=False, type='bool'), + lm_compatibility_level=dict(required=False, choices=['lm_ntlm_ntlmv2_krb', 'ntlm_ntlmv2_krb', 'ntlmv2_krb', 'krb']), + referral_enabled_for_ad_ldap=dict(required=False, type='bool'), + session_security_for_ad_ldap=dict(required=False, choices=['none', 'sign', 'seal']), + smb1_enabled_for_dc_connections=dict(required=False, choices=['false', 'true', 'system_default']), + smb2_enabled_for_dc_connections=dict(required=False, choices=['false', 'true', 'system_default']), + use_start_tls_for_ad_ldap=dict(required=False, type='bool'), + encryption_required_for_dc_connections=dict(required=False, type='bool'), + use_ldaps_for_ad_ldap=dict(required=False, type='bool') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True, + mutually_exclusive=[('use_ldaps_for_ad_ldap', 'use_start_tls_for_ad_ldap')] + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.na_helper.module_replaces('na_ontap_cifs_server', self.module) + msg = 'Error: na_ontap_vserver_cifs_security only supports ZAPI.netapp.ontap.na_ontap_cifs_server should be used instead.' 
+ self.na_helper.fall_back_to_zapi(self.module, msg, self.parameters) + + self.set_playbook_zapi_key_map() + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg="the python NetApp-Lib module is required") + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) + + def set_playbook_zapi_key_map(self): + + self.na_helper.zapi_int_keys = { + 'kerberos_clock_skew': 'kerberos-clock-skew', + 'kerberos_ticket_age': 'kerberos-ticket-age', + 'kerberos_renew_age': 'kerberos-renew-age', + 'kerberos_kdc_timeout': 'kerberos-kdc-timeout' + } + self.na_helper.zapi_bool_keys = { + 'is_signing_required': 'is-signing-required', + 'is_password_complexity_required': 'is-password-complexity-required', + 'is_aes_encryption_enabled': 'is-aes-encryption-enabled', + 'is_smb_encryption_required': 'is-smb-encryption-required', + 'referral_enabled_for_ad_ldap': 'referral-enabled-for-ad-ldap', + 'use_start_tls_for_ad_ldap': 'use-start-tls-for-ad-ldap', + 'encryption_required_for_dc_connections': 'encryption-required-for-dc-connections', + 'use_ldaps_for_ad_ldap': 'use-ldaps-for-ad-ldap' + } + self.na_helper.zapi_str_keys = { + 'lm_compatibility_level': 'lm-compatibility-level', + 'session_security_for_ad_ldap': 'session-security-for-ad-ldap', + 'smb1_enabled_for_dc_connections': 'smb1-enabled-for-dc-connections', + 'smb2_enabled_for_dc_connections': 'smb2-enabled-for-dc-connections' + } + + def cifs_security_get_iter(self): + """ + get current vserver cifs security. 
+ :return: a dict of vserver cifs security + """ + cifs_security_get = netapp_utils.zapi.NaElement('cifs-security-get-iter') + query = netapp_utils.zapi.NaElement('query') + cifs_security = netapp_utils.zapi.NaElement('cifs-security') + cifs_security.add_new_child('vserver', self.parameters['vserver']) + query.add_child_elem(cifs_security) + cifs_security_get.add_child_elem(query) + cifs_security_details = dict() + try: + result = self.server.invoke_successfully(cifs_security_get, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching cifs security from %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0: + cifs_security_info = result.get_child_by_name('attributes-list').get_child_by_name('cifs-security') + for option, zapi_key in self.na_helper.zapi_int_keys.items(): + cifs_security_details[option] = self.na_helper.get_value_for_int(from_zapi=True, value=cifs_security_info.get_child_content(zapi_key)) + for option, zapi_key in self.na_helper.zapi_bool_keys.items(): + cifs_security_details[option] = self.na_helper.get_value_for_bool(from_zapi=True, value=cifs_security_info.get_child_content(zapi_key)) + for option, zapi_key in self.na_helper.zapi_str_keys.items(): + if cifs_security_info.get_child_content(zapi_key) is None: + cifs_security_details[option] = None + else: + cifs_security_details[option] = cifs_security_info.get_child_content(zapi_key) + return cifs_security_details + return None + + def cifs_security_modify(self, modify): + """ + :param modify: A list of attributes to modify + :return: None + """ + cifs_security_modify = netapp_utils.zapi.NaElement('cifs-security-modify') + for attribute in modify: + cifs_security_modify.add_new_child(self.attribute_to_name(attribute), str(self.parameters[attribute])) + try: + 
self.server.invoke_successfully(cifs_security_modify, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as e: + self.module.fail_json(msg='Error modifying cifs security on %s: %s' + % (self.parameters['vserver'], to_native(e)), + exception=traceback.format_exc()) + + @staticmethod + def attribute_to_name(attribute): + return str.replace(attribute, '_', '-') + + def apply(self): + """Call modify operations.""" + current = self.cifs_security_get_iter() + modify = self.na_helper.get_modified_attributes(current, self.parameters) + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if modify: + self.cifs_security_modify(modify) + result = netapp_utils.generate_result(self.na_helper.changed, modify=modify) + self.module.exit_json(**result) + + +def main(): + obj = NetAppONTAPCifsSecurity() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py new file mode 100644 index 000000000..3c34ccf08 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer.py @@ -0,0 +1,446 @@ +#!/usr/bin/python + +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Create/Delete vserver peer +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap + - netapp.ontap.netapp.na_ontap_peer +module: na_ontap_vserver_peer +options: + state: + choices: ['present', 'absent'] + type: str + description: + - Whether the specified vserver peer should exist or not. + default: present + vserver: + description: + - Specifies name of the source Vserver in the relationship. 
+ required: true + type: str + applications: + type: list + elements: str + description: + - List of applications which can make use of the peering relationship. + - FlexCache supported from ONTAP 9.5 onwards. + peer_vserver: + description: + - Specifies name of the peer Vserver in the relationship. + required: true + type: str + peer_cluster: + description: + - Specifies name of the peer Cluster. + - Required for creating the vserver peer relationship with a remote cluster + type: str + local_name_for_peer: + description: + - Specifies local name of the peer Vserver in the relationship. + - Use this if you see "Error creating vserver peer ... Vserver name conflicts with one of the following". + type: str + local_name_for_source: + description: + - Specifies local name of the source Vserver in the relationship. + - Use this if you see "Error accepting vserver peer ... System generated a name for the peer Vserver because of a naming conflict". + type: str + dest_hostname: + description: + - DEPRECATED - please use C(peer_options). + - Destination hostname or IP address. + - Required for creating the vserver peer relationship with a remote cluster. + type: str + dest_username: + description: + - DEPRECATED - please use C(peer_options). + - Destination username. + - Optional if this is same as source username. + type: str + dest_password: + description: + - DEPRECATED - please use C(peer_options). + - Destination password. + - Optional if this is same as source password. 
+ type: str +short_description: NetApp ONTAP Vserver peering +version_added: 2.7.0 +''' + +EXAMPLES = """ + + - name: Source vserver peer create + netapp.ontap.na_ontap_vserver_peer: + state: present + peer_vserver: ansible2 + peer_cluster: ansibleCluster + local_name_for_peer: peername + local_name_for_source: sourcename + vserver: ansible + applications: ['snapmirror'] + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + peer_options: + hostname: "{{ netapp_dest_hostname }}" + + - name: vserver peer delete + netapp.ontap.na_ontap_vserver_peer: + state: absent + peer_vserver: ansible2 + vserver: ansible + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Source vserver peer create - different credentials + netapp.ontap.na_ontap_vserver_peer: + state: present + peer_vserver: ansible2 + peer_cluster: ansibleCluster + local_name_for_peer: peername + local_name_for_source: sourcename + vserver: ansible + applications: ['snapmirror'] + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + peer_options: + hostname: "{{ netapp_dest_hostname }}" + cert_filepath: "{{ cert_filepath }}" + key_filepath: "{{ key_filepath }}" +""" + +RETURN = """ +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPVserverPeer: + """ + Class with vserver peer methods + """ + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + 
self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + peer_vserver=dict(required=True, type='str'), + peer_cluster=dict(required=False, type='str'), + local_name_for_peer=dict(required=False, type='str'), + local_name_for_source=dict(required=False, type='str'), + applications=dict(required=False, type='list', elements='str'), + peer_options=dict(type='dict', options=netapp_utils.na_ontap_host_argument_spec_peer()), + dest_hostname=dict(required=False, type='str'), + dest_username=dict(required=False, type='str'), + dest_password=dict(required=False, type='str', no_log=True) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + mutually_exclusive=[ + ['peer_options', 'dest_hostname'], + ['peer_options', 'dest_username'], + ['peer_options', 'dest_password'] + ], + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + if self.parameters.get('dest_hostname') is None and self.parameters.get('peer_options') is None: + self.parameters['dest_hostname'] = self.parameters.get('hostname') + if self.parameters.get('dest_hostname') is not None: + # if dest_hostname is present, peer_options is absent + self.parameters['peer_options'] = dict( + hostname=self.parameters.get('dest_hostname'), + username=self.parameters.get('dest_username'), + password=self.parameters.get('dest_password'), + ) + else: + self.parameters['dest_hostname'] = self.parameters['peer_options']['hostname'] + netapp_utils.setup_host_options_from_module_params( + self.parameters['peer_options'], self.module, + netapp_utils.na_ontap_host_argument_spec_peer().keys()) + # Rest API objects + self.use_rest = False + self.rest_api = OntapRestAPI(self.module) + self.src_use_rest = self.rest_api.is_rest() + self.dst_rest_api = OntapRestAPI(self.module, host_options=self.parameters['peer_options']) + 
self.dst_use_rest = self.dst_rest_api.is_rest() + self.use_rest = bool(self.src_use_rest and self.dst_use_rest) + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module, host_options=self.parameters['peer_options']) + + def vserver_peer_get_iter(self, target): + """ + Compose NaElement object to query current vserver using remote-vserver-name and vserver parameters. + :return: NaElement object for vserver-get-iter with query + """ + vserver_peer_get = netapp_utils.zapi.NaElement('vserver-peer-get-iter') + query = netapp_utils.zapi.NaElement('query') + vserver_peer_info = netapp_utils.zapi.NaElement('vserver-peer-info') + vserver, remote_vserver = self.get_local_and_peer_vserver(target) + vserver_peer_info.add_new_child('remote-vserver-name', remote_vserver) + vserver_peer_info.add_new_child('vserver', vserver) + query.add_child_elem(vserver_peer_info) + vserver_peer_get.add_child_elem(query) + return vserver_peer_get + + def get_local_and_peer_vserver(self, target): + if target == 'source': + return self.parameters['vserver'], self.parameters['peer_vserver'] + # else for target peer. 
+ return self.parameters['peer_vserver'], self.parameters['vserver'] + + def vserver_peer_get(self, target='source'): + """ + Get current vserver peer info + :return: Dictionary of current vserver peer details if query successful, else return None + """ + if self.use_rest: + return self.vserver_peer_get_rest(target) + + vserver_peer_get_iter = self.vserver_peer_get_iter(target) + vserver_info = {} + try: + if target == 'source': + result = self.server.invoke_successfully(vserver_peer_get_iter, enable_tunneling=True) + elif target == 'peer': + result = self.dest_server.invoke_successfully(vserver_peer_get_iter, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error fetching vserver peer %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + # return vserver peer details + if result.get_child_by_name('num-records') and \ + int(result.get_child_content('num-records')) > 0: + vserver_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('vserver-peer-info') + vserver_info['peer_vserver'] = vserver_peer_info.get_child_content('remote-vserver-name') + vserver_info['vserver'] = vserver_peer_info.get_child_content('vserver') + vserver_info['local_peer_vserver'] = vserver_peer_info.get_child_content('peer-vserver') # required for delete and accept + vserver_info['peer_state'] = vserver_peer_info.get_child_content('peer-state') + return vserver_info + return None + + def vserver_peer_delete(self, current): + """ + Delete a vserver peer + """ + if self.use_rest: + return self.vserver_peer_delete_rest(current) + + vserver_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children( + 'vserver-peer-delete', **{'peer-vserver': current['local_peer_vserver'], + 'vserver': self.parameters['vserver']}) + try: + self.server.invoke_successfully(vserver_peer_delete, + enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + 
self.module.fail_json(msg='Error deleting vserver peer %s: %s'
+                                  % (self.parameters['vserver'], to_native(error)),
+                                  exception=traceback.format_exc())
+
+    def get_peer_cluster_name(self):
+        """
+        Get local cluster name
+        :return: cluster name
+        """
+        if self.use_rest:
+            return self.get_peer_cluster_name_rest()
+
+        cluster_info = netapp_utils.zapi.NaElement('cluster-identity-get')
+        # if a remote peer exists, get the remote cluster name; otherwise the local cluster name
+        server = self.dest_server if self.is_remote_peer() else self.server
+        try:
+            result = server.invoke_successfully(cluster_info, enable_tunneling=True)
+            return result.get_child_by_name('attributes').get_child_by_name(
+                'cluster-identity-info').get_child_content('cluster-name')
+        except netapp_utils.zapi.NaApiError as error:
+            self.module.fail_json(msg='Error fetching peer cluster name for peer vserver %s: %s'
+                                      % (self.parameters['peer_vserver'], to_native(error)),
+                                      exception=traceback.format_exc())
+
+    def vserver_peer_create(self):
+        """
+        Create a vserver peer
+        """
+        if self.parameters.get('applications') is None:
+            self.module.fail_json(msg='applications parameter is missing')
+        if self.parameters.get('peer_cluster') is None:
+            self.parameters['peer_cluster'] = self.get_peer_cluster_name()
+        if self.use_rest:
+            return self.vserver_peer_create_rest()
+
+        vserver_peer_create = netapp_utils.zapi.NaElement.create_node_with_children(
+            'vserver-peer-create', **{'peer-vserver': self.parameters['peer_vserver'],
+                                      'vserver': self.parameters['vserver'],
+                                      'peer-cluster': self.parameters['peer_cluster']})
+        if 'local_name_for_peer' in self.parameters:
+            vserver_peer_create.add_new_child('local-name', self.parameters['local_name_for_peer'])
+        applications = netapp_utils.zapi.NaElement('applications')
+        for application in self.parameters['applications']:
+            applications.add_new_child('vserver-peer-application', application)
+        vserver_peer_create.add_child_elem(applications)
+        try:
+            
self.server.invoke_successfully(vserver_peer_create, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error creating vserver peer %s: %s' + % (self.parameters['vserver'], to_native(error)), + exception=traceback.format_exc()) + + def is_remote_peer(self): + return ( + self.parameters.get('dest_hostname') is not None + and self.parameters['dest_hostname'] != self.parameters['hostname'] + ) + + def vserver_peer_accept(self): + """ + Accept a vserver peer at destination + """ + # peer-vserver -> remote (source vserver is provided) + # vserver -> local (destination vserver is provided) + if self.use_rest: + return self.vserver_peer_accept_rest('peer') + vserver_peer_info = self.vserver_peer_get('peer') + if vserver_peer_info is None: + self.module.fail_json(msg='Error retrieving vserver peer information while accepting') + vserver_peer_accept = netapp_utils.zapi.NaElement.create_node_with_children( + 'vserver-peer-accept', **{'peer-vserver': vserver_peer_info['local_peer_vserver'], 'vserver': self.parameters['peer_vserver']}) + if 'local_name_for_source' in self.parameters: + vserver_peer_accept.add_new_child('local-name', self.parameters['local_name_for_source']) + try: + self.dest_server.invoke_successfully(vserver_peer_accept, enable_tunneling=True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error accepting vserver peer %s: %s' + % (self.parameters['peer_vserver'], to_native(error)), + exception=traceback.format_exc()) + + def check_and_report_rest_error(self, error, action, where): + if error: + if "job reported error:" in error and "entry doesn't exist" in error: + # ignore RBAC issue with FSx - BURT1467620 (fixed in 9.11.0) - GitHub #45 + self.module.warn('Ignoring job status, assuming success - Issue #45.') + return + self.module.fail_json(msg='Error %s vserver peer relationship on %s: %s' % (action, where, error)) + + def vserver_peer_accept_rest(self, target): + 
vserver_peer_info = self.vserver_peer_get_rest('peer') + if not vserver_peer_info: + self.module.fail_json(msg='Error reading vserver peer information on peer %s' % self.parameters['peer_vserver']) + api = 'svm/peers' + body = {"state": "peered"} + if 'local_name_for_source' in self.parameters: + body['name'] = self.parameters['local_name_for_source'] + dummy, error = rest_generic.patch_async(self.dst_rest_api, api, vserver_peer_info['local_peer_vserver_uuid'], body) + self.check_and_report_rest_error(error, 'accepting', self.parameters['peer_vserver']) + + def vserver_peer_get_rest(self, target): + """ + Get current vserver peer info + :return: Dictionary of current vserver peer details if query successful, else return None + """ + api = 'svm/peers' + vserver_info = {} + vserver, remote_vserver = self.get_local_and_peer_vserver(target) + restapi = self.rest_api if target == 'source' else self.dst_rest_api + options = {'svm.name': vserver, 'peer.svm.name': remote_vserver, 'fields': 'name,svm.name,peer.svm.name,state,uuid'} + record, error = rest_generic.get_one_record(restapi, api, options) + if error: + self.module.fail_json(msg='Error fetching vserver peer %s: %s' % (self.parameters['vserver'], error)) + if record is not None: + vserver_info['vserver'] = self.na_helper.safe_get(record, ['svm', 'name']) + vserver_info['peer_vserver'] = self.na_helper.safe_get(record, ['peer', 'svm', 'name']) + vserver_info['peer_state'] = record.get('state') + # required local_peer_vserver_uuid to delete the peer relationship + vserver_info['local_peer_vserver_uuid'] = record.get('uuid') + vserver_info['local_peer_vserver'] = record['name'] + return vserver_info + return None + + def vserver_peer_delete_rest(self, current): + """ + Delete a vserver peer using rest. 
+        """
+        dummy, error = rest_generic.delete_async(self.rest_api, 'svm/peers', current['local_peer_vserver_uuid'])
+        self.check_and_report_rest_error(error, 'deleting', self.parameters['vserver'])
+
+    def get_peer_cluster_name_rest(self):
+        """
+        Get local cluster name
+        :return: cluster name
+        """
+        api = 'cluster'
+        options = {'fields': 'name'}
+        # if a remote peer exists, get the remote cluster name; otherwise the local cluster name
+        restapi = self.dst_rest_api if self.is_remote_peer() else self.rest_api
+        record, error = rest_generic.get_one_record(restapi, api, options)
+        if error:
+            self.module.fail_json(msg='Error fetching peer cluster name for peer vserver %s: %s'
+                                      % (self.parameters['peer_vserver'], error))
+        if record is not None:
+            return record.get('name')
+        return None
+
+    def vserver_peer_create_rest(self):
+        """
+        Create a vserver peer using rest
+        """
+        api = 'svm/peers'
+        params = {
+            'svm.name': self.parameters['vserver'],
+            'peer.cluster.name': self.parameters['peer_cluster'],
+            'peer.svm.name': self.parameters['peer_vserver'],
+            'applications': self.parameters['applications']
+        }
+        if 'local_name_for_peer' in self.parameters:
+            params['name'] = self.parameters['local_name_for_peer']
+        dummy, error = rest_generic.post_async(self.rest_api, api, params)
+        self.check_and_report_rest_error(error, 'creating', self.parameters['vserver'])
+
+    def apply(self):
+        """
+        Apply action to create/delete or accept vserver peer
+        """
+        current = self.vserver_peer_get()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.vserver_peer_create()
+                # accept only if the peer relationship is on a remote cluster
+                if self.is_remote_peer():
+                    self.vserver_peer_accept()
+            elif cd_action == 'delete':
+                self.vserver_peer_delete(current)
+        result = netapp_utils.generate_result(self.na_helper.changed, cd_action)
+        self.module.exit_json(**result)
+
+
+def main():
+    """Execute 
action""" + module_obj = NetAppONTAPVserverPeer() + module_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer_permissions.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer_permissions.py new file mode 100644 index 000000000..9ed54e96f --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_vserver_peer_permissions.py @@ -0,0 +1,201 @@ +#!/usr/bin/python + +# (c) 2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: na_ontap_vserver_peer_permissions +short_description: NetApp Ontap - create, delete or modify vserver peer permission. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '22.3.0' +author: NetApp Ansible Team (@carchi8py) +description: + - Create, delete or modify vserver peer permission. +options: + state: + description: + - Whether the specified vserver peer permission should exist or not. + choices: ['present', 'absent'] + default: present + type: str + vserver: + description: + - Specifies name of the source Vserver in the relationship. + required: true + type: str + applications: + type: list + elements: str + required: true + description: + - List of applications which can make use of the peering relationship. + - FlexCache supported from ONTAP 9.5 onwards. + cluster_peer: + description: + - Specifies name of the peer Cluster. 
+ type: str + required: true +""" + +EXAMPLES = """ + + - name: Create vserver peer permission for an SVM + netapp.ontap.na_ontap_vserver_peer_permissions: + state: present + vserver: ansible + cluster_peer: test_cluster + applications: ['snapmirror'] + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Modify vserver peer permission for an SVM + netapp.ontap.na_ontap_vserver_peer_permissions: + state: present + vserver: ansible + cluster_peer: test_cluster + applications: ['snapmirror', 'flexcache'] + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete vserver peer permission for an SVM + netapp.ontap.na_ontap_vserver_peer_permissions: + state: absent + vserver: ansible + cluster_peer: test_cluster + applications: ['snapmirror'] + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +""" + +RETURN = """ +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPVserverPeerPermissions: + """ + Class with vserver peer permission methods + """ + + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + vserver=dict(required=True, type='str'), + applications=dict(required=True, type='list', elements='str'), + cluster_peer=dict(required=True, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = 
self.na_helper.set_parameters(self.module.params) + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.rest_api.fail_if_not_rest_minimum_version('na_ontap_vserver_peer_permissions', 9, 6) + self.input_validation() + self.svm_uuid = None + self.cluster_peer_uuid = None + + def input_validation(self): + if self.parameters.get('vserver') == '*': + self.module.fail_json(msg='As svm name * represents all svms and created by default, please provide a specific SVM name') + if self.parameters.get('applications') == [''] and self.parameters.get('state') == 'present': + self.module.fail_json(msg='Applications field cannot be empty, at least one application must be specified') + + def get_vserver_peer_permission_rest(self): + """ + Retrieves SVM peer permissions. + """ + api = "svm/peer-permissions" + query = { + 'svm.name': self.parameters['vserver'], + "cluster_peer.name": self.parameters['cluster_peer'], + 'fields': 'svm.uuid,cluster_peer.uuid,applications' + } + record, error = rest_generic.get_one_record(self.rest_api, api, query) + if error: + self.module.fail_json(msg="Error on fetching vserver peer permissions: %s" % error) + if record: + self.svm_uuid = self.na_helper.safe_get(record, ['svm', 'uuid']) + self.cluster_peer_uuid = self.na_helper.safe_get(record, ['cluster_peer', 'uuid']) + return { + 'applications': self.na_helper.safe_get(record, ['applications']), + } + return None + + def create_vserver_peer_permission_rest(self): + """ + Creates an SVM peer permission. + """ + api = "svm/peer-permissions" + body = { + 'svm.name': self.parameters['vserver'], + 'cluster_peer.name': self.parameters['cluster_peer'], + 'applications': self.parameters['applications'] + } + record, error = rest_generic.post_async(self.rest_api, api, body) + if error: + self.module.fail_json(msg="Error on creating vserver peer permissions: %s" % error) + + def delete_vserver_peer_permission_rest(self): + """ + Deletes the SVM peer permissions. 
+ """ + api = "svm/peer-permissions/%s/%s" % (self.cluster_peer_uuid, self.svm_uuid) + record, error = rest_generic.delete_async(self.rest_api, api, None) + if error: + self.module.fail_json(msg="Error on deleting vserver peer permissions: %s" % error) + + def modify_vserver_peer_permission_rest(self, modify): + """ + Updates the SVM peer permissions. + """ + body = {} + if 'applications' in modify: + body['applications'] = self.parameters['applications'] + api = "svm/peer-permissions/%s/%s" % (self.cluster_peer_uuid, self.svm_uuid) + record, error = rest_generic.patch_async(self.rest_api, api, None, body) + if error: + self.module.fail_json(msg="Error on modifying vserver peer permissions: %s" % error) + + def apply(self): + current = self.get_vserver_peer_permission_rest() + cd_action = self.na_helper.get_cd_action(current, self.parameters) + modify = self.na_helper.get_modified_attributes(current, self.parameters) if cd_action is None else None + if self.na_helper.changed and not self.module.check_mode: + if cd_action == 'create': + self.create_vserver_peer_permission_rest() + elif cd_action == 'delete': + self.delete_vserver_peer_permission_rest() + elif modify: + self.modify_vserver_peer_permission_rest(modify) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action) + self.module.exit_json(**result) + + +def main(): + """ + Creates the NetApp Ontap vserver peer permission object and runs the correct play task + """ + obj = NetAppONTAPVserverPeerPermissions() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py new file mode 100644 index 000000000..b834f210d --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wait_for_condition.py @@ -0,0 +1,402 @@ +#!/usr/bin/python +''' +# (c) 2020-2022, NetApp, Inc +# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author: NetApp Ansible Team (@carchi8py)
+description:
+  - Loop over an ONTAP get status request until a condition is satisfied.
+  - Report a timeout error if C(timeout) is exceeded while waiting for the condition.
+extends_documentation_fragment:
+  - netapp.ontap.netapp.na_ontap
+module: na_ontap_wait_for_condition
+short_description: NetApp ONTAP wait_for_condition. Loop over a get status request until a condition is met.
+version_added: 20.8.0
+options:
+  name:
+    description:
+      - The name of the event to check for.
+      - snapmirror_relationship was added in 21.22.0.
+    choices: ['snapmirror_relationship', 'sp_upgrade', 'sp_version']
+    type: str
+    required: true
+  state:
+    description:
+      - whether the conditions should be present or absent.
+      - if C(present), the module exits when any of the conditions is observed.
+      - if C(absent), the module exits with success when None of the conditions is observed.
+    choices: ['present', 'absent']
+    default: present
+    type: str
+  conditions:
+    description:
+      - one or more conditions to match
+      - C(state) and/or C(transfer_state) for C(snapmirror_relationship),
+      - C(is_in_progress) for C(sp_upgrade),
+      - C(firmware_version) for C(sp_version).
+    type: list
+    elements: str
+    required: true
+  polling_interval:
+    description:
+      - how often to check for the conditions, in seconds.
+    default: 5
+    type: int
+  timeout:
+    description:
+      - how long to wait for the conditions, in seconds.
+    default: 180
+    type: int
+  attributes:
+    description:
+      - a dictionary of custom attributes for the condition.
+      - C(sp_upgrade), C(sp_version) require C(node).
+      - C(sp_version) requires C(expected_version).
+      - C(snapmirror_relationship) requires C(destination_path) and C(expected_state) or C(expected_transfer_state) to match the condition(s).
+ type: dict +''' + +EXAMPLES = """ + - name: wait for sp_upgrade in progress + netapp.ontap.na_ontap_wait_for_condition: + hostname: "{{ ontap_admin_ip }}" + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: true + validate_certs: no + name: sp_upgrade + conditions: is_in_progress + attributes: + node: "{{ node }}" + polling_interval: 30 + timeout: 1800 + + - name: wait for sp_upgrade not in progress + netapp.ontap.na_ontap_wait_for_condition: + hostname: "{{ ontap_admin_ip }}" + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: true + validate_certs: no + name: sp_upgrade + conditions: is_in_progress + state: absent + attributes: + node: "{{ ontap_admin_ip }}" + polling_interval: 30 + timeout: 1800 + + - name: wait for sp_version to match 3.9 + netapp.ontap.na_ontap_wait_for_condition: + hostname: "{{ ontap_admin_ip }}" + username: "{{ ontap_admin_username }}" + password: "{{ ontap_admin_password }}" + https: true + validate_certs: no + name: sp_version + conditions: firmware_version + state: present + attributes: + node: "{{ ontap_admin_ip }}" + expected_version: 3.9 + polling_interval: 30 + timeout: 1800 +""" + +RETURN = """ +states: + description: + - summarized list of observed states while waiting for completion + - reported for success or timeout error + returned: always + type: str +last_state: + description: last observed state for event + returned: always + type: str +""" + +import time +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + + +class NetAppONTAPWFC: + ''' wait for a resource to match a condition or not ''' + + def __init__(self): + self.argument_spec = 
netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str', choices=['snapmirror_relationship', 'sp_upgrade', 'sp_version']), + conditions=dict(required=True, type='list', elements='str'), + polling_interval=dict(required=False, type='int', default=5), + timeout=dict(required=False, type='int', default=180), + attributes=dict(required=False, type='dict') + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[ + ('name', 'sp_upgrade', ['attributes']), + ('name', 'sp_version', ['attributes']), + ], + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.states = [] + self.rest_api = netapp_utils.OntapRestAPI(self.module) + self.use_rest = self.rest_api.is_rest() + + if not self.use_rest: + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, wrap_zapi=True) + + self.resource_configuration = { + 'snapmirror_relationship': { + 'required_attributes': ['destination_path'], + 'conditions': { + 'state': ('state' if self.use_rest else 'not_supported', None), + 'transfer_state': ('transfer.state' if self.use_rest else 'not_supported', None) + } + }, + 'sp_upgrade': { + 'required_attributes': ['node'], + 'conditions': { + 'is_in_progress': ('service_processor.state', 'updating') if self.use_rest else ('is-in-progress', 'true') + } + }, + 'sp_version': { + 'required_attributes': ['node', 'expected_version'], + 'conditions': { + 'firmware_version': ('service_processor.firmware_version' if self.use_rest else 'firmware-version', + self.parameters['attributes'].get('expected_version')) + } + } + } + + name = 'snapmirror_relationship' + if self.parameters['name'] == name: + for condition in 
self.resource_configuration[name]['conditions']: + if condition in self.parameters['conditions']: + self.update_condition_value(name, condition) + + def update_condition_value(self, name, condition): + '''requires an expected value for a condition and sets it''' + expected_value = 'expected_%s' % condition + self.resource_configuration[name]['required_attributes'].append(expected_value) + # we can't update a tuple value, so rebuild the tuple + self.resource_configuration[name]['conditions'][condition] = ( + self.resource_configuration[name]['conditions'][condition][0], + self.parameters['attributes'].get(expected_value)) + + def get_fields(self, name): + return ','.join([field for (field, dummy) in self.resource_configuration[name]['conditions'].values()]) + + def get_key_value(self, record, key): + if self.use_rest: + # with REST, we can have nested dictionaries + key = key.split('.') + return self.na_helper.safe_get(record, key) + return self.get_key_value_zapi(record, key) + + def get_key_value_zapi(self, xml, key): + for child in xml.get_children(): + value = xml.get_child_content(key) + if value is not None: + return value + value = self.get_key_value(child, key) + if value is not None: + return value + return None + + def build_zapi(self, name): + ''' build ZAPI request based on resource name ''' + if name == 'sp_upgrade': + zapi_obj = netapp_utils.zapi.NaElement("service-processor-image-update-progress-get") + zapi_obj.add_new_child('node', self.parameters['attributes']['node']) + return zapi_obj + if name == 'sp_version': + zapi_obj = netapp_utils.zapi.NaElement("service-processor-get") + zapi_obj.add_new_child('node', self.parameters['attributes']['node']) + return zapi_obj + if name in self.resource_configuration: + self.module.fail_json(msg='Error: event %s is not supported with ZAPI. It requires REST.' 
% name) + raise KeyError(name) + + def build_rest_api_kwargs(self, name): + if name in ['sp_upgrade', 'sp_version']: + return { + 'api': 'cluster/nodes', + 'query': {'name': self.parameters['attributes']['node']}, + 'fields': self.get_fields(name) + } + if name == 'snapmirror_relationship': + return { + 'api': 'snapmirror/relationships', + 'query': {'destination.path': self.parameters['attributes']['destination_path']}, + 'fields': self.get_fields(name) + } + raise KeyError(name) + + def extract_condition(self, name, results): + ''' check if any of the conditions is present + return: + None, error if key is not found + condition, None if a key is found with expected value + None, None if every key does not match the expected values + ''' + for condition, (key, value) in self.resource_configuration[name]['conditions'].items(): + status = self.get_key_value(results, key) + if status is None and name == 'snapmirror_relationship' and results and condition == 'transfer_state': + # key is absent when not transferring. 
We convert this to 'idle' + status = 'idle' + self.states.append(str(status)) + if status == str(value): + return condition, None + if status is None: + return None, 'Cannot find element with name: %s in results: %s' % (key, results if self.use_rest else results.to_string()) + # not found, or no match + return None, None + + def get_condition(self, name, rest_or_zapi_args): + '''calls ZAPI or REST and extract condition value''' + record, error = self.get_record_rest(name, rest_or_zapi_args) if self.use_rest else self.get_record_zapi(name, rest_or_zapi_args) + if error: + return None, error + condition, error = self.extract_condition(name, record) + if error is not None: + return condition, error + if self.parameters['state'] == 'present': + if condition in self.parameters['conditions']: + return 'matched condition: %s' % condition, None + else: + if condition is None: + return 'conditions not matched', None + if condition not in self.parameters['conditions']: + return 'conditions not matched: found other condition: %s' % condition, None + return None, None + + def get_record_zapi(self, name, zapi_obj): + ''' calls the ZAPI and extract condition value''' + try: + results = self.server.invoke_successfully(zapi_obj, True) + except netapp_utils.zapi.NaApiError as error: + return None, 'Error running command %s: %s' % (self.parameters['name'], to_native(error)) + return results, None + + def get_record_rest(self, name, rest_api_kwargs): + record, error = rest_generic.get_one_record(self.rest_api, **rest_api_kwargs) + if error: + return None, 'Error running command %s: %s' % (self.parameters['name'], error) + if not record: + return None, "no record for node: %s" % rest_api_kwargs['query'] + return record, None + + def summarize_states(self): + ''' replaces a long list of states with multipliers + eg 'false*5' or 'false*2,true' + return: + state_list as str + last_state + ''' + previous_state = None + count = 0 + summaries = [] + for state in self.states: + if state == 
previous_state: + count += 1 + else: + if previous_state is not None: + summaries.append('%s%s' % (previous_state, '' if count == 1 else '*%d' % count)) + count = 1 + previous_state = state + if previous_state is not None: + summaries.append('%s%s' % (previous_state, '' if count == 1 else '*%d' % count)) + last_state = self.states[-1] if self.states else '' + return ','.join(summaries), last_state + + def wait_for_condition(self, name): + ''' calls the ZAPI and extract condition value - loop until found ''' + time_left = self.parameters['timeout'] + max_consecutive_error_count = 3 + error_count = 0 + rest_or_zapi_args = self.build_rest_api_kwargs(name) if self.use_rest else self.build_zapi(name) + + while time_left > 0: + condition, error = self.get_condition(name, rest_or_zapi_args) + if error is not None: + error_count += 1 + if error_count >= max_consecutive_error_count: + self.module.fail_json(msg='Error: %s - count: %d' % (error, error_count)) + elif condition is not None: + return condition + time.sleep(self.parameters['polling_interval']) + time_left -= self.parameters['polling_interval'] + + conditions = ["%s==%s" % (condition, self.resource_configuration[name]['conditions'][condition][1]) for condition in self.parameters['conditions']] + error = 'Error: timeout waiting for condition%s: %s.' 
%\ + ('s' if len(conditions) > 1 else '', + ', '.join(conditions)) + states, last_state = self.summarize_states() + self.module.fail_json(msg=error, states=states, last_state=last_state) + + def validate_resource(self, name): + if name not in self.resource_configuration: + raise KeyError('%s - configuration entry missing for resource' % name) + + def validate_attributes(self, name): + required = self.resource_configuration[name].get('required_attributes', list()) + msgs = [ + 'attributes: %s is required for resource name: %s' % (attribute, name) + for attribute in required + if attribute not in self.parameters['attributes'] + ] + + if msgs: + self.module.fail_json(msg='Error: %s' % ', '.join(msgs)) + + def validate_conditions(self, name): + conditions = self.resource_configuration[name].get('conditions') + msgs = [ + 'condition: %s is not valid for resource name: %s' % (condition, name) + for condition in self.parameters['conditions'] + if condition not in conditions + ] + + if msgs: + msgs.append('valid condition%s: %s' % + ('s are' if len(conditions) > 1 else ' is', ', '.join(conditions.keys()))) + self.module.fail_json(msg='Error: %s' % ', '.join(msgs)) + + def apply(self): + ''' calls the ZAPI and check conditions ''' + changed = False + name = self.parameters['name'] + self.validate_resource(name) + self.validate_attributes(name) + self.validate_conditions(name) + output = self.wait_for_condition(name) + states, last_state = self.summarize_states() + self.module.exit_json(changed=changed, msg=output, states=states, last_state=last_state) + + +def main(): + """ + Execute action from playbook + """ + command = NetAppONTAPWFC() + command.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py new file mode 100644 index 000000000..49844622c --- /dev/null +++ 
b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_wwpn_alias.py @@ -0,0 +1,194 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_ontap_wwpn_alias +''' + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified' +} + +DOCUMENTATION = ''' + +module: na_ontap_wwpn_alias +author: NetApp Ansible Team (@carchi8py) +short_description: NetApp ONTAP set FCP WWPN Alias +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap +version_added: '20.4.0' +description: + - Create/Delete FCP WWPN Alias + +options: + state: + description: + - Whether the specified alias should exist or not. + choices: ['present', 'absent'] + default: present + type: str + + name: + description: + - The name of the alias to create or delete. + required: true + type: str + + wwpn: + description: + - WWPN of the alias. + type: str + + vserver: + description: + - The name of the vserver to use. 
+ required: true + type: str + +''' + +EXAMPLES = ''' + - name: Create FCP Alias + na_ontap_wwpn_alias: + state: present + name: alias1 + wwpn: 01:02:03:04:0a:0b:0c:0d + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + + - name: Delete FCP Alias + na_ontap_wwpn_alias: + state: absent + name: alias1 + vserver: ansibleVServer + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" +''' + +RETURN = ''' +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +class NetAppOntapWwpnAlias(object): + ''' ONTAP WWPN alias operations ''' + def __init__(self): + + self.argument_spec = netapp_utils.na_ontap_host_argument_spec() + self.argument_spec.update(dict( + state=dict(required=False, choices=[ + 'present', 'absent'], default='present'), + name=dict(required=True, type='str'), + wwpn=dict(required=False, type='str'), + vserver=dict(required=True, type='str') + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[('state', 'present', ['wwpn'])], + supports_check_mode=True + ) + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + # REST API should be used for ONTAP 9.6 or higher. 
+ self.rest_api = OntapRestAPI(self.module) + if self.rest_api.is_rest(): + self.use_rest = True + else: + self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_wwpn_alias')) + + def get_alias(self, uuid): + params = {'fields': 'alias,wwpn', + 'alias': self.parameters['name'], + 'svm.uuid': uuid} + api = 'network/fc/wwpn-aliases' + message, error = self.rest_api.get(api, params) + if error is not None: + self.module.fail_json(msg="Error on fetching wwpn alias: %s" % error) + if message['num_records'] > 0: + return {'name': message['records'][0]['alias'], + 'wwpn': message['records'][0]['wwpn'], + } + else: + return None + + def create_alias(self, uuid, is_modify=False): + params = {'alias': self.parameters['name'], + 'wwpn': self.parameters['wwpn'], + 'svm.uuid': uuid} + api = 'network/fc/wwpn-aliases' + dummy, error = self.rest_api.post(api, params) + if error is not None: + if is_modify: + self.module.fail_json(msg="Error on modifying wwpn alias when trying to re-create alias: %s." % error) + else: + self.module.fail_json(msg="Error on creating wwpn alias: %s." % error) + + def delete_alias(self, uuid, is_modify=False): + api = 'network/fc/wwpn-aliases/%s/%s' % (uuid, self.parameters['name']) + dummy, error = self.rest_api.delete(api) + if error is not None: + if is_modify: + self.module.fail_json(msg="Error on modifying wwpn alias when trying to delete alias: %s." % error) + else: + self.module.fail_json(msg="Error on deleting wwpn alias: %s." % error) + + def get_svm_uuid(self): + """ + Get a svm's UUID + :return: uuid of the svm. 
+ """ + params = {'fields': 'uuid', 'name': self.parameters['vserver']} + api = "svm/svms" + message, error = self.rest_api.get(api, params) + if error is not None: + self.module.fail_json(msg="Error on fetching svm uuid: %s" % error) + return message['records'][0]['uuid'] + + def apply(self): + cd_action, uuid, modify = None, None, None + uuid = self.get_svm_uuid() + current = self.get_alias(uuid) + cd_action = self.na_helper.get_cd_action(current, self.parameters) + if cd_action is None and self.parameters['state'] == 'present': + modify = self.na_helper.get_modified_attributes(current, self.parameters) + + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == 'create': + self.create_alias(uuid) + elif cd_action == 'delete': + self.delete_alias(uuid) + elif modify: + self.delete_alias(uuid, is_modify=True) + self.create_alias(uuid, is_modify=True) + result = netapp_utils.generate_result(self.na_helper.changed, cd_action, modify) + self.module.exit_json(**result) + + +def main(): + alias = NetAppOntapWwpnAlias() + alias.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py new file mode 100644 index 000000000..d928ff941 --- /dev/null +++ b/ansible_collections/netapp/ontap/plugins/modules/na_ontap_zapit.py @@ -0,0 +1,315 @@ +#!/usr/bin/python +''' +# (c) 2020-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +author: NetApp Ansible Team (@carchi8py) +description: + - Call a ZAPI on ONTAP. + - Cluster ZAPIs are run using a cluster admin account. + - Vserver ZAPIs can be run using a vsadmin account or using vserver tunneling (cluster admin with I(vserver option)). 
+ - In case of success, a json dictionary is returned as C(response). + - In case of a ZAPI error, C(status), C(errno), C(reason) are set to help with diagnosing the issue, + - and the call is reported as an error ('failed'). + - Other errors (eg connection issues) are reported as Ansible error. +extends_documentation_fragment: + - netapp.ontap.netapp.na_ontap_zapi +module: na_ontap_zapit +short_description: NetApp ONTAP Run any ZAPI on ONTAP +version_added: "20.4.0" +options: + zapi: + description: + - A dictionary for the zapi and arguments. + - An XML tag I(<a>value</a>) is a dictionary with tag as the key. + - Value can be another dictionary, a list of dictionaries, a string, or nothing. + - eg I(<tag/>) is represented as I(tag:) + - A single zapi can be called at a time. Ansible warns if duplicate keys are found and only uses the last entry. + required: true + type: dict + vserver: + description: + - if provided, forces vserver tunneling. username identifies a cluster admin account. + type: str +''' + +EXAMPLES = """ +- + name: Ontap ZAPI + hosts: localhost + gather_facts: False + collections: + - netapp.ontap + vars: + login: &login + hostname: "{{ admin_ip }}" + username: "{{ admin_username }}" + password: "{{ admin_password }}" + https: true + validate_certs: false + svm_login: &svm_login + hostname: "{{ svm_admin_ip }}" + username: "{{ svm_admin_username }}" + password: "{{ svm_admin_password }}" + https: true + validate_certs: false + + tasks: + - name: run ontap ZAPI command as cluster admin + netapp.ontap.na_ontap_zapit: + <<: *login + zapi: + system-get-version: + register: output + - debug: var=output + + - name: run ontap ZAPI command as cluster admin + netapp.ontap.na_ontap_zapit: + <<: *login + zapi: + vserver-get-iter: + register: output + - debug: var=output + + - name: run ontap ZAPI command as cluster admin + netapp.ontap.na_ontap_zapit: + <<: *login + zapi: + vserver-get-iter: + desired-attributes: + vserver-info: + - aggr-list: + - aggr-name + -
allowed-protocols: + - protocols + - vserver-aggr-info-list: + - vserver-aggr-info + - uuid + query: + vserver-info: + vserver-name: trident_svm + register: output + - debug: var=output + + - name: run ontap ZAPI command as vsadmin + netapp.ontap.na_ontap_zapit: + <<: *svm_login + zapi: + vserver-get-iter: + desired-attributes: + vserver-info: + - uuid + register: output + - debug: var=output + + - name: run ontap ZAPI command as vserver tunneling + netapp.ontap.na_ontap_zapit: + <<: *login + vserver: trident_svm + zapi: + vserver-get-iter: + desired-attributes: + vserver-info: + - uuid + register: output + - debug: var=output + + - name: run ontap active-directory ZAPI command + netapp.ontap.na_ontap_zapit: + <<: *login + vserver: trident_svm + zapi: + active-directory-account-create: + account-name: testaccount + admin-username: testuser + admin-password: testpass + domain: testdomain + organizational-unit: testou + register: output + ignore_errors: True + - debug: var=output + +""" + +RETURN = """ +response: + description: + - If successful, a json dictionary representing the data returned by the ZAPI. + - If the ZAPI was executed but failed, an empty dictionary. + - Not present if the ZAPI call cannot be performed. + returned: On success + type: dict +status: + description: + - If the ZAPI was executed but failed, the status set by the ZAPI. + - Not present if successful, or if the ZAPI call cannot be performed. + returned: On error + type: str +errno: + description: + - If the ZAPI was executed but failed, the error code set by the ZAPI. + - Not present if successful, or if the ZAPI call cannot be performed. + returned: On error + type: str +reason: + description: + - If the ZAPI was executed but failed, the error reason set by the ZAPI. + - Not present if successful, or if the ZAPI call cannot be performed.
+ returned: On error + type: str +""" + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +try: + import xmltodict + HAS_XMLTODICT = True +except ImportError: + HAS_XMLTODICT = False + +try: + import json + HAS_JSON = True +except ImportError: + HAS_JSON = False + + +class NetAppONTAPZapi: + ''' calls a ZAPI command ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_ontap_zapi_only_spec() + self.argument_spec.update(dict( + zapi=dict(required=True, type='dict'), + vserver=dict(required=False, type='str'), + )) + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=False + ) + parameters = self.module.params + # set up state variables + self.zapi = parameters['zapi'] + self.vserver = parameters['vserver'] + + if not HAS_JSON: + self.module.fail_json(msg="the python json module is required") + if not netapp_utils.has_netapp_lib(): + self.module.fail_json(msg=netapp_utils.netapp_lib_is_required()) + if not HAS_XMLTODICT: + self.module.fail_json(msg="the python xmltodict module is required") + + if self.vserver is not None: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver) + else: + self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) + + def jsonify_and_parse_output(self, xml_data): + ''' convert from XML to JSON + extract status and error fields is present + ''' + try: + as_str = xml_data.to_string() + except Exception as exc: + self.module.fail_json(msg='Error running zapi in to_string: %s' % + str(exc)) + try: + as_dict = xmltodict.parse(as_str, xml_attribs=True) + except Exception as exc: + self.module.fail_json(msg='Error running zapi in xmltodict: %s: %s' % + (as_str, str(exc))) + try: + as_json = json.loads(json.dumps(as_dict)) + except Exception as exc: + self.module.fail_json(msg='Error running zapi 
in json load/dump: %s: %s' % + (as_dict, str(exc))) + + if 'results' not in as_json: + self.module.fail_json(msg='Error running zapi, no results field: %s: %s' % + (as_str, repr(as_json))) + + # set status, and if applicable errno/reason, and remove attribute fields + errno = None + reason = None + response = as_json.pop('results') + status = response.get('@status', 'no_status_attr') + if status != 'passed': + # collect errno and reason + errno = response.get('@errno', None) + if errno is None: + errno = response.get('errorno', None) + if errno is None: + errno = 'ESTATUSFAILED' + reason = response.get('@reason', None) + if reason is None: + reason = response.get('reason', None) + if reason is None: + reason = 'Execution failure with unknown reason.' + + for key in ('@status', '@errno', '@reason', '@xmlns'): + try: + # remove irrelevant info + del response[key] + except KeyError: + pass + return response, status, errno, reason + + def run_zapi(self): + ''' calls the ZAPI ''' + zapi_struct = self.zapi + error = None + if not isinstance(zapi_struct, dict): + error = 'A directory entry is expected, eg: system-get-version: ' + zapi = zapi_struct + else: + zapi = list(zapi_struct.keys()) + if len(zapi) != 1: + error = 'A single ZAPI can be called at a time' + else: + zapi = zapi[0] + + # log first, then error out as needed + if error: + self.module.fail_json(msg='%s, received: %s' % (error, zapi)) + + zapi_obj = netapp_utils.zapi.NaElement(zapi) + attributes = zapi_struct[zapi] + if attributes is not None and attributes != 'None': + zapi_obj.translate_struct(attributes) + + try: + output = self.server.invoke_elem(zapi_obj, True) + except netapp_utils.zapi.NaApiError as error: + self.module.fail_json(msg='Error running zapi %s: %s' % + (zapi, to_native(error)), + exception=traceback.format_exc()) + + return self.jsonify_and_parse_output(output) + + def apply(self): + ''' calls the zapi and returns json output ''' + response, status, errno, reason = self.run_zapi() + if 
status == 'passed': + self.module.exit_json(changed=True, response=response) + msg = 'ZAPI failure: check errno and reason.' + self.module.fail_json(changed=False, response=response, status=status, errno=errno, reason=reason, msg=msg) + + +def main(): + """ + Execute action from playbook + """ + zapi = NetAppONTAPZapi() + zapi.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/ontap/requirements.txt b/ansible_collections/netapp/ontap/requirements.txt new file mode 100644 index 000000000..03d5b106e --- /dev/null +++ b/ansible_collections/netapp/ontap/requirements.txt @@ -0,0 +1,9 @@ +deepdiff +ipaddress +isodate +jmespath +netapp-lib +requests +six +solidfire-sdk-python +xmltodict diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/LICENSE b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/LICENSE new file mode 100644 index 000000000..20d40b6bc --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
\ No newline at end of file diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/README.md b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/README.md new file mode 100644 index 000000000..0720ac4b3 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/README.md @@ -0,0 +1,131 @@ +na_ontap_cluster_config +========= + +Configure one or more of the following ONTAP settings: + +Licenses +Disk Assignments +Cluster DNS +NTP +SNMP +MOTD +Aggregates +Ports +Interface Groups +VLANS +Broadcast Domains +Intercluster LIFs + +Requirements +------------ + +Since this uses the NetApp ONTAP modules it will require the python library netapp-lib as well as the Ansible 2.8 release. + +Role Variables +-------------- +``` +# For ZAPI: +cluster: +# For REST, cluster should be null +cluster: +netapp_hostname: +netapp_username: +netapp_password: + +#Whether each variable is != or == None determines if its section runs. Each variable will take one or more dictionary entries. Simply omit sections +#that you don't want to run. The following would run all sections + +license_codes: AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAA,AAAAAAAAAAAAAA + +disks: # currently the disks module assigns all visible disks to a node. 
If you want to split disks, currently that has to be done manually + - cluster-01 + - cluster-02 + +motd: "The login message you would like displayed when someone ssh's into the system" + +dns: + - { dns_domains: ansible.local, dns_nameservers: 1.1.1.1 } + +ntp: + - { server_name: time.nist.gov, version: auto } + +snmp: + - { community_name: public, access_control: ro } + +aggrs: + - { name: aggr1, node: cluster-01, disk_count: 26, max_raid: 26 } + - { name: aggr2, node: cluster-02, disk_count: 26, max_raid: 26 } + +ports: #* Ports also has variables 'autonegotiate' and 'flowcontrol', which default to true and none respectively, but can be overridden by your playbook + - { node: cluster-01, port: e0c, mtu: 9000 } + - { node: cluster-01, port: e0d, mtu: 9000, flowcontrol: none, autonegotiate: false } + + +ifgrps: + - { name: a0a, node: cluster-01, ports: "e0a,e0b", mode: multimode, mtu: 9000 } + - { name: a0a, node: cluster-02, ports: "e0a,e0b", mode: multimode, mtu: 9000 } + +vlans: + - { id: 201, node: cluster-01, parent: a0a } + +bcasts: + - { name: Backup, mtu: 9000, ipspace: default, ports: 'cluster-01:e0c,vsim-02:e0c' } + +# with REST, ipspace is required. It is ignored with ZAPI. broadcast_domain and service_policy are also supported with REST. +inters: + - { name: intercluster_1, address: 172.32.0.187, netmask: 255.255.255.0, node: cluster-01, port: e0c, ipspace: Default } + - { name: intercluster_2, address: 172.32.0.188, netmask: 255.255.255.0, node: cluster-02, port: e0c, ipspace: Default } +``` +Dependencies +------------ + +The tasks in this role are dependent on information from the na_ontap_gather_facts module. +The task for na_ontap_gather_facts cannot be excluded. + +Example Playbook +---------------- +``` +--- +- hosts: localhost + collections: + - netapp.ontap + vars_files: + - globals.yml + roles: + - na_ontap_cluster_config + ``` + +I use a globals file to hold my variables. 
+``` +cluster_name: cluster + +netapp_hostname: 172.32.0.182 +netapp_username: admin +netapp_password: netapp123 + +license_codes: + +aggrs: + - { name: aggr1, node: cluster-01, disk_count: 26, max_raid: 26 } + - { name: aggr2, node: cluster-02, disk_count: 26, max_raid: 26 } + +ifgrps: + - { name: a0a, node: cluster-01, port: "e0a", mode: multimode } + - { name: a0a, node: cluster-02, port: "e0a", mode: multimode } + - { name: a0a, node: cluster-01, port: "e0b", mode: multimode } + - { name: a0a, node: cluster-02, port: "e0b", mode: multimode } + +inters: + - { name: intercluster_1, address: 172.32.0.187, netmask: 255.255.255.0, node: cluster-01, port: e0c, ipspace: Default } + - { name: intercluster_2, address: 172.32.0.188, netmask: 255.255.255.0, node: cluster-02, port: e0c, ipspace: Default } +``` + +License +------- + +GNU v3 + +Author Information +------------------ +NetApp +http://www.netapp.io diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/defaults/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/defaults/main.yml new file mode 100644 index 000000000..976f64f4b --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/defaults/main.yml @@ -0,0 +1,25 @@ +--- +netapp_hostname: +netapp_password: +netapp_username: +validate_certs: false +license_codes: +disks: +motd: +dns: +ntp: +snmp: +aggrs: +ports: +ifgrps: +vlans: +bcasts: +inters: +cluster: +cluster_name: temp +autonegotiate: true +flowcontrol: none +distribution_function: ip +ifgrp_mode: multimode_lacp +bcast_ipspace: Default +mtu: 9000 diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/handlers/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/handlers/main.yml new file mode 100644 index 000000000..a0d9419cd --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for na-ontap-cluster-config diff 
--git a/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/meta/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/meta/main.yml new file mode 100644 index 000000000..76068c016 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/meta/main.yml @@ -0,0 +1,9 @@ +galaxy_info: + author: "NetApp" + description: "Role for configuring an ONTAP cluster" + company: "NetApp" + license: BSD + min_ansible_version: 2.8 + platforms: + galaxy_tags: [netapp, ontap] + dependencies: [] diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tasks/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tasks/main.yml new file mode 100644 index 000000000..3889b3a60 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tasks/main.yml @@ -0,0 +1,210 @@ +--- +# tasks file for na-ontap-cluster-config +- name: Setup licenses + na_ontap_license: + state: present + license_codes: "{{ license_codes }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + when: license_codes != None +- name: Assign Disks + na_ontap_disks: + node: "{{ item }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ disks }}" + when: disks != None +- name: Set Login Message + na_ontap_login_messages: + vserver: "{{ cluster }}" + motd_message: "{{ motd }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + # TODO: support cluster scope in na_ontap_login_messages when using REST + use_rest: never + when: motd != None +- name: Setup DNS + na_ontap_dns: + state: present + vserver: "{{ cluster }}" + domains: "{{ item.dns_domains }}" + 
nameservers: "{{ item.dns_nameservers }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ dns }}" + when: dns != None +- name: Set NTP Server + na_ontap_ntp: + state: present + server_name: "{{ item.server_name }}" + version: "{{ item.version }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ ntp }}" + when: ntp != None +- name: Create SNMP community + na_ontap_snmp: + community_name: "{{ item.community_name }}" + access_control: "{{ item.access_control }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ snmp }}" + when: snmp != None +- name: Create Aggregates + na_ontap_aggregate: + state: present + service_state: online + name: "{{ item.name }}" + nodes: "{{ item.node }}" + disk_count: "{{ item.disk_count }}" + raid_size: "{{ item.max_raid }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ aggrs }}" + when: aggrs != None +- name: Remove ports from Default broadcast domain + na_ontap_broadcast_domain_ports: + state: absent + broadcast_domain: Default + ports: "{{ item.node }}:{{ item.port }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ ports }}" + when: ports != None + +# Configure flowcontrol and autonegotiate. +# Skip MTU for now b/c we have to configure IFGroup first. 
+- name: Modify Net Port + na_ontap_net_port: + state: present + node: "{{ item.node }}" + port: "{{ item.port }}" + # mtu: "{{ item.mtu }}" + autonegotiate_admin: "{{ item.autonegotiate | default(autonegotiate) }}" + flowcontrol_admin: "{{ item.flowcontrol | default(flowcontrol) }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ ports }}" + when: ports != None +- name: Create Interface Group + na_ontap_net_ifgrp: + state: present + distribution_function: "{{ distribution_function }}" + name: "{{ item.name }}" + ports: "{{ item.ports }}" + mode: "{{ item.mode | default(ifgrp_mode) }}" + node: "{{ item.node }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ ifgrps }}" + when: ifgrps != None + +# Set MTU - Interface group must be configured first. 
+- name: Modify Net Port + na_ontap_net_port: + state: present + node: "{{ item.node }}" + port: "{{ item.name }}" + mtu: "{{ item.mtu }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ ifgrps }}" + when: ifgrps != None + +- name: Create VLAN + na_ontap_net_vlan: + state: present + vlanid: "{{ item.id }}" + node: "{{ item.node }}" + parent_interface: "{{ item.parent }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ vlans }}" + when: vlans != None + +- name: create broadcast domain + na_ontap_broadcast_domain: + state: present + broadcast_domain: "{{ item.name }}" + mtu: "{{ item.mtu }}" + ipspace: "{{ bcast_ipspace }}" + ports: "{{ item.ports }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ bcasts }}" + when: bcasts != None + +- name: Create Intercluster Lif + na_ontap_interface: + state: present + interface_name: "{{ item.name }}" + home_port: "{{ item.port }}" + home_node: "{{ item.node }}" + role: intercluster + admin_status: up + failover_policy: local-only + is_auto_revert: true + address: "{{ item.address }}" + netmask: "{{ item.netmask }}" + vserver: "{{ cluster }}" + ipspace: "{{ item.ipspace | default(omit) }}" + broadcast_domain: "{{ item.broadcast_domain | default(omit) }}" + service_policy: "{{ item.service_policy | default(omit) }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ inters }}" + when: inters != None diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/inventory 
b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/test.yml b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/test.yml new file mode 100644 index 000000000..19887d5d6 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - na-ontap-cluster-config diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/vars/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/vars/main.yml new file mode 100644 index 000000000..68afc6e9d --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_cluster_config/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for na-ontap-cluster-config diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/LICENSE b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/LICENSE new file mode 100644 index 000000000..f288702d2 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/README.md b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/README.md new file mode 100644 index 000000000..1a7f489a9 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/README.md @@ -0,0 +1,65 @@ +na_ontap_nas_create +========= + +Create one or more NFS or CIFS exports + +Requirements +------------ + +Since this uses the NetApp ONTAP modules it will require the python library netapp-lib as well as the Ansible 2.8 release. + +Role Variables +-------------- +``` +cluster: +netapp_hostname: +netapp_username: +netapp_password: + +nas: + - { name: nfs_share, protocol: nfs, vserver: nfs_vserver, client: 172.32.0.201, ro: sys, rw: sys, su: sys, aggr: aggr1, size: 10, share: share_name } +# If you are creating an NFS export you will omit the share: section. +# If you are creating a CIFS share you may omit the ro, rw, su, client sections. + +``` +Dependencies +------------ + +The tasks in this role are dependent on information from the na_ontap_gather_facts module. +The task for na_ontap_gather_facts can not be excluded. + +Example Playbook +---------------- +``` +--- +- hosts: localhost + collections: + - netapp.ontap + vars_files: + - globals.yml + roles: + - na_ontap_nas_create +``` + +I use a globals file to hold my variables. 
+``` +cluster_name: cluster + +netapp_hostname: 172.32.0.182 +netapp_username: admin +netapp_password: netapp123 + +nas: + - { name: nfs_share, protocol: nfs, vserver: nfs_vserver, client: 172.32.0.201, ro: sys, rw: sys, su: sys, aggr: aggr1, size: 10 } + - { name: cifs_share, protocol: cifs, vserver: cifs_vserver, aggr: aggr1, size: 10, share: cifs_share_1 } +``` + +License +------- + +GNU v3 + +Author Information +------------------ +NetApp +http://www.netapp.io diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/defaults/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/defaults/main.yml new file mode 100644 index 000000000..cab01812d --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/defaults/main.yml @@ -0,0 +1,7 @@ +--- +# defaults file for na-ontap-nas-create +netapp_hostname: +netapp_password: +netapp_username: +validate_certs: false +nas: diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/handlers/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/handlers/main.yml new file mode 100644 index 000000000..0eac93e46 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for na-ontap-nas-create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/meta/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/meta/main.yml new file mode 100644 index 000000000..dcf84686b --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/meta/main.yml @@ -0,0 +1,9 @@ +galaxy_info: + author: "NetApp" + description: "Role for creating NFS and CIFS shares" + company: "NetApp" + license: BSD + min_ansible_version: 2.8 + platforms: + galaxy_tags: [netapp, ontap] + dependencies: [] diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tasks/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tasks/main.yml 
new file mode 100644 index 000000000..97596df21 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tasks/main.yml @@ -0,0 +1,63 @@ +--- +- name: Create Policy + na_ontap_export_policy: + state: present + name: "{{ item.name }}" + vserver: "{{ item.vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ nas }}" + when: item.protocol == "nfs" +- name: Setup rules + na_ontap_export_policy_rule: + state: present + policy_name: "{{ item.name }}" + vserver: "{{ item.vserver }}" + client_match: "{{ item.client }}" + ro_rule: "{{ item.ro }}" + rw_rule: "{{ item.rw }}" + super_user_security: "{{ item.su }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ nas }}" + when: item.protocol == "nfs" +- name: Create volume + na_ontap_volume: + state: present + name: "{{ item.name }}" + aggregate_name: "{{ item.aggr }}" + size: "{{ item.size }}" + size_unit: gb + policy: "{{ 'default' if item.protocol.lower() == 'cifs' else item.name }}" + junction_path: "/{{ item.name }}" + space_guarantee: "none" + vserver: "{{ item.vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ nas }}" +- name: Create Share + na_ontap_cifs: + state: present + share_name: "{{ item.share }}" + path: "/{{ item.name }}" + vserver: "{{ item.vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ nas }}" + when: item.share is defined diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/inventory 
b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/test.yml b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/test.yml new file mode 100644 index 000000000..59eed91a3 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - na-ontap-nas-create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/vars/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/vars/main.yml new file mode 100644 index 000000000..6970cc80c --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_nas_create/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for na-ontap-nas-create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_san_create/LICENSE b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/LICENSE new file mode 100644 index 000000000..f288702d2 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_san_create/README.md b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/README.md new file mode 100644 index 000000000..2e52d7460 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/README.md @@ -0,0 +1,67 @@ +na_ontap_san_create +========= + +Create one or more luns for iSCSI or FCP + +Requirements +------------ + +Since this uses the NetApp ONTAP modules it will require the python library netapp-lib as well as the Ansible 2.8 release. + +Role Variables +-------------- +``` +cluster: +netapp_hostname: +netapp_username: +netapp_password: + +igroups: + - { name: igroup1, vserver: san_vserver, group_type: iscsi, ostype: linux, initiator: "" } # the quotes for iqn/wwpn are necessary because of the : in them. +luns: + - { name: lun1, vol_name: lun_vol, aggr: aggr1, vserver: san_vserver, size: 10, ostype: linux, space_reserve: false, igroup: igroup1 } + +``` +Dependencies +------------ + +The tasks in this role are dependent on information from the na_ontap_gather_facts module. +The task for na_ontap_gather_facts cannot be excluded. + +Example Playbook +---------------- +``` +--- +- hosts: localhost + collections: + - netapp.ontap + vars_files: + - globals.yml + roles: + - na_ontap_san_create +``` + +I use a globals file to hold my variables. 
+``` +cluster_name: cluster + +netapp_hostname: 172.32.0.182 +netapp_username: admin +netapp_password: netapp123 + +igroups: + - { name: igroup1, vserver: san_vserver, group_type: iscsi, ostype: linux, initiator: "iqn.1994-05.com.redhat:2750d14d868d" } + +luns: + - { name: lun1, vol_name: lun_vol, vserver: san_vserver, size: 10, ostype: linux, space_reserve: false, igroup: igroup1 } +``` + +License +------- + +GNU v3 + +Author Information +------------------ +NetApp +http://www.netapp.io diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_san_create/defaults/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/defaults/main.yml new file mode 100644 index 000000000..60fbcbf57 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# defaults file for na-ontap-san-create +netapp_hostname: +netapp_password: +netapp_username: +validate_certs: false +igroups: +luns: diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_san_create/handlers/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/handlers/main.yml new file mode 100644 index 000000000..d2c5a8d62 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for na-ontap-san-create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_san_create/meta/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/meta/main.yml new file mode 100644 index 000000000..cf44f8d4c --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/meta/main.yml @@ -0,0 +1,9 @@ +galaxy_info: + author: "NetApp" + description: "Role for creating LUNs" + company: "NetApp" + license: BSD + min_ansible_version: 2.8 + platforms: + galaxy_tags: [netapp, ontap] + dependencies: [] diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tasks/main.yml 
b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tasks/main.yml new file mode 100644 index 000000000..23f50ba4e --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tasks/main.yml @@ -0,0 +1,65 @@ +--- +- name: Create iGroup + na_ontap_igroup: + state: present + name: "{{ item.name }}" + vserver: "{{ item.vserver }}" + initiator_group_type: "{{ item.group_type }}" + ostype: "{{ item.ostype }}" + initiator: "{{ item.initiator }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ igroups }}" + when: igroups != None +- name: Create volume + na_ontap_volume: + state: present + name: "{{ item.vol_name }}" + aggregate_name: "{{ item.aggr }}" + size: "{{ (item.size | int * 1.05) | round(0, 'ceil') | int }}" + size_unit: gb + space_guarantee: none + vserver: "{{ item.vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ luns }}" +- name: Lun Create + na_ontap_lun: + state: present + name: "{{ item.name }}" + flexvol_name: "{{ item.vol_name }}" + vserver: "{{ item.vserver }}" + size: "{{ item.size }}" + size_unit: gb + ostype: "{{ item.ostype }}" + space_reserve: "{{ item.space_reserve }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ luns }}" + when: luns != None +- name: Create LUN mapping + na_ontap_lun_map: + state: present + initiator_group_name: "{{ item.igroup }}" + path: "/vol/{{ item.vol_name }}/{{ item.name }}" + vserver: "{{ item.vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ 
luns }}" + when: luns != None diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/inventory b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/test.yml b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/test.yml new file mode 100644 index 000000000..c308417e3 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - na_ontap_san_create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_san_create/vars/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/vars/main.yml new file mode 100644 index 000000000..446a5b08b --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_san_create/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for na-ontap-san-create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/README.md b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/README.md new file mode 100644 index 000000000..9fd214819 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/README.md @@ -0,0 +1,70 @@ +na_ontap_snapmirror_create +========= + +Create or verify the following + +Cluster peer +Vserver peer +Destination volume +Snapmirror relationship + +Requirements +------------ + +Since this uses the NetApp ONTAP modules it will require the python library netapp-lib as well as the Ansible 2.8 release. 
+ +Role Variables +-------------- +``` +src_ontap: # IP or FQDN of the source ONTAP cluster +src_name: # Shortname of the source cluster +src_lif: # IP address of a source Intercluster LIF +src_vserver: # Name of source Vserver +src_volume: # Name of source FlexVol +dst_ontap: # IP or FQDN of the destination ONTAP cluster +dst_name: # Shortname of the destination cluster +dst_lif: # IP address of a destination Intercluster LIF +dst_aggr: # Aggregate to create destination FlexVol on +dst_vserver: # Name of destination Vserver +username: # Admin username of both clusters +password: # Password for Admin username +``` +Dependencies +------------ + +The tasks in this role are dependent on information from the na_ontap_gather_facts module. +The task for na_ontap_gather_facts can not be excluded. + +Example Playbook +---------------- +``` +--- +- hosts: localhost + name: Snapmirror Create + gather_facts: false + vars: + src_ontap: 172.32.0.182 + src_name: vsim + src_lif: 172.32.0.187 + src_vserver: Marketing + src_volume: Marketing_Presentation + dst_ontap: 172.32.0.192 + dst_name: cvo + dst_lif: 172.32.0.194 + dst_aggr: aggr1 + dst_vserver: backup_vserver + username: admin + password: netapp123 + roles: + - na_ontap_snapmirror_create +``` + +License +------- + +GNU v3 + +Author Information +------------------ +NetApp +http://www.netapp.io diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/defaults/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/defaults/main.yml new file mode 100644 index 000000000..f6321a9f1 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/defaults/main.yml @@ -0,0 +1,13 @@ +--- +# defaults file for na_ontap_snapmirror_create +src_ontap: +src_name: +src_lif: +src_vserver: +src_volume: +dst_ontap: +dst_lif: +dst_vserver: +dst_volume: "{{ src_volume }}_dest" +dst_aggr: +passphrase: IamAp483p45a83 diff --git 
a/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/handlers/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/handlers/main.yml new file mode 100644 index 000000000..041b2d100 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for na_ontap_snapmirror_create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/meta/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/meta/main.yml new file mode 100644 index 000000000..22aa13274 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/meta/main.yml @@ -0,0 +1,9 @@ +galaxy_info: + author: "NetApp" + description: "Create SnapMirror relationship" + company: "NetApp" + license: BSD + min_ansible_version: 2.8 + platforms: + galaxy_tags: [netapp, ontap] + dependencies: [] diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tasks/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tasks/main.yml new file mode 100644 index 000000000..84532341b --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tasks/main.yml @@ -0,0 +1,55 @@ +--- +# tasks file for na_ontap_snapmirror_create +- name: Create cluster peer + na_ontap_cluster_peer: + state: present + source_intercluster_lifs: "{{ src_lif }}" + dest_intercluster_lifs: "{{ dst_lif }}" + passphrase: "{{ passphrase }}" + hostname: "{{ src_ontap }}" + username: "{{ username }}" + password: "{{ password }}" + dest_hostname: "{{ dst_ontap }}" + https: true + validate_certs: false +- name: Source vserver peer create + na_ontap_vserver_peer: + state: present + peer_vserver: "{{ dst_vserver }}" + peer_cluster: "{{ dst_name }}" + vserver: "{{ src_vserver }}" + applications: snapmirror + hostname: "{{ src_ontap }}" + username: "{{ username }}" + password: "{{ password }}" + dest_hostname: "{{ 
dst_ontap }}" + https: true + validate_certs: false +- name: Validate destination FlexVol + na_ontap_volume: + state: present + name: "{{ dst_volume }}" + is_infinite: false + aggregate_name: "{{ dst_aggr }}" + size: 20 + size_unit: mb + type: DP + # junction_path: "/{{ dst_volume }}" + vserver: "{{ dst_vserver }}" + hostname: "{{ dst_ontap }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false +- name: Create SnapMirror + na_ontap_snapmirror: + state: present + source_volume: "{{ src_volume }}" + destination_volume: "{{ dst_volume }}" + source_vserver: "{{ src_vserver }}" + destination_vserver: "{{ dst_vserver }}" + hostname: "{{ dst_ontap }}" + username: "{{ username }}" + password: "{{ password }}" + https: true + validate_certs: false diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/inventory b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/test.yml b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/test.yml new file mode 100644 index 000000000..8189e13e9 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - na_ontap_snapmirror_create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/vars/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/vars/main.yml new file mode 100644 index 000000000..5a4950b6b --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_snapmirror_create/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for na_ontap_snapmirror_create diff --git 
a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/LICENSE b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/LICENSE new file mode 100644 index 000000000..f288702d2 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/README.md b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/README.md new file mode 100644 index 000000000..e146107d2 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/README.md @@ -0,0 +1,113 @@ +na_ontap_vserver_create +========= + +Create one or more Vservers. + +Creates Vserver with specified protocol(s). Will join to Windows Domain provided AD credintals are included. +Modifies default rule for NFS protocol to 0.0.0.0/0 ro to allow NFS connections + +Requirements +------------ + +Since this uses the NetApp ONTAP modules it will require the python library netapp-lib as well as the Ansible 2.8 release. + +Role Variables +-------------- +``` +cluster: +netapp_hostname: +netapp_username: +netapp_password: + +ontap_version: # OPTIONAL This defaults to ontap version minor code 140 (9.4) if running this against 9.3 or below add this variable and set to 120 + +#Based on if Variables != or == None determins if a section runs. Each variable will take one or more dictonary entries. Simply omit sections +#that you don't want to run. The following would run all sections + +vservers: # Vservers to create + - { name: nfs_vserver, aggr: aggr1, protocol: nfs } + # aggr_list is optional. If not specified all aggregates will be added to the allowed list. + - { name: nfs_vserver2, aggr: aggr1, protocol: nfs, aggr_list: "aggr1,aggr2" } + # with protocol: nfs, the keys nfsv3, nfsv4, nfsv41 are optional, the default values are as shown below. + - { name: nfs_vserver3, aggr: aggr1, protocol: nfs, nfsv3: enabled, nfsv4: disabled, nfsv41: disabled } + - { name: cifs_vserver, aggr: aggr1, protocol: cifs } + +vserver_dns: # DNS at the Vserver level. + - { vserver: cifs_vserver, dns_domains: lab.local, dns_nameservers: 172.32.0.40 } + +lifs: # interfaces for the Vservers being created - only IP interfaces are supported. 
+ # with REST, ipspace, broadcast_domain, service_policy, interface_type (but only with a value of "ip") are also supported. + - { name: nfs_vserver_data_lif, vserver: nfs_vserver, node: cluster-01, port: e0c, protocol: nfs, address: 172.32.0.193, netmask: 255.255.255.0 } + - { name: cifs_vserver_data_lif, vserver: cifs_vserver, node: cluster-01, port: e0d, protocol: cifs, address: 172.32.0.194, netmask: 255.255.255.0 } + # With 21.24.0, protocol is not required when using REST. When protocol is absent, role and firewall_policy are omitted. + # With 21.24.0, vserver management interfaces can also be created when using REST: + - { name: vserver_mgmt_lif, vserver: nfs_vserver, node: cluster-01, port: e0e, service_policy: default-management, address: 172.32.0.192, netmask: 255.255.255.0} + +gateway: # To configure the default gateway for the Vserver. + - { vserver: nfs_vserver, destination: 0.0.0.0/0, gateway: 172.32.0.1 } + +cifs: # Vservers to join to an AD Domain + - { vserver: cifs_vserver, cifs_server_name: netapp1, domain: ansible.local, force: true } + +fcp: # sets FCP ports as Target + - { adapter: 0e, node: cluster-01 } +``` +Dependencies +------------ + +The tasks in this role are dependent on information from the na_ontap_gather_facts module. +The task for na_ontap_gather_facts can not be excluded. + +Example Playbook +---------------- +``` +--- +- hosts: localhost + collections: + - netapp.ontap + vars_prompt: + - name: admin_user_name + prompt: domain admin (enter if skipped) + - name: admin_password + prompt: domain admin password (enter if skipped) + vars_files: + - globals.yml + roles + - na_ontap_vserver_create +``` +I use a globals file to hold my variables. 
+``` +--- +globals.yml +cluster_name: cluster + +netapp_hostname: 172.32.0.182 +netapp_username: admin +netapp_password: netapp123 + +vservers: + - { name: nfs_vserver, aggr: aggr1, protocol: NFS } + - { name: cifs_vserver, aggr: aggr1, protocol: cifs } + - { name: nas_vserver, aggr: aggr1, protocol: 'cifs,nfs' } + +lifs: + - { name: nfs_vserver_data_lif, vserver: nfs_vserver, node: vsim-01, port: e0c, protocol: nfs, address: 172.32.0.183, netmask: 255.255.255.0 } + - { name: cifs_vserver_data_lif, vserver: cifs_vserver, node: vsim-01, port: e0c, protocol: nfs, address: 172.32.0.184, netmask: 255.255.255.0 } + - { name: nas_vserver_data_lif, vserver: nas_vserver, node: vsim-02, port: e0c, protocol: nfs, address: 172.32.0.185, netmask: 255.255.255.0 } + +vserver_dns: + - { vserver: cifs_vserver, dns_domains: lab.local, dns_nameservers: 172.32.0.40 } + +cifs: + - { vserver: cifs_vserver, cifs_server_name: netapp1, domain: openstack.local, force: true } +``` + +License +------- + +GNU v3 + +Author Information +------------------ +NetApp +http://www.netapp.io diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/defaults/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/defaults/main.yml new file mode 100644 index 000000000..950c41b9a --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/defaults/main.yml @@ -0,0 +1,14 @@ +--- +# defaults file for na-ontap-vserver-create +netapp_hostname: +netapp_username: +netapp_password: +validate_certs: false +vservers: +vserver_dns: +lifs: +cifs: +nfs: +fcp: +gateway: +ontap_version: 140 diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/handlers/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/handlers/main.yml new file mode 100644 index 000000000..fcc2bf821 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for 
na-ontap-vserver-create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/meta/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/meta/main.yml new file mode 100644 index 000000000..ddc4ca71c --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/meta/main.yml @@ -0,0 +1,8 @@ +galaxy_info: + author: NetApp + description: "Create one or more SVMs" + company: NetApp + license: BSD + min_ansible_version: 2.8 + galaxy_tags: [netapp, ontap] + dependencies: [] diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tasks/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tasks/main.yml new file mode 100644 index 000000000..9adbc1d05 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tasks/main.yml @@ -0,0 +1,198 @@ +--- +- name: Create Vserver + na_ontap_svm: + state: present + name: "{{ item.name }}" + root_volume: "{{ item.name }}_root" + root_volume_aggregate: "{{ item.aggr }}" + root_volume_security_style: "{{ 'ntfs' if item.protocol.lower() is search('cifs') else 'unix' }}" + # with REST, the 3 root_volume options above are not needed, and not supported + ignore_rest_unsupported_options: true + aggr_list: "{{ '*' if item.aggr_list is not defined else item.aggr_list }}" + max_volumes: "{{ item.max_volumes | default(omit) }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ vservers }}" + when: vservers != None +- name: Setup FCP + na_ontap_fcp: + state: present + service_state: started + vserver: "{{ item.name }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ vservers }}" + when: item.protocol.lower() is search("fcp") +- name: Setup iSCSI + 
na_ontap_iscsi: + state: present + service_state: started + vserver: "{{ item.name }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ vservers }}" + when: item.protocol.lower() is search("iscsi") +- name: Modify adapter + na_ontap_ucadapter: + state: present + adapter_name: "{{ item.adapter_name }}" + node_name: "{{ item.node_name }}" + mode: fc + type: target + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ fcp }}" + when: fcp != None +- name: Create IP Interface + na_ontap_interface: + state: present + interface_name: "{{ item.name }}" + home_port: "{{ item.port }}" + home_node: "{{ item.node }}" + # role and protocols are deprecated + role: "{{ omit if item.protocol is not defined else 'data' }}" + protocols: "{{ item.protocol | default(omit) }}" + admin_status: up + failover_policy: "{{ omit if (item.protocol is defined and item.protocol.lower() is search('iscsi')) else 'system-defined' }}" + # service_policy is prefered over firewall_policy + firewall_policy: "{{ omit if item.service_policy is defined else 'data' }}" + is_auto_revert: "{{ omit if (item.protocol is defined and item.protocol.lower() is search('iscsi')) else 'true' }}" + ipspace: "{{ item.ipspace | default(omit) }}" + broadcast_domain: "{{ item.broadcast_domain | default(omit) }}" + service_policy: "{{ item.service_policy | default(omit) }}" + # only a value of IP is supported for now. But leave it open for FC. 
+ interface_type: "{{ item.interface_type | default(omit) }}" + address: "{{ item.address }}" + netmask: "{{ item.netmask }}" + vserver: "{{ item.vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ lifs }}" + when: lifs != None +- name: Add default route + na_ontap_net_routes: + state: present + vserver: "{{ item.vserver }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + destination: "{{ item.destination }}" + gateway: "{{ item.gateway }}" + metric: 30 + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ gateway }}" + when: gateway != None +- name: Create DNS + na_ontap_dns: + state: present + vserver: "{{ item.vserver }}" + domains: "{{ item.dns_domains }}" + nameservers: "{{ item.dns_nameservers }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ vserver_dns }}" + when: vserver_dns !=None +- name: Create CIFS Server + na_ontap_cifs_server: + state: present + vserver: "{{ item.vserver }}" + domain: "{{ item.domain }}" + cifs_server_name: "{{ item.cifs_server_name }}" + force: "{{ 'false' if item.force is not defined else item.force }}" + admin_password: "{{ admin_password }}" + admin_user_name: "{{ admin_user_name }}" + ou: "{{ item.ou | default(omit) }}" + service_state: started + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ cifs }}" + when: cifs != None +- name: Create NFS Server + na_ontap_nfs: + state: present + service_state: started + vserver: "{{ item.name }}" + nfsv3: "{{ item.nfsv3 | default('enabled') }}" + nfsv4: "{{ item.nfsv4 | default('disabled') }}" + nfsv41: 
"{{ item.nfsv41 | default('disabled') }}" + tcp: enabled + udp: enabled + vstorage_state: disabled + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ vservers }}" + when: + - item.protocol.lower() is search("nfs") + - ontap_version >= 130 +- name: Create NFS Server + na_ontap_nfs: + state: present + service_state: started + vserver: "{{ item.name }}" + nfsv3: "{{ item.nfsv3 | default('enabled') }}" + nfsv4: "{{ item.nfsv4 | default('disabled') }}" + nfsv41: "{{ item.nfsv41 | default('disabled') }}" + vstorage_state: disabled + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ vservers }}" + when: + - item.protocol.lower() is search("nfs") + - ontap_version < 130 +- name: Setup default NFS rule + na_ontap_export_policy_rule: + state: present + policy_name: default + vserver: "{{ item.name }}" + client_match: 0.0.0.0/0 + rule_index: 1 + ro_rule: any + rw_rule: none + protocol: any + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: true + validate_certs: "{{ validate_certs }}" + with_items: + "{{ vservers }}" + when: + item.protocol.lower() is search("nfs") diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/inventory b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/test.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/test.yml new file mode 100644 index 000000000..0907ebf81 --- /dev/null +++ 
b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - na-ontap-vserver-create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/vars/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/vars/main.yml new file mode 100644 index 000000000..e9fb9f12b --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_create/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for na-ontap-vserver-create diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/.travis.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/.travis.yml new file mode 100644 index 000000000..121cc4993 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/README.md b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/README.md new file mode 100644 index 000000000..805ab1df7 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/README.md @@ -0,0 +1,88 @@ +Role Name +========= + +This role deletes an ONTAP vserver and dependents: +- all volumes are deleted, including any user data !!! +- clones and snapshots are deleted as well !!! 
+- network interfaces are deleted +- as the vserver is deleted, the associated, DNS entries, routes, NFS/CIFS/iSCSI servers as applicable, export policies and rules, are automatically deleted by ONTAP. + +Requirements +------------ + +- ONTAP collection. +- ONTAP with REST support (9.7 or later). + +- The module requires the jmespath python package. +- If json_query is not found, you may need to install the community.general collection. +- C(ansible-galaxy collection install community.general) + +Role Variables +-------------- + +This role expects the following variables to be set: +- netapp_hostname: IP address of ONTAP admin interface (can be vsadmin too). +- netapp_username: user account with admin or vsadmin role. +- netapp_password: for the user account with admin or vsadmin role. +- vserver_name: name of vserver to delete. + +In order to delete a CIFS server, the following variables are required +- cifs_ad_admin_user_name: AD admin user name +- cifs_ad_admin_password: AD admin password + +The following variables are preset but can be changed +- https: true +- validate_certs: true (true is strongly recommended) +- debug_level: 0 +- enable_check_mode: false +- confirm_before_removing_cifs_server: true +- confirm_before_removing_igroups: true +- confirm_before_removing_interfaces: true +- confirm_before_removing_volumes: true +- cifs_force_delete: true (delete the CIFS server regardless of communication errors) + + +Example Playbook +---------------- + + + +``` +--- +- hosts: localhost + gather_facts: no + vars: + login: &login + netapp_hostname: ip_address + netapp_username: admin + netapp_password: XXXXXXXXX + https: true + validate_certs: false + roles: + - role: netapp.ontap.na_ontap_vserver_delete + vars: + <<: *login + vserver_name: ansibleSVM + # uncomment the following line to accept volumes will be permanently deleted + # removing_volumes_permanently_destroy_user_data: I agree + # turn confirmation prompts on or off + 
confirm_before_removing_cifs_server: false + confirm_before_removing_igroups: false + confirm_before_removing_interfaces: false + # optional - change the following to false to remove any confirmation prompt before deleting volumes !!! + # when confirmations are on, you may receive two prompts: + # 1. delete all clones if they exist. The prompt is not shown if no clone exists. + # 2. delete all volumes if any. The prompt is not shown if no volume exists. + confirm_before_removing_volumes: true + +``` + +License +------- + +BSD + +Author Information +------------------ + +https://github.com/ansible-collections/netapp.ontap diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/defaults/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/defaults/main.yml new file mode 100644 index 000000000..f4e84a864 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/defaults/main.yml @@ -0,0 +1,11 @@ +--- +# defaults file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete +debug_level: 0 +cifs_force_delete: false +enable_check_mode: false +confirm_before_removing_cifs_server: true +confirm_before_removing_igroups: true +confirm_before_removing_interfaces: true +confirm_before_removing_volumes: true +https: true +validate_certs: true diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/handlers/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/handlers/main.yml new file mode 100644 index 000000000..c5e8cce1d --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/meta/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/meta/main.yml new file mode 100644 index 000000000..36ce858e2 --- /dev/null +++ 
b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/meta/main.yml @@ -0,0 +1,9 @@ +galaxy_info: + author: NetApp + description: delete ONTAP vserver and all associated user data (volumes, snapshots, clones) + company: NetApp + issue_tracker_url: https://github.com/ansible-collections/netapp.ontap/issues + license: BSD + min_ansible_version: 2.9 + galaxy_tags: [netapp, ontap] + dependencies: [] diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/assert_prereqs_and_vserver_exists.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/assert_prereqs_and_vserver_exists.yml new file mode 100644 index 000000000..730deca61 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/assert_prereqs_and_vserver_exists.yml @@ -0,0 +1,66 @@ +--- +# tasks file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete + +- name: warn about data being permanently deleted + set_fact: + agreement: "{{ removing_volumes_permanently_destroy_user_data | default('not set') }}" + +- name: warn about data being permanently deleted + fail: + msg: | + This role will permanently delete all user data associated with volumes owned by vserver '{{ vserver_name }}'. + Make sure to set 'removing_volumes_permanently_destroy_user_data' to 'I agree' in your playbook. + Current value: {{ agreement }}. 
+ when: agreement != 'I agree' + +- name: check REST is enabled and ONTAP version + na_ontap_restit: + api: cluster + query: + fields: version + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + register: results + +- name: check ONTAP version is 9.7 or better + assert: + that: results.response.version.generation > 9 or (results.response.version.generation == 9 and results.response.version.major >= 7) + quiet: true + +- name: check REST is enabled and SVM exists + na_ontap_rest_info: + gather_subset: + - vserver_info + parameters: + name: "{{ vserver_name }}" + fields: ip_interfaces + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + register: results + +- name: check query succeeded + assert: + that: '"num_records" in results.ontap_info["svm/svms"]' + quiet: true + +- name: check SVM exists - warn if not found + fail: + msg: "SVM {{ vserver_name }} not found" + when: results.ontap_info["svm/svms"]["num_records"] == 0 + ignore_errors: true + register: errors # to bypass the ignore-errors checker + +- name: check only one record was found + fail: + msg: "Unexpected results when getting SVM {{ vserver_name }}: {{ results }}" + when: results.ontap_info["svm/svms"]["num_records"] > 1 + +- name: record whether svm exists + set_fact: + svm_exists: '{{ results.ontap_info["svm/svms"]["num_records"] | bool }}' diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/delete_volumes.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/delete_volumes.yml new file mode 100644 index 000000000..8cbade581 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/delete_volumes.yml @@ -0,0 +1,23 @@ +--- +# tasks file for 
ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete + +- name: Delete all volumes for vserver + na_ontap_volume: + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + state: absent + name: "{{ item }}" + vserver: "{{ vserver_name }}" + check_mode: "{{ enable_check_mode }}" + ignore_errors: "{{ ignore_errors | default(omit) }}" + loop: + "{{ volumes }}" + register: results + +- name: debug + debug: + var: results + when: debug_level > 2 diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes.yml new file mode 100644 index 000000000..bc69bf3d5 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes.yml @@ -0,0 +1,31 @@ +--- +# tasks file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete +- name: collect volumes for vserver + include_tasks: get_volumes.yml + +- name: ask for confirmation before deleting volumes + block: + - name: indicate if we are deleting clones + set_fact: + vol_types: "{{ 'volume clones' if is_flex else 'volumes' }}" + vars: + is_flex: "{{ is_flexclone | default(false) }}" + - name: ask for confirmation + pause: + prompt: "the following {{ vol_types }} will be deleted {{ volumes }}\nPress enter to continue, Ctrl+C to interrupt:" + when: + - volumes | length > 0 + - confirm_before_removing_volumes +- name: Delete all volumes for vserver + include_tasks: delete_volumes.yml + when: + - volumes | length > 0 + +- name: collect and delete all volumes for vserver + include_tasks: find_and_delete_volumes_retries.yml + when: + - volumes | length > 0 + # until: does not work with include_tasks :( Using a loop instead. 
+ loop: "{{ range(1, retry_count)|list }}" + loop_control: + loop_var: retry diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes_retries.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes_retries.yml new file mode 100644 index 000000000..80625089b --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/find_and_delete_volumes_retries.yml @@ -0,0 +1,8 @@ +--- +# tasks file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete +- name: collect volumes for vserver + include_tasks: get_volumes.yml +- name: Delete all volumes for vserver + include_tasks: delete_volumes.yml + when: + - volumes | length > 0 diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_cifs_server.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_cifs_server.yml new file mode 100644 index 000000000..9bf804202 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_cifs_server.yml @@ -0,0 +1,31 @@ +--- +# tasks file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete + +- name: collect CIFS server names for vserver - we expect a list of one + tags: gather + na_ontap_rest_info: + gather_subset: + - protocols/cifs/services + parameters: + svm.name: "{{ vserver_name }}" + fields: name + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + register: ontap +- name: debug + debug: + var: ontap + when: debug_level > 1 +- name: extract cifs_server + set_fact: + cifs_server: "{{ ontap.ontap_info | json_query(get_attrs) }}" + vars: + # double quotes around storage/volumes to escape the / + get_attrs: '"protocols/cifs/services".records[].name' +- name: debug + debug: + var: protocols/cifs/services + when: debug_level > 0 diff --git 
a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_igroups.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_igroups.yml new file mode 100644 index 000000000..4dbb94687 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_igroups.yml @@ -0,0 +1,31 @@ +--- +# tasks file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete + +- name: collect igroups for vserver + tags: gather + na_ontap_rest_info: + gather_subset: + - protocols/san/igroups + parameters: + svm.name: "{{ vserver_name }}" + fields: name + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + register: ontap +- name: debug + debug: + var: ontap + when: debug_level > 1 +- name: extract igroups + set_fact: + igroups: "{{ ontap.ontap_info | json_query(get_attrs) }}" + vars: + # double quotes around storage/volumes to escape the / + get_attrs: '"protocols/san/igroups".records[].name' +- name: debug + debug: + var: protocols/san/igroups + when: debug_level > 0 diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_interfaces.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_interfaces.yml new file mode 100644 index 000000000..86eb1c26a --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_interfaces.yml @@ -0,0 +1,31 @@ +--- +# tasks file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete + +- name: collect interfaces for vserver + tags: gather + na_ontap_rest_info: + gather_subset: + - vserver_info + parameters: + name: "{{ vserver_name }}" + fields: ip_interfaces + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + register: ontap +- name: debug + debug: 
+ var: ontap + when: debug_level > 1 +- name: extract interface names + set_fact: + interfaces: "{{ ontap.ontap_info | json_query(get_attrs) }}" + vars: + # double quotes around storage/volumes to escape the / + get_attrs: '"svm/svms".records[].ip_interfaces[].name' +- name: debug + debug: + var: interfaces + when: debug_level > 0 diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_volumes.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_volumes.yml new file mode 100644 index 000000000..0afc2af33 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/get_volumes.yml @@ -0,0 +1,33 @@ +--- +# tasks file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete + +- name: collect volumes for vserver + tags: gather + na_ontap_rest_info: + gather_subset: + - volume_info + parameters: + svm.name: "{{ vserver_name }}" + is_svm_root: false # we can't delete it, but it'll be removed with the vserver + clone.is_flexclone: "{{ is_flexclone | default(omit) }}" + fields: name + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + register: ontap +- name: debug + debug: + var: ontap + when: debug_level > 1 +- name: extract volume names + set_fact: + volumes: "{{ ontap.ontap_info | json_query(get_attrs) }}" + vars: + # double quotes around storage/volumes to escape the / + get_attrs: '"storage/volumes".records[].name' +- name: debug + debug: + var: volumes + when: debug_level > 0 diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/main.yml new file mode 100644 index 000000000..f1b958bb9 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tasks/main.yml @@ -0,0 +1,166 @@ +--- +# tasks file for 
ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete +# This deletes a vserver and dependents: +# all volumes are deleted, including any user data !!! +# network interfaces are deleted +# At present, clones or snapshots are not deleted, this may block the volume and vserver deletion. +# +# These tasks expect the following variables to be set: +# hostname: IP address of ONTAP admin interface (can be vsadmin too). +# username: user account with admin or vsadmin role. +# password: for the user account with admin or vsadmin role. +# vserver_name: name of vserver to delete. +# +# The following variables are preset but can be changed +# https: true +# validate_certs: true (true is strongly recommended, but use false if the certificate is not set) +# debug_level: 0 +# check_mode: false +# confirm_before_removing_interfaces: true +# confirm_before_removing_volumes: true +# confirm_before_removing_igroups: true +# confirm_before_removing_cifs_server: true +# +- name: set-facts netapp_hostname, netapp_username & netapp_password + set_fact: + netapp_hostname: "{{ hostname }}" + netapp_username: "{{ username }}" + netapp_password: "{{ password }}" + when: + - ( hostname is defined ) and ( hostname | length > 0 ) + - ( username is defined ) and ( username | length > 0 ) + - ( password is defined ) and ( password | length > 0 ) + - netapp_hostname is not defined + - netapp_username is not defined + - netapp_password is not defined + no_log: true + +- name: check REST is enabled and SVM exists + import_tasks: assert_prereqs_and_vserver_exists.yml + +- name: collect and delete volume clones for vserver + import_tasks: find_and_delete_volumes.yml + vars: + ignore_errors: true # delete may fail if a clone is attached to a clone + is_flexclone: true + retry_count: 3 # 3 levels of clones, looks like a lot + when: svm_exists + +- name: collect and delete all volumes for vserver + import_tasks: find_and_delete_volumes.yml + vars: + ignore_errors: true # so we can delete as 
many as possible + retry_count: 2 # retry any error + when: svm_exists + +- name: pause and check if any volume was not deleted + pause: + seconds: 30 + prompt: waiting for 30 seconds as volumes are deleted + when: + - svm_exists + - volumes | length > 0 + +- name: collect and delete all volumes for vserver + include_tasks: find_and_delete_volumes.yml + vars: + ignore_errors: false # so we can report errors + when: svm_exists + +- name: collect CIFS server + include_tasks: get_cifs_server.yml + when: svm_exists + +- name: ask for confirmations + pause: + prompt: "the following CIFS server will be deleted {{ cifs_server }}\nPress enter to continue, Ctrl+C to interrupt:" + when: + - svm_exists + - cifs_server | length > 0 + - confirm_before_removing_cifs_server + +- name: Delete CIFS server + na_ontap_cifs_server: + state: absent + cifs_server_name: "{{ item }}" + vserver: "{{ vserver_name }}" + admin_user_name: "{{ cifs_ad_admin_user_name }}" + admin_password: "{{ cifs_ad_admin_password }}" + force: "{{ cifs_force_delete }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + feature_flags: + trace_apis: true + check_mode: "{{ enable_check_mode }}" + loop: "{{ cifs_server }}" + when: svm_exists + +- name: collect igroups + include_tasks: get_igroups.yml + when: svm_exists + +- name: ask for confirmations + pause: + prompt: "the following igroups will be deleted {{ igroups }}\nPress enter to continue, Ctrl+C to interrupt:" + when: + - svm_exists + - igroups | length > 0 + - confirm_before_removing_igroups + +- name: Delete igroups + na_ontap_igroup: + state: absent + name: "{{ item }}" + vserver: "{{ vserver_name }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + feature_flags: + trace_apis: true + check_mode: "{{ 
enable_check_mode }}" + loop: "{{ igroups }}" + when: svm_exists + +- name: collect interfaces for vserver + include_tasks: get_interfaces.yml + when: svm_exists + +- name: ask for confirmations + pause: + prompt: "the following interfaces will be deleted {{ interfaces }}\nPress enter to continue, Ctrl+C to interrupt:" + when: + - svm_exists + - interfaces | length > 0 + - confirm_before_removing_interfaces + +- name: Delete Interfaces + na_ontap_interface: + state: absent + interface_name: "{{ item }}" + vserver: "{{ vserver_name }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" + check_mode: "{{ enable_check_mode }}" + loop: "{{ interfaces }}" + when: svm_exists + +- name: "Delete vserver - {{ vserver_name }}" + # also deletes any export policy and rules, NFS, CIFS, and iSCSI servers + # as well as DNS, routes + # let's do it, even if the vserver was not found to confirm idempotency + na_ontap_svm: + state: absent + name: "{{ vserver_name }}" + hostname: "{{ netapp_hostname }}" + username: "{{ netapp_username }}" + password: "{{ netapp_password }}" + https: "{{ https }}" + validate_certs: "{{ validate_certs }}" diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tests/inventory b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tests/inventory new file mode 100644 index 000000000..878877b07 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tests/test.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tests/test.yml new file mode 100644 index 000000000..c8296bb84 --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/tests/test.yml @@ -0,0 +1,24 @@ +--- +- hosts: localhost + gather_facts: false + vars: + 
login: &login + hostname: XXX.XXX.XXX.XXX + username: admin + password: XXXXXXXXXXXXXXX + https: true + validate_certs: false + roles: + - role: netapp.ontap.na_ontap_vserver_delete + vars: + <<: *login + vserver_name: ansibleSVM + # uncomment the following line to accept volumes will be permanently deleted + # removing_volumes_permanently_destroy_user_data: I agree + # turn confirmation prompts on or off + confirm_before_removing_interfaces: false + # optional - change the following to false to remove any confirmation prompt before deleting volumes !!! + # when confirmations are on, you may receive two prompts: + # 1. delete all clones if they exist. The prompt is not shown if no clone exists. + # 2. delete all volumes if any. The prompt is not shown if no volume exists. + confirm_before_removing_volumes: true diff --git a/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/vars/main.yml b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/vars/main.yml new file mode 100644 index 000000000..cdb47a89f --- /dev/null +++ b/ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ansible_collections/netapp/ontap/roles/na_ontap_vserver_delete diff --git a/ansible_collections/netapp/ontap/tests/sanity/ignore-2.10.txt b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.10.txt new file mode 100644 index 000000000..f4539e2ae --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.10.txt @@ -0,0 +1,4 @@ +plugins/modules/na_ontap_autosupport_invoke.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_login_messages.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_motd.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid diff --git a/ansible_collections/netapp/ontap/tests/sanity/ignore-2.11.txt b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.11.txt new file mode 100644 index 
000000000..f4539e2ae --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.11.txt @@ -0,0 +1,4 @@ +plugins/modules/na_ontap_autosupport_invoke.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_login_messages.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_motd.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid diff --git a/ansible_collections/netapp/ontap/tests/sanity/ignore-2.12.txt b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.12.txt new file mode 100644 index 000000000..f4539e2ae --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.12.txt @@ -0,0 +1,4 @@ +plugins/modules/na_ontap_autosupport_invoke.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_login_messages.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_motd.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid diff --git a/ansible_collections/netapp/ontap/tests/sanity/ignore-2.13.txt b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.13.txt new file mode 100644 index 000000000..f4539e2ae --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.13.txt @@ -0,0 +1,4 @@ +plugins/modules/na_ontap_autosupport_invoke.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_login_messages.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_motd.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid diff --git a/ansible_collections/netapp/ontap/tests/sanity/ignore-2.14.txt b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.14.txt new file mode 100644 index 000000000..f4539e2ae --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.14.txt @@ -0,0 +1,4 @@ +plugins/modules/na_ontap_autosupport_invoke.py validate-modules:invalid-argument-name 
+plugins/modules/na_ontap_login_messages.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_motd.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid diff --git a/ansible_collections/netapp/ontap/tests/sanity/ignore-2.15.txt b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.15.txt new file mode 100644 index 000000000..f4539e2ae --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.15.txt @@ -0,0 +1,4 @@ +plugins/modules/na_ontap_autosupport_invoke.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_login_messages.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_motd.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid diff --git a/ansible_collections/netapp/ontap/tests/sanity/ignore-2.16.txt b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.16.txt new file mode 100644 index 000000000..f4539e2ae --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.16.txt @@ -0,0 +1,4 @@ +plugins/modules/na_ontap_autosupport_invoke.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_login_messages.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_motd.py validate-modules:invalid-argument-name +plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid diff --git a/ansible_collections/netapp/ontap/tests/sanity/ignore-2.9.txt b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.9.txt new file mode 100644 index 000000000..5c626a030 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/sanity/ignore-2.9.txt @@ -0,0 +1 @@ +plugins/modules/na_ontap_nfs.py validate-modules:parameter-invalid \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/tests/unit/compat/__init__.py b/ansible_collections/netapp/ontap/tests/unit/compat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/ansible_collections/netapp/ontap/tests/unit/compat/builtins.py b/ansible_collections/netapp/ontap/tests/unit/compat/builtins.py new file mode 100644 index 000000000..feef5d758 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/compat/builtins.py @@ -0,0 +1,34 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + # pylint: disable=unused-import + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' diff --git a/ansible_collections/netapp/ontap/tests/unit/compat/mock.py b/ansible_collections/netapp/ontap/tests/unit/compat/mock.py new file mode 100644 index 000000000..0972cd2e8 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/compat/mock.py @@ -0,0 +1,122 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. It +# is the same as the python3 stdlib mock library + +try: + # Allow wildcard import because we really do want to import all of mock's + # symbols into this compat shim + # pylint: disable=wildcard-import,unused-wildcard-import + from unittest.mock import * +except ImportError: + # Python 2 + # pylint: disable=wildcard-import,unused-wildcard-import + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +# Prior to 3.4.4, mock_open cannot handle binary read_data +if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b'\n' if isinstance(read_data, bytes) else '\n' + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. 
+ data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read` methoddline`, and `readlines` of the + file handle to return. This is an empty string by default. + """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return 
mock diff --git a/ansible_collections/netapp/ontap/tests/unit/compat/unittest.py b/ansible_collections/netapp/ontap/tests/unit/compat/unittest.py new file mode 100644 index 000000000..73a20cf8c --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/compat/unittest.py @@ -0,0 +1,44 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +import pytest + +# Allow wildcard import because we really do want to import all of +# unittests's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') + + class TestCase: + """ skip everything """ + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as unittest2 may not be available') +else: + from unittest import * diff --git a/ansible_collections/netapp/ontap/tests/unit/framework/mock_rest_and_zapi_requests.py b/ansible_collections/netapp/ontap/tests/unit/framework/mock_rest_and_zapi_requests.py new file mode 100644 index 000000000..a920eeab6 --- /dev/null +++ 
b/ansible_collections/netapp/ontap/tests/unit/framework/mock_rest_and_zapi_requests.py @@ -0,0 +1,288 @@ + +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Author: Laurent Nicolas, laurentn@netapp.com + +""" unit tests for Ansible modules for ONTAP: + fixture to mock REST send_request and ZAPI invoke_elem to trap all network calls + + Note: errors are reported as exception. Additional details are printed to the output. + pytest suppresses the output unless -s is used. +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from copy import deepcopy +from functools import partial +import inspect +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +# set this to true to print messages about the fixture itself. +DEBUG = False +# if true, an error is raised if register_responses was not called. 
+FORCE_REGISTRATION = False + + +@pytest.fixture(autouse=True) +def patch_request_and_invoke(request): + if DEBUG: + print('entering patch_request_and_invoke fixture for', request.function) + function_name = request.function.__name__ + + with patch('time.sleep') as mock_time_sleep: + mock_time_sleep.side_effect = partial(_mock_time_sleep, function_name) + with patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') as mock_send_request: + mock_send_request.side_effect = partial(_mock_netapp_send_request, function_name) + if netapp_utils.has_netapp_lib(): + with patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapZAPICx.invoke_elem') as mock_invoke_elem: + mock_invoke_elem.side_effect = partial(_mock_netapp_invoke_elem, function_name) + yield mock_send_request, mock_invoke_elem + else: + yield mock_send_request + + # This part is executed after the test completes + _patch_request_and_invoke_exit_checks(function_name) + + +def register_responses(responses, function_name=None): + ''' When patching, the pytest request identifies the UT test function + if the registration is happening in a helper function, function_name needs to identify the calling test function + EG: + test_me(): + for x in range: + check_something() + if the registration happens in check_something, function_name needs to be set to test_me (as a string) + ''' + caller = inspect.currentframe().f_back.f_code.co_name + if DEBUG: + print('register_responses - caller:', caller, 'function_name:', function_name) + if function_name is not None and function_name != caller and (caller.startswith('test') or not function_name.startswith('test')): + raise KeyError('inspect reported a different name: %s, received: %s' % (caller, function_name)) + if function_name is None: + function_name = caller + fixed_records = [] + for record in responses: + try: + expected_method, expected_api, response = record + if expected_method not in ['ZAPI', 'GET', 'OPTIONS', 
'POST', 'PATCH', 'DELETE']: + raise KeyError('Unexpected method %s in %s for function: %s' % (expected_method, record, function_name)) + except ValueError: + expected_method = 'ZAPI' + expected_api, response = record + if expected_method == 'ZAPI': + # sanity checks for netapp-lib are deferred until the test is actually run + response, valid = response + if valid != 'valid': + raise ImportError(response) + # some modules modify the record in place - keep the original intact + fixed_records.append((expected_method, expected_api, deepcopy(response))) + _RESPONSES[function_name] = fixed_records + + +def get_mock_record(function_name=None): + if function_name is None: + function_name = inspect.currentframe().f_back.f_code.co_name + return _REQUESTS.get(function_name) + + +def print_requests(function_name=None): + if function_name is None: + function_name = inspect.currentframe().f_back.f_code.co_name + if function_name not in _REQUESTS: + print('No request processed for %s' % function_name) + return + print('--- %s - processed requests ---' % function_name) + for record in _REQUESTS[function_name].get_requests(): + print(record) + print('--- %s - end of processed requests ---' % function_name) + + +def print_requests_and_responses(function_name=None): + if function_name is None: + function_name = inspect.currentframe().f_back.f_code.co_name + if function_name not in _REQUESTS: + print('No request processed for %s' % function_name) + return + print('--- %s - processed requests and responses---' % function_name) + for record in _REQUESTS[function_name].get_responses(): + print(record) + print('--- %s - end of processed requests and responses---' % function_name) + + +class MockCalls: + '''record calls''' + def __init__(self, function_name): + self.function_name = function_name + self.requests = [] + self.responses = [] + + def get_responses(self, method=None, api=None): + for record in self.responses: + if ((method is None or record.get('method') == method) + and (api is 
None or record.get('api') == api)): + yield record + + def get_requests(self, method=None, api=None, response=None): + for record in self.requests: + if ((method is None or record.get('method') == method) + and (api is None or record.get('api') == api) + and (response is None or record.get('response') == response)): + yield record + + def is_record_in_json(self, record, method, api, response=None): + for request in self.get_requests(method, api, response): + json = request.get('json') + if json and self._record_in_dict(record, json): + return True + return False + + def is_zapi_called(self, zapi): + return any(self.get_requests('ZAPI', zapi)) + + def get_request(self, sequence): + return self.requests[sequence] + + def is_text_in_zapi_request(self, text, sequence, present=True): + found = text in str(self.get_request(sequence)['zapi_request']) + if found != present: + not_expected = 'not ' if present else '' + print('Error: %s %sfound in %s' % (text, not_expected, self.get_request(sequence)['zapi_request'])) + return found + + # private methods + + def __del__(self): + if DEBUG: + print('Deleting MockCalls instance for', self.function_name) + + def _record_response(self, method, api, response): + print(method, api, response) + if method == 'ZAPI': + try: + response = response.to_string() + except AttributeError: + pass + self.responses.append((method, api, response)) + + @staticmethod + def _record_in_dict(record, adict): + for key, value in record.items(): + if key not in adict: + print('key: %s not found in %s' % (key, adict)) + return False + if value != adict[key]: + print('Values differ for key: %s: - %s vs %s' % (key, value, adict[key])) + return False + return True + + def _record_rest_request(self, method, api, params, json, headers, files): + record = { + 'params': params, + 'json': json, + 'headers': headers, + 'files': files, + } + self._record_request(method, api, record) + + def _record_zapi_request(self, zapi, na_element, enable_tunneling): + try: + 
zapi_request = na_element.to_string() + except AttributeError: + zapi_request = na_element + record = { + 'na_element': na_element, + 'zapi_request': zapi_request, + 'tunneling': enable_tunneling + } + self._record_request('ZAPI', zapi, record) + + def _record_request(self, method, api, record=None): + record = record or {} + record['function'] = self.function_name + record['method'] = method + record['api'] = api + self.requests.append(record) + + def _get_response(self, method, api): + response = _get_response(self.function_name, method, api) + self._record_response(method, api, response) + return response + + +# private variables and methods + +_REQUESTS = {} +_RESPONSES = {} + + +def _get_response(function, method, api): + if function not in _RESPONSES: + print('Error: make sure to add entries for %s in RESPONSES.' % function) + raise KeyError('function %s is not registered - %s %s' % (function, method, api)) + if not _RESPONSES[function]: + print('Error: exhausted all entries for %s in RESPONSES, received request for %s %s' % (function, method, api)) + print_requests(function) + raise KeyError('function %s received unhandled call %s %s' % (function, method, api)) + expected_method, expected_api, response = _RESPONSES[function][0] + if expected_method != method or expected_api not in ['*', api]: + print_requests(function) + raise KeyError('function %s received an unexpected call %s %s, expecting %s %s' % (function, method, api, expected_method, expected_api)) + _RESPONSES[function].pop(0) + if isinstance(response, Exception): + raise response + # some modules modify the record in place - keep the original intact + return deepcopy(response) + + +def _get_or_create_mock_record(function_name): + if function_name not in _REQUESTS: + _REQUESTS[function_name] = MockCalls(function_name) + return _REQUESTS[function_name] + + +def _mock_netapp_send_request(function_name, method, api, params, json=None, headers=None, files=None): + if DEBUG: + print('Inside 
_mock_netapp_send_request') + mock_calls = _get_or_create_mock_record(function_name) + mock_calls._record_rest_request(method, api, params, json, headers, files) + return mock_calls._get_response(method, api) + + +def _mock_netapp_invoke_elem(function_name, na_element, enable_tunneling=False): + if DEBUG: + print('Inside _mock_netapp_invoke_elem') + zapi = na_element.get_name() + mock_calls = _get_or_create_mock_record(function_name) + mock_calls._record_zapi_request(zapi, na_element, enable_tunneling) + return mock_calls._get_response('ZAPI', zapi) + + +def _mock_time_sleep(function_name, duration): + if DEBUG: + print('Inside _mock_time_sleep for %s' % function_name) + if duration > 0.1: + # the IDE or debug mode may add a small timer - only report for "large" value + raise KeyError("time.sleep(%s) was called - add: @patch('time.sleep')" % duration) + + +def _patch_request_and_invoke_exit_checks(function_name): + # action to be performed afther a test is complete + if DEBUG: + print('exiting patch_request_and_invoke fixture for', function_name) + if FORCE_REGISTRATION: + assert function_name in _RESPONSES, 'Error: responses for ZAPI invoke or REST send requests are not registered.' + # make sure all expected requests were consumed + if _RESPONSES.get(function_name): + print('Error: not all responses were processed. It is expected if the test failed.') + print('Error: remaining responses: %s' % _RESPONSES[function_name]) + msg = 'Error: not all responses were processed. Use -s to see detailed error. '\ + 'Ignore this error if there is an earlier error in the test.' 
+ assert not _RESPONSES.get(function_name), msg + if function_name in _RESPONSES: + del _RESPONSES[function_name] + if function_name in _REQUESTS: + del _REQUESTS[function_name] diff --git a/ansible_collections/netapp/ontap/tests/unit/framework/rest_factory.py b/ansible_collections/netapp/ontap/tests/unit/framework/rest_factory.py new file mode 100644 index 000000000..dfda97c03 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/framework/rest_factory.py @@ -0,0 +1,107 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Author: Laurent Nicolas, laurentn@netapp.com + +""" unit tests for Ansible modules for ONTAP: + utility to build REST responses and errors, and register them to use them in testcases. + + 1) at the module level, define the REST responses: + SRR = rest_responses() if you're only interested in the default ones: 'empty', 'error', ... + SRR = rest_responses(dict) to use the default ones and augment them: + a key identifies a response name, and the value is a tuple. + + 3) in each test function, create a list of (event, response) using rest_response + def test_create_aggr(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'storage/aggregates', SRR['empty_good']) + ]) + + See ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate_rest.py + for an example. 
+""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +JOB_GET_API = ' cluster/jobs/94b6e6a7-d426-11eb-ac81-00505690980f' + + +def _build_job(state): + return (200, { + "uuid": "f03ccbb6-d8bb-11eb-ac81-00505690980f", + "description": "job results with state: %s" % state, + "state": state, + "message": "job reported %s" % state + }, None) + + +# name: (html_code, dict, None or error string) +# dict is translated into an xml structure, num_records is None or an integer >= 0 +_DEFAULT_RESPONSES = { + # common responses + 'is_rest': (200, {}, None), + 'is_rest_95': (200, dict(version=dict(generation=9, major=5, minor=0, full='dummy_9_5_0')), None), + 'is_rest_96': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy_9_6_0')), None), + 'is_rest_97': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy_9_7_0')), None), + 'is_rest_9_7_5': (200, dict(version=dict(generation=9, major=7, minor=5, full='dummy_9_7_5')), None), + 'is_rest_9_8_0': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy_9_8_0')), None), + 'is_rest_9_9_0': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy_9_9_0')), None), + 'is_rest_9_9_1': (200, dict(version=dict(generation=9, major=9, minor=1, full='dummy_9_9_1')), None), + 'is_rest_9_10_1': (200, dict(version=dict(generation=9, major=10, minor=1, full='dummy_9_10_1')), None), + 'is_rest_9_11_0': (200, dict(version=dict(generation=9, major=11, minor=0, full='dummy_9_11_0')), None), + 'is_rest_9_11_1': (200, dict(version=dict(generation=9, major=11, minor=1, full='dummy_9_11_1')), None), + 'is_rest_9_12_0': (200, dict(version=dict(generation=9, major=12, minor=0, full='dummy_9_12_0')), None), + 'is_rest_9_12_1': (200, dict(version=dict(generation=9, major=12, minor=1, full='dummy_9_12_1')), None), + 'is_rest_9_13_1': (200, dict(version=dict(generation=9, major=13, minor=1, full='dummy_9_13_1')), None), + 'is_zapi': (400, {}, "Unreachable"), 
+ 'empty_good': (200, {}, None), + 'success': (200, {}, None), + 'success_with_job_uuid': (200, {'job': {'_links': {'self': {'href': '/api/%s' % JOB_GET_API}}}}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'empty_records': (200, {'records': []}, None), + 'zero_records': (200, {'num_records': 0}, None), + 'one_record': (200, {'num_records': 1}, None), + 'one_vserver_record': (200, {'num_records': 1, 'records': [{'svm': {'name': 'svm_name', 'uuid': 'svm_uuid'}}]}, None), + 'generic_error': (400, None, "Expected error"), + 'error_record': (400, None, {'code': 6, 'message': 'Expected error'}), + 'job_generic_response_success': _build_job('success'), + 'job_generic_response_running': _build_job('running'), + 'job_generic_response_failure': _build_job('failure'), +} + + +def rest_error_message(error, api=None, extra='', got=None): + if got is None: + got = 'got Expected error.' + msg = ('%s: ' % error) if error else '' + msg += ('calling: %s: ' % api) if api else '' + msg += got + msg += extra + return msg + + +class rest_responses: + ''' return an object that behaves like a read-only dictionary + supports [key] to read an entry, and 'in' keyword to check key existence. 
+ ''' + def __init__(self, adict=None, allow_override=True): + self.responses = dict(_DEFAULT_RESPONSES.items()) + if adict: + for key, value in adict.items(): + if not allow_override and key in self.responses: + raise KeyError('duplicated key: %s' % key) + self.responses[key] = value + + def _get_response(self, name): + try: + return self.responses[name] + except KeyError: + raise KeyError('%s not registered, list of valid keys: %s' % (name, self.responses.keys())) + + def __getitem__(self, name): + return self._get_response(name) + + def __contains__(self, name): + return name in self.responses diff --git a/ansible_collections/netapp/ontap/tests/unit/framework/test_mock_rest_and_zapi_requests.py b/ansible_collections/netapp/ontap/tests/unit/framework/test_mock_rest_and_zapi_requests.py new file mode 100644 index 000000000..bd94b027a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/framework/test_mock_rest_and_zapi_requests.py @@ -0,0 +1,189 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module unit test fixture amd helper mock_rest_and_zapi_requests """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +import ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests as uut +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke as patch_fixture +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import zapi_responses, build_zapi_response + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') # pragma: no 
cover + +# REST API canned responses when mocking send_request. +# The rest_factory provides default responses shared across testcases. +SRR = rest_responses() +# ZAPI canned responses when mocking invoke_elem. +# The zapi_factory provides default responses shared across testcases. +ZRR = zapi_responses() + +uut.DEBUG = True + + +def test_register_responses(): + uut.register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('get-version', build_zapi_response({})), + ('get-bad-zapi', ('BAD_ZAPI', 'valid')) + ], 'test_register_responses') + assert uut._get_response('test_register_responses', 'GET', 'cluster') == SRR['is_rest'] + assert uut._get_response('test_register_responses', 'ZAPI', 'get-version').to_string() == build_zapi_response({})[0].to_string() + # If to_string() is not available, the ZAPI is registered as is. + assert uut._get_response('test_register_responses', 'ZAPI', 'get-bad-zapi') == 'BAD_ZAPI' + + +def test_negative_register_responses(): + with pytest.raises(KeyError) as exc: + uut.register_responses([ + ('MOVE', 'cluster', SRR['is_rest']), + ], 'not_me') + assert exc.value.args[0] == 'inspect reported a different name: test_negative_register_responses, received: not_me' + + with pytest.raises(KeyError) as exc: + uut.register_responses([ + ('MOVE', 'cluster', SRR['is_rest']), + ]) + assert 'Unexpected method MOVE' in exc.value.args[0] + + +def test_negative_get_response(): + with pytest.raises(KeyError) as exc: + uut._get_response('test_negative_get_response', 'POST', 'cluster') + assert exc.value.args[0] == 'function test_negative_get_response is not registered - POST cluster' + + uut.register_responses([ + ('GET', 'cluster', SRR['is_rest'])]) + with pytest.raises(KeyError) as exc: + uut._get_response('test_negative_get_response', 'POST', 'cluster') + assert exc.value.args[0] == 'function test_negative_get_response received an unexpected call POST cluster, expecting GET cluster' + + uut._get_response('test_negative_get_response', 'GET', 
def test_record_rest_request():
    """A recorded REST request can be retrieved and matched against its json payload."""
    function_name = 'testme'
    method = 'METHOD'
    api = 'API'
    params = 'PARAMS'
    json = {'record': {'key': 'value'}}
    headers = {}
    files = {'data': 'value'}
    calls = uut.MockCalls(function_name)
    calls._record_rest_request(method, api, params, json, headers, files)
    uut.print_requests(function_name)
    # bug fix: the original asserted len([calls.get_requests(...)]) == 1, which is
    # vacuously true for any return value (a one-element list literal always has len 1).
    # Count the recorded requests themselves instead.
    assert len(list(calls.get_requests(method, api))) == 1
    assert calls.is_record_in_json({'record': {'key': 'value'}}, 'METHOD', 'API')
    assert not calls.is_record_in_json({'record': {'key': 'value1'}}, 'METHOD', 'API')
    assert not calls.is_record_in_json({'key': 'value'}, 'METHOD', 'API')
def test_fixture(patch_fixture):
    """The fixture patches send_request and invoke_elem and records the traffic."""
    uut.register_responses([
        ('get-version', build_zapi_response({}))])
    mock_sr, mock_invoke = patch_fixture
    cx = netapp_utils.OntapZAPICx()
    cx.invoke_elem(netapp_utils.zapi.NaElement.create_node_with_children('get-version'))
    assert 'test_fixture' in uut._RESPONSES
    assert 'test_fixture' in uut._REQUESTS
    uut.print_requests()
    uut.print_requests_and_responses()
    # only the ZAPI mock is exercised; the REST mock must stay untouched
    assert len(mock_sr.mock_calls) == 0
    assert len(mock_invoke.mock_calls) == 1
    calls = uut.get_mock_record()
    # bug fix: len([calls.get_requests()]) == 1 was vacuously true (one-element list
    # literal); count the recorded requests themselves instead.
    assert len(list(calls.get_requests())) == 1
def test_fixture_exit_unused_response(patch_fixture):
    """With FORCE_REGISTRATION, an unconsumed registered response fails the exit checks."""
    uut.FORCE_REGISTRATION = True
    uut.register_responses([
        ('get-version', build_zapi_response({}))])
    expected = 'Error: not all responses were processed. Use -s to see detailed error. Ignore this error if there is an earlier error in the test.'
    # the response is registered but not used yet, so the exit checks must complain
    with pytest.raises(AssertionError) as exc:
        uut._patch_request_and_invoke_exit_checks('test_fixture_exit_unused_response')
    assert expected in exc.value.args[0]
    # now consume the response so the real fixture teardown is clean
    connection = netapp_utils.OntapZAPICx()
    connection.invoke_elem(netapp_utils.zapi.NaElement.create_node_with_children('get-version'))
    uut.FORCE_REGISTRATION = False
class Module:
    """Bare-bones AnsibleModule stand-in exposing only the params attribute."""

    def __init__(self):
        # connection options consumed by OntapRestAPI; use_rest is 'never' so the
        # code paths under test fall back to basic authentication settings
        options = (
            ('username', 'user'),
            ('password', 'pwd'),
            ('hostname', 'host'),
            ('use_rest', 'never'),
            ('cert_filepath', None),
            ('key_filepath', None),
            ('validate_certs', False),
            ('http_port', None),
            ('feature_flags', None),
        )
        self.params = dict(options)
def test_fixture_no_netapp_lib(patch_fixture):
    """Without netapp-lib, REST requests are still mocked and recorded by the fixture."""
    uut.register_responses([
        ('GET', 'cluster', (200, {}, None))])
    mock_sr = patch_fixture
    cx = netapp_utils.OntapRestAPI(Module())
    cx.send_request('GET', 'cluster', None)
    assert 'test_fixture_no_netapp_lib' in uut._RESPONSES
    assert 'test_fixture_no_netapp_lib' in uut._REQUESTS
    uut.print_requests()
    uut.print_requests_and_responses()
    assert len(mock_sr.mock_calls) == 1
    calls = uut.get_mock_record()
    # bug fix: len([calls.get_requests()]) == 1 was vacuously true (one-element list
    # literal); count the recorded requests themselves instead.
    assert len(list(calls.get_requests())) == 1
def test_build_zapi_response_dict():
    """a nested dict is translated into an XML response carrying the leaf values"""
    aggr_info, valid = uut.build_zapi_response(AGGR_INFO)
    assert valid == 'valid'
    serialized = aggr_info.to_string()
    print(serialized)
    for fragment in (b'aggr_name', b'snap_t', b'3'):
        assert fragment in serialized
    # NOTE(review): this membership test is vacuously true for b'' - the expected XML
    # tag appears to have been lost in extraction; confirm against the upstream source.
    assert b'' in serialized
def test_negative_add_default_error():
    """zapi_responses(allow_override=False) must reject a key already present in the defaults."""
    uut._DEFAULT_ERRORS['empty'] = uut.build_zapi_error(12345, 'hello')
    try:
        with pytest.raises(KeyError) as exc:
            zrr = uut.zapi_responses(allow_override=False)
        print(exc.value)
        assert 'duplicated key: empty' == exc.value.args[0]
    finally:
        # robustness fix: always undo the module-level mutation, even when an assert
        # above fails, so the polluted _DEFAULT_ERRORS cannot break unrelated tests
        del uut._DEFAULT_ERRORS['empty']
def is_indexerror_exception_formatted():
    """Report whether this python formats IndexError exceptions properly.

    Some python versions do not include the error message in str() or repr()
    of an IndexError. This shows up with older Ansible releases where the
    python version is frozen:
    - fails on 3.5.7 but works on 3.5.10
    - fails on 3.6.8 but works on 3.6.9
    - fails on 3.7.4 but works on 3.7.5
    - fails on 3.8.0 but works on 3.8.1
    """
    version = sys.version_info
    if version[:2] == (2, 7) or version[:2] >= (3, 9):
        return True
    # last known broken micro release for each affected minor version
    last_broken = {
        (3, 5): (3, 5, 7),
        (3, 6): (3, 6, 8),
        (3, 7): (3, 7, 4),
        (3, 8): (3, 8, 0),
    }
    threshold = last_broken.get(version[:2])
    return threshold is not None and version[:3] > threshold
# name: (dict, num_records)
# dict is translated into an xml structure, num_records is None or an integer >= 0
_DEFAULT_RESPONSES = {
    'empty': ({}, None),
    'success': ({}, None),
    'no_records': ({'num-records': '0'}, None),
    'one_record_no_data': ({'num-records': '1'}, None),
    'version': ({'version': 'zapi_version'}, None),
    'cserver': ({
        'attributes-list': {
            'vserver-info': {
                'vserver-name': 'cserver'
            }
        }}, 1),
}
# name: (errno, reason)
# errno as int, reason as str
_DEFAULT_ERRORS = {
    'error': (12345, 'synthetic error for UT purpose'),
    'error_missing_api': (13005, 'Unable to find API: xxxx on data vserver')
}


def get_error_desc(error_code):
    """Return the reason registered in _DEFAULT_ERRORS for error_code, or a fallback message."""
    for registered_code, description in _DEFAULT_ERRORS.values():
        if registered_code == error_code:
            return description
    return 'no registered error for %d' % error_code
def build_raw_xml_response(contents, num_records=None, force_dummy=False):
    """Build a raw XML payload (bytes) wrapping contents under a top-level results element.

    contents: dict translated into an XML structure.
    num_records: optional record count, None or an integer >= 0.
    force_dummy: when True, return the fallback payload even when netapp-lib is available.
    """
    top_contents = {'results': contents}
    # bug fix: num_records was accepted but silently dropped; forward it so the
    # requested record count is reflected in the generated XML
    xml, valid = build_zapi_response(top_contents, num_records)
    if valid == 'valid' and not force_dummy:
        return xml.to_string()
    # NOTE(review): this fallback payload looks like it lost its content in extraction
    # (empty bytes); confirm against the upstream source.
    return b''
def test_class_filter():
    """the filter module exposes exactly the two iso8601 duration filters"""
    filters = na_filter_iso8601.FilterModule().filters()
    assert len(filters) == 2
def test_negative_iso8601_duration_from_seconds_str():
    """a non-numeric input to iso8601_duration_from_seconds raises AnsibleFilterError"""
    duration_filter = na_filter_iso8601.FilterModule().filters()['iso8601_duration_from_seconds']
    with pytest.raises(AnsibleFilterError) as exc:
        duration_filter('BAD_INT')
    print('EXC', exc)
    # older python versions do not include the message when formatting the exception
    if ut_utilities.is_indexerror_exception_formatted():
        assert 'BAD_INT' in str(exc)
class AnsibleExitJson(Exception):
    """Raised in place of module.exit_json so the test case can capture the result."""


class AnsibleFailJson(Exception):
    """Raised in place of module.fail_json so the test case can capture the failure."""


def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Replacement for exit_json: package the return data into AnsibleExitJson."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Replacement for fail_json: package the return data into AnsibleFailJson."""
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
def call_main(my_main, default_args=None, module_args=None, fail=False):
    """Invoke a module main() entry point and capture its exit/fail payload.

    my_main: main function for a module.
    default_args: common Ansible options shared by all tests.
    module_args: test-specific options layered on top of default_args.
    fail: when True expect fail_json, otherwise expect exit_json.
    """
    options = copy.deepcopy(default_args) if default_args else {}
    options.update(module_args or {})
    set_module_args(options)
    expected = 'fail' if fail else 'exit'
    return expect_and_capture_ansible_exception(my_main, expected)
def print_warnings(framed=True):
    """Print every collected warning, optionally surrounded by a separator frame."""
    title = ' WARNINGS '
    edge = '-' * 7
    if framed:
        print(edge, title, edge)
    for message in WARNINGS:
        print(message)
    if framed:
        # closing rule spans the two edges plus the title
        print('-' * (2 * len(edge) + len(title)))
def assert_warning_was_raised(warning, partial_match=False):
    """Assert that warning was recorded; with partial_match a substring match is enough."""
    if partial_match:
        matches = [message for message in WARNINGS if warning in message]
        assert matches
    else:
        assert warning in WARNINGS
def test_has_feature_success_user_false():
    """a feature flag explicitly set to False by the user is reported as disabled"""
    flag_name = 'user_deprecation_warning'
    ontap_module = create_ontap_module(DEFAULT_ARGS, {'feature_flags': {flag_name: False}})
    assert not netapp_utils.has_feature(ontap_module, flag_name)
+
+def test_setup_host_options_from_module_params_from_empty():
+    ''' make sure module.params options are reflected in host_options '''
+    module = create_ontap_module(DEFAULT_ARGS)
+    host_options = {}
+    keys = ('hostname', 'username')
+    netapp_utils.setup_host_options_from_module_params(host_options, module, keys)
+    # we gave 2 keys, both copied from module.params
+    assert len(host_options) == 2
+    for key in keys:
+        assert host_options[key] == DEFAULT_ARGS[key]
+
+
+def test_setup_host_options_from_module_params_username_not_set_when_cert_present():
+    ''' make sure module.params options are reflected in host_options '''
+    module = create_ontap_module(DEFAULT_ARGS)
+    # cert auth is already configured on the peer: username must NOT be copied over
+    host_options = dict(cert_filepath='some_path')
+    unchanged_keys = tuple(host_options.keys())
+    copied_over_keys = ('hostname',)
+    ignored_keys = ('username',)
+    keys = unchanged_keys + copied_over_keys + ignored_keys
+    netapp_utils.setup_host_options_from_module_params(host_options, module, keys)
+    # we gave 2 keys
+    assert len(host_options) == 2
+    for key in ignored_keys:
+        assert key not in host_options
+    for key in copied_over_keys:
+        assert host_options[key] == DEFAULT_ARGS[key]
+    print(host_options)
+    for key in unchanged_keys:
+        assert host_options[key] != DEFAULT_ARGS[key]
+
+
+def test_setup_host_options_from_module_params_not_none_fields_are_preserved():
+    ''' make sure module.params options are reflected in host_options '''
+    args = dict(DEFAULT_ARGS)
+    args['cert_filepath'] = 'some_path'
+    module = create_ontap_module(args)
+    # a value already present in host_options must not be overwritten by module.params
+    host_options = dict(cert_filepath='some_other_path')
+    unchanged_keys = tuple(host_options.keys())
+    copied_over_keys = ('hostname',)
+    ignored_keys = ('username',)
+    keys = unchanged_keys + copied_over_keys + ignored_keys
+    netapp_utils.setup_host_options_from_module_params(host_options, module, keys)
+    # we gave 2 keys
+    assert len(host_options) == 2
+    for key in ignored_keys:
+        assert key not in host_options
+    for key in copied_over_keys:
+        assert host_options[key] == args[key]
+    print(host_options)
+    for key in unchanged_keys:
+        assert host_options[key] != args[key]
+
+
+def test_setup_host_options_from_module_params_cert_not_set_when_username_present():
+    ''' make sure module.params options are reflected in host_options '''
+    args = dict(DEFAULT_ARGS)
+    args['cert_filepath'] = 'some_path'
+    module = create_ontap_module(args)
+    # basic auth is already configured on the peer: cert_filepath must NOT be copied over
+    host_options = dict(username='some_name')
+    unchanged_keys = tuple(host_options.keys())
+    copied_over_keys = ('hostname',)
+    ignored_keys = ('cert_filepath',)
+    keys = unchanged_keys + copied_over_keys + ignored_keys
+    netapp_utils.setup_host_options_from_module_params(host_options, module, keys)
+    # we gave 2 keys
+    assert len(host_options) == 2
+    for key in ignored_keys:
+        assert key not in host_options
+    for key in copied_over_keys:
+        assert host_options[key] == args[key]
+    print(host_options)
+    for key in unchanged_keys:
+        assert host_options[key] != args[key]
+
+
+def test_setup_host_options_from_module_params_conflict():
+    ''' mixing basic authentication (username) and certificate authentication (key file) must be rejected '''
+    module = create_ontap_module(DEFAULT_ARGS)
+    host_options = dict(username='some_name', key_filepath='not allowed')
+    msg = 'Error: host cannot have both basic authentication (username/password) and certificate authentication (cert/key files).'
+    assert expect_and_capture_ansible_exception(netapp_utils.setup_host_options_from_module_params,
+                                                'fail', host_options, module, host_options.keys())['msg'] == msg
+
+
+def test_set_auth_method():
+    ''' set_auth_method requires either username/password or cert (and a cert when a key file is given) '''
+    args = {'hostname': None}
+    # neither password nor cert
+    error = expect_and_capture_ansible_exception(netapp_utils.set_auth_method, 'fail', create_ontap_module(args), None, None, None, None)['msg']
+    assert 'Error: ONTAP module requires username/password or SSL certificate file(s)' in error
+    # keyfile but no cert
+    error = expect_and_capture_ansible_exception(netapp_utils.set_auth_method, 'fail', create_ontap_module(args), None, None, None, 'keyfile')['msg']
+    assert 'Error: cannot have a key file without a cert file' in error
diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_invoke_elem.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_invoke_elem.py
new file mode 100644
index 000000000..a185fcb2d
--- /dev/null
+++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_invoke_elem.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2022 NetApp
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""unit tests for module_utils netapp.py - ZAPI invoke_elem
+
+    We cannot use the general UT framework as it patches invoke_elem
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import sys
+
+# pylint: disable=unused-import
+from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \
+    patch_ansible, expect_and_capture_ansible_exception
+from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_raw_xml_response, zapi_responses
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_netapp_lib():
+    pytestmark = pytest.mark.skip("skipping as missing required netapp_lib")
+
+ZRR = zapi_responses({
+})
+
+
+class MockModule:
+    # minimal stand-in for an AnsibleModule: OntapZAPICx only needs a name here
+    def __init__(self):
+        self._name = 'testme'
+
+
+class MockOpener:
+    # stand-in for the urllib opener: records the timeout passed to open(),
+    # and either raises the configured exception or returns the canned response
+    def __init__(self, response=None, exception=None):
+        self.response = response
+        self.exception = exception
+        self.timeout = -1
+
+    def open(self, request, timeout=None):
+        self.timeout = timeout
+        if self.exception:
+            raise self.exception
+        return self.response
+
+
+class MockResponse:
+    # mimics an HTTP response object: read() returns a raw XML payload
+    def __init__(self, contents, force_dummy=False):
+        self.response = build_raw_xml_response(contents, force_dummy=force_dummy)
+        print('RESPONSE', self.response)
+
+    def read(self):
+        return self.response
+
+
+def create_ontapzapicx_object():
+    return netapp_utils.OntapZAPICx(module=MockModule())
+
+
+def test_error_invalid_naelement():
+    ''' should fail when NaElement is None, empty, or not of type NaElement '''
+    zapi_cx = create_ontapzapicx_object()
+    assert str(expect_and_capture_ansible_exception(zapi_cx.invoke_elem, ValueError, {})) ==\
+        'NaElement must be supplied to invoke API'
+    assert str(expect_and_capture_ansible_exception(zapi_cx.invoke_elem, ValueError, {'x': 'yz'})) ==\
+        'NaElement must be supplied to invoke API'
+
+
+def test_exception_with_opener_generic_exception():
+    ''' any non-urllib exception from the opener is wrapped into NaApiError '''
+    zapi_cx = create_ontapzapicx_object()
+    zapi_cx._refresh_conn = False
+    zapi_cx._opener = MockOpener(exception=KeyError('testing'))
+    exc = expect_and_capture_ansible_exception(zapi_cx.invoke_elem, netapp_utils.zapi.NaApiError, ZRR['success'][0])
+    # KeyError('testing') in 3.x but KeyError('testing',) with 2.7
+    assert str(exc.value).startswith("NetApp API failed. Reason - Unexpected error:KeyError('testing'")
+
+
+def test_exception_with_opener_httperror():
+    ''' HTTPError from the opener is reported as code:reason '''
+    if not hasattr(netapp_utils.zapi.urllib.error.HTTPError, 'reason'):
+        # skip the test in 2.6 as netapp_lib is not fully supported
+        # HTTPError does not support reason, and it's not worth changing the code
+        #   raise zapi.NaApiError(exc.code, exc.reason)
+        #   AttributeError: 'HTTPError' object has no attribute 'reason'
+        pytest.skip('this test requires HTTPError.reason which is not available in python 2.6')
+    zapi_cx = create_ontapzapicx_object()
+    zapi_cx._refresh_conn = False
+    zapi_cx._opener = MockOpener(exception=netapp_utils.zapi.urllib.error.HTTPError('url', 400, 'testing', None, None))
+    exc = expect_and_capture_ansible_exception(zapi_cx.invoke_elem, netapp_utils.zapi.NaApiError, ZRR['success'][0])
+    assert str(exc.value) == 'NetApp API failed. Reason - 400:testing'
+
+
+def test_exception_with_opener_urlerror():
+    ''' URLError from the opener: connection refused, generic URL error, and a URLError without a reason attribute '''
+    # ConnectionRefusedError is not defined in 2.7
+    connection_error = ConnectionRefusedError('UT') if sys.version_info >= (3, 0) else 'connection_error'
+    zapi_cx = create_ontapzapicx_object()
+    zapi_cx._refresh_conn = False
+    zapi_cx._opener = MockOpener(exception=netapp_utils.zapi.urllib.error.URLError(connection_error))
+    exc = expect_and_capture_ansible_exception(zapi_cx.invoke_elem, netapp_utils.zapi.NaApiError, ZRR['success'][0])
+    # skip the assert for 2.7
+    # ConnectionRefusedError('UT'), with 3.x but ConnectionRefusedError('UT',), with 3.5
+    assert str(exc.value).startswith("NetApp API failed. Reason - Unable to connect:(ConnectionRefusedError('UT'") or sys.version_info < (3, 0)
+
+    zapi_cx._opener = MockOpener(exception=netapp_utils.zapi.urllib.error.URLError('connection_error'))
+    exc = expect_and_capture_ansible_exception(zapi_cx.invoke_elem, netapp_utils.zapi.NaApiError, ZRR['success'][0])
+    # URLError('connection_error') with 3.x but URL error:URLError('connection_error',) with 2.7
+    assert str(exc.value).startswith("NetApp API failed. Reason - URL error:URLError('connection_error'")
+
+    # force an exception when reading exc.reason
+    exc = netapp_utils.zapi.urllib.error.URLError('connection_error')
+    delattr(exc, 'reason')
+    zapi_cx._opener = MockOpener(exception=exc)
+    exc = expect_and_capture_ansible_exception(zapi_cx.invoke_elem, netapp_utils.zapi.NaApiError, ZRR['success'][0])
+    # URLError('connection_error') with 3.x but URL error:URLError('connection_error',) with 2.7
+    assert str(exc.value).startswith("NetApp API failed. Reason - URL error:URLError('connection_error'")
+
+
+def test_response():
+    ''' a valid XML payload is parsed, and the configured timeout is passed to the opener '''
+    zapi_cx = create_ontapzapicx_object()
+    zapi_cx._refresh_conn = False
+    zapi_cx._timeout = 10
+    zapi_cx._trace = True
+    zapi_cx._opener = MockOpener(MockResponse({}))
+    response = zapi_cx.invoke_elem(ZRR['success'][0])
+    print(response)
+    assert response.to_string() == b''
+    assert zapi_cx._opener.timeout == 10
+
+
+def test_response_no_netapp_lib():
+    ''' same as test_response, but with the dummy (no netapp_lib) XML builder '''
+    zapi_cx = create_ontapzapicx_object()
+    zapi_cx._refresh_conn = False
+    zapi_cx._timeout = 10
+    zapi_cx._trace = True
+    zapi_cx._opener = MockOpener(MockResponse({}, True))
+    response = zapi_cx.invoke_elem(ZRR['success'][0])
+    print(response)
+    assert response.to_string() == b''
+    assert zapi_cx._opener.timeout == 10
+
+
+def mock_build_opener(zapi_cx, opener):
+    # returns a fake _build_opener which installs `opener` on the connection when called
+    def build_opener():
+        zapi_cx._opener = opener
+    return build_opener
+
+
+def test_response_build_opener():
+    ''' when no opener is preset, invoke_elem builds one; timeout stays None when not configured '''
+    zapi_cx = create_ontapzapicx_object()
+    zapi_cx._refresh_conn = False
+    zapi_cx._trace = True
+    zapi_cx._build_opener = mock_build_opener(zapi_cx, MockOpener(MockResponse({})))
+    response = zapi_cx.invoke_elem(ZRR['success'][0])
+    print(response)
+    assert response.to_string() == b''
+    assert zapi_cx._opener.timeout is None
diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_ipaddress.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_ipaddress.py
new file mode 100644
index 000000000..5d381d852
--- /dev/null
+++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_ipaddress.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2022 NetApp
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for module_utils netapp_ipaddress.py - REST features '''
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import sys
+
+from ansible.module_utils import basic
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+# pylint: disable=unused-import
+from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import expect_and_capture_ansible_exception, patch_ansible, create_module
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils import netapp_ipaddress
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7):
+    pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available')
+
+
+class MockONTAPModule(object):
+    # minimal wrapper exposing a real AnsibleModule built from the ONTAP host spec
+    def __init__(self):
+        self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec())
+
+
+def create_ontap_module(args=None):
+    if args is None:
+        args = {'hostname': 'xxx'}
+    return create_module(MockONTAPModule, args)
+
+
+def test_check_ipaddress_is_present():
+    # returns None (no error) when the ipaddress library is available
+    assert netapp_ipaddress._check_ipaddress_is_present(None) is None
+
+
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp_ipaddress.HAS_IPADDRESS_LIB', False)
+def test_module_fail_when_netapp_lib_missing():
+    ''' required lib missing '''
+    error = 'Error: the python ipaddress package is required for this module. Import error: None'
+    assert error in expect_and_capture_ansible_exception(netapp_ipaddress._check_ipaddress_is_present, 'fail', create_ontap_module().module)['msg']
+
+
+def test_validate_and_compress_ip_address():
+    ''' valid IPv4 is unchanged; valid IPv6 is compressed to its canonical short form '''
+    module = create_ontap_module().module
+    valid_addresses = [
+        # IPv4
+        ['10.11.12.13', '10.11.12.13'],
+        # IPv6
+        ['1111:0123:0012:0001:abcd:0abc:9891:abcd', '1111:123:12:1:abcd:abc:9891:abcd'],
+        ['1111:0000:0000:0000:abcd:0abc:9891:abcd', '1111::abcd:abc:9891:abcd'],
+        ['1111:0000:0000:0012:abcd:0000:0000:abcd', '1111::12:abcd:0:0:abcd'],
+        ['ffff:ffff:0000:0000:0000:0000:0000:0000', 'ffff:ffff::'],
+    ]
+    for before, after in valid_addresses:
+        assert after == netapp_ipaddress.validate_and_compress_ip_address(before, module)
+
+
+def test_negative_validate_and_compress_ip_address():
+    ''' malformed IPv4/IPv6 addresses are rejected with fail_json '''
+    module = create_ontap_module().module
+    invalid_addresses = [
+        # IPv4
+        ['10.11.12.345', 'Invalid IP address value 10.11.12.345'],
+        # IPv6
+        ['1111:0123:0012:0001:abcd:0abc:9891:abcg', 'Invalid IP address value'],
+        ['1111:0000:0000:0000:abcd:9891:abcd', 'Invalid IP address value'],
+        ['1111:::0012:abcd::abcd', 'Invalid IP address value'],
+    ]
+    for before, error in invalid_addresses:
+        assert error in expect_and_capture_ansible_exception(netapp_ipaddress.validate_and_compress_ip_address, 'fail', before, module)['msg']
+
+
+def test_netmask_to_len():
+    ''' dotted-quad netmask (IPv4) or integer/str prefix length are converted to a prefix length '''
+    module = create_ontap_module().module
+    assert netapp_ipaddress.netmask_to_netmask_length('10.10.10.10', '255.255.0.0', module) == 16
+    assert netapp_ipaddress.netmask_to_netmask_length('1111::', 16, module) == 16
+    assert netapp_ipaddress.netmask_to_netmask_length('1111::', '16', module) == 16
+    # an IPv6 netmask in address form is not supported, only a prefix length
+    error = 'Error: only prefix_len is supported for IPv6 addresses, got ffff::'
+    assert error in expect_and_capture_ansible_exception(netapp_ipaddress.netmask_to_netmask_length, 'fail', '1111::', 'ffff::', module)['msg']
+    error = 'Error: Invalid IP network value 10.11.12.13/abc.'
+    assert error in expect_and_capture_ansible_exception(netapp_ipaddress.netmask_to_netmask_length, 'fail', '10.11.12.13', 'abc', module)['msg']
+
+
+def test_len_to_netmask():
+    ''' prefix length back to netmask, for IPv4 and IPv6 '''
+    module = create_ontap_module().module
+    assert netapp_ipaddress.netmask_length_to_netmask('10.10.10.10', 16, module) == '255.255.0.0'
+    assert netapp_ipaddress.netmask_length_to_netmask('1111::', 16, module) == 'ffff::'
+
+
+def test_validate_ip_address_is_network_address():
+    ''' address must be the network address of its subnet (host bits all zero) '''
+    module = create_ontap_module().module
+    assert netapp_ipaddress.validate_ip_address_is_network_address('10.11.12.0', module) is None
+    assert netapp_ipaddress.validate_ip_address_is_network_address('10.11.12.0/24', module) is None
+    # 10.11.12.0/21 has host bits set (network would be 10.11.8.0/21)
+    error = 'Error: Invalid IP network value 10.11.12.0/21'
+    assert error in expect_and_capture_ansible_exception(netapp_ipaddress.validate_ip_address_is_network_address, 'fail', '10.11.12.0/21', module)['msg']
diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_module.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_module.py
new file mode 100644
index 000000000..55729d7cd
--- /dev/null
+++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_module.py
@@ -0,0 +1,885 @@
+# Copyright (c) 2018-2022 NetApp
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests for module_utils netapp_module.py """
+from __future__ import (absolute_import, division, print_function)
+import copy
+__metaclass__ = type
+
+import pytest
+import sys
+
+from ansible.module_utils import basic
+from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
+import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule as na_helper, cmp as na_cmp
+# pylint: disable=unused-import
+from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\
+    assert_no_warnings, assert_warning_was_raised, clear_warnings, patch_ansible, create_module, expect_and_capture_ansible_exception
+from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response
+from ansible_collections.netapp.ontap.tests.unit.framework import ut_utilities
+
+
+class MockONTAPModule(object):
+    # na_helper is built from the AnsibleModule (v1 style)
+    def __init__(self):
+        self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec())
+        self.na_helper = na_helper(self.module)
+        self.na_helper.set_parameters(self.module.params)
+
+
+class MockONTAPModuleV2(object):
+    # na_helper is built from the wrapper object itself (v2 style)
+    def __init__(self):
+        self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec())
+        self.na_helper = na_helper(self)
+        self.na_helper.set_parameters(self.module.params)
+
+
+def create_ontap_module(args=None, version=1):
+    if version == 2:
+        return create_module(MockONTAPModuleV2, args)
+    return create_module(MockONTAPModule, args)
+
+
+def test_get_cd_action_create():
+    """ validate cd_action for create """
+    current = None
+    desired = {'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_cd_action(current, desired)
+    assert result == 'create'
+
+
+def test_get_cd_action_delete():
+    """ validate cd_action for delete """
+    current = {'state': 'absent'}
+    desired = {'state': 'absent'}
+    my_obj = na_helper()
+    result = my_obj.get_cd_action(current, desired)
+    assert result == 'delete'
+
+
+def test_get_cd_action_already_exist():
+    """ validate cd_action for returning None """
+    current = {'state': 'whatever'}
+    desired = {'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_cd_action(current, desired)
+    assert result is None
+
+
+def test_get_cd_action_already_absent():
+    """ validate cd_action for returning None """
+    current = None
+    desired = {'state': 'absent'}
+    my_obj = na_helper()
+    result = my_obj.get_cd_action(current, desired)
+    assert result is None
+
+
+def test_get_modified_attributes_for_no_data():
+    """ validate modified attributes when current is None """
+    current = None
+    desired = {'name': 'test'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired)
+    assert result == {}
+
+
+def test_get_modified_attributes():
+    """ validate modified attributes """
+    current = {'name': ['test', 'abcd', 'xyz', 'pqr'], 'state': 'present'}
+    desired = {'name': ['abcd', 'abc', 'xyz', 'pqr'], 'state': 'absent'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired)
+    assert result == desired
+
+
+def test_get_modified_attributes_for_intersecting_mixed_list():
+    """ validate modified attributes for list diff """
+    current = {'name': [2, 'four', 'six', 8]}
+    desired = {'name': ['a', 8, 'ab', 'four', 'abcd']}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'name': ['a', 'ab', 'abcd']}
+
+
+def test_get_modified_attributes_for_intersecting_list():
+    """ validate modified attributes for list diff """
+    current = {'name': ['two', 'four', 'six', 'eight']}
+    desired = {'name': ['a', 'six', 'ab', 'four', 'abc']}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'name': ['a', 'ab', 'abc']}
+
+
+def test_get_modified_attributes_for_nonintersecting_list():
+    """ validate modified attributes for list diff """
+    current = {'name': ['two', 'four', 'six', 'eight']}
+    desired = {'name': ['a', 'ab', 'abd']}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'name': ['a', 'ab', 'abd']}
+
+
+def test_get_modified_attributes_for_list_of_dicts_no_data():
+    """ validate modified attributes for list diff """
+    current = None
+    desired = {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {}
+
+
+def test_get_modified_attributes_for_intersecting_list_of_dicts():
+    """ validate modified attributes for list diff """
+    current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]}
+    desired = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]}
+
+
+def test_get_modified_attributes_for_nonintersecting_list_of_dicts():
+    """ validate modified attributes for list diff """
+    current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]}
+    desired = {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+
+
+def test_get_modified_attributes_for_list_diff():
+    """ validate modified attributes for list diff """
+    current = {'name': ['test', 'abcd'], 'state': 'present'}
+    desired = {'name': ['abcd', 'abc'], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'name': ['abc']}
+
+
+def test_get_modified_attributes_for_no_change():
+    """ validate modified attributes for same data in current and desired """
+    current = {'name': 'test'}
+    desired = {'name': 'test'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired)
+    assert result == {}
+
+
+def test_get_modified_attributes_for_an_empty_desired_list():
+    """ validate modified attributes for an empty desired list """
+    current = {'snapmirror_label': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+    desired = {'snapmirror_label': [], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired)
+    assert result == {'snapmirror_label': []}
+
+
+def test_get_modified_attributes_for_an_empty_desired_list_diff():
+    """ validate modified attributes for an empty desired list with diff"""
+    current = {'snapmirror_label': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+    desired = {'snapmirror_label': [], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'snapmirror_label': []}
+
+
+def test_get_modified_attributes_for_an_empty_current_list():
+    """ validate modified attributes for an empty current list """
+    current = {'snapmirror_label': [], 'state': 'present'}
+    desired = {'snapmirror_label': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired)
+    assert result == {'snapmirror_label': ['daily', 'weekly', 'monthly']}
+
+
+def test_get_modified_attributes_for_an_empty_current_list_diff():
+    """ validate modified attributes for an empty current list with diff"""
+    current = {'snapmirror_label': [], 'state': 'present'}
+    desired = {'snapmirror_label': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'snapmirror_label': ['daily', 'weekly', 'monthly']}
+
+
+def test_get_modified_attributes_for_empty_lists():
+    """ validate modified attributes for empty lists """
+    current = {'snapmirror_label': [], 'state': 'present'}
+    desired = {'snapmirror_label': [], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired)
+    assert result == {}
+
+
+def test_get_modified_attributes_for_empty_lists_diff():
+    """ validate modified attributes for empty lists with diff """
+    current = {'snapmirror_label': [], 'state': 'present'}
+    desired = {'snapmirror_label': [], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {}
+
+
+def test_get_modified_attributes_equal_lists_with_duplicates():
+    """ validate modified attributes for equal lists with duplicates """
+    current = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+    desired = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, False)
+    assert result == {}
+
+
+def test_get_modified_attributes_equal_lists_with_duplicates_diff():
+    """ validate modified attributes for equal lists with duplicates with diff """
+    current = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+    desired = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {}
+
+
+def test_get_modified_attributes_for_current_list_with_duplicates():
+    """ validate modified attributes for current list with duplicates """
+    current = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+    desired = {'schedule': ['daily', 'daily', 'weekly', 'monthly'], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, False)
+    assert result == {'schedule': ['daily', 'daily', 'weekly', 'monthly']}
+
+
+def test_get_modified_attributes_for_current_list_with_duplicates_diff():
+    """ validate modified attributes for current list with duplicates with diff """
+    current = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+    desired = {'schedule': ['daily', 'daily', 'weekly', 'monthly'], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    # with diff, only desired entries missing from current are reported - here none
+    assert result == {'schedule': []}
+
+
+def test_get_modified_attributes_for_desired_list_with_duplicates():
+    """ validate modified attributes for desired list with duplicates """
+    current = {'schedule': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+    desired = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, False)
+    assert result == {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily']}
+
+
+def test_get_modified_attributes_for_desired_list_with_duplicates_diff():
+    """ validate modified attributes for desired list with duplicates with diff """
+    current = {'schedule': ['daily', 'weekly', 'monthly'], 'state': 'present'}
+    desired = {'schedule': ['hourly', 'daily', 'daily', 'weekly', 'monthly', 'daily'], 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'schedule': ['hourly', 'daily', 'daily']}
+
+
+def test_get_modified_attributes_exceptions():
+    """ validate exceptions """
+    current = {'schedule': {'name': 'weekly'}, 'state': 'present'}
+    desired = {'schedule': 'weekly', 'state': 'present'}
+    my_obj = create_ontap_module({'hostname': None})
+    # mismatch in structure
+    error = expect_and_capture_ansible_exception(my_obj.na_helper.get_modified_attributes, TypeError, current, desired)
+    assert "Expecting dict, got: weekly with current: {'name': 'weekly'}" in error
+    # mismatch in types
+    if sys.version_info[:2] > (3, 0):
+        # our cmp function reports an exception. But python 2.x has its own version.
+        desired = {'schedule': {'name': 12345}, 'state': 'present'}
+        error = expect_and_capture_ansible_exception(my_obj.na_helper.get_modified_attributes, TypeError, current, desired)
+        assert ("unorderable types:" in error  # 3.5
+                or "'>' not supported between instances of 'str' and 'int'" in error)  # 3.9
+
+
+def test_get_modified_attributes_for_dicts():
+    """ validate modified attributes for dict of dicts """
+    current = {'schedule': {'name': 'weekly'}, 'state': 'present'}
+    desired = {'schedule': {'name': 'daily'}, 'state': 'present'}
+    my_obj = na_helper()
+    result = my_obj.get_modified_attributes(current, desired, True)
+    assert result == {'schedule': {'name': 'daily'}}
+
+
+def test_is_rename_action_for_empty_input():
+    """ validate rename action for input None """
+    source = None
+    target = None
+    my_obj = na_helper()
+    result = my_obj.is_rename_action(source, target)
+    assert result == source
+
+
+def test_is_rename_action_for_no_source():
+    """ validate rename action when source is None """
+    source = None
+    target = 'test2'
+    my_obj = na_helper()
+    result = my_obj.is_rename_action(source, target)
+    assert result is False
+
+
+def test_is_rename_action_for_no_target():
+    """ validate rename action when target is None """
+    source = 'test2'
+    target = None
+    my_obj = na_helper()
+    result = my_obj.is_rename_action(source, target)
+    assert result is True
+
+
+def test_is_rename_action():
+    """ validate rename action """
+    source = 'test'
+    target = 'test2'
+    my_obj = na_helper()
+    result = my_obj.is_rename_action(source, target)
+    assert result is False
+
+
+def test_required_is_not_set_to_none():
+    """ if a key is present, without a value, Ansible sets it to None """
+    my_obj = create_ontap_module({'hostname': None})
+    msg = 'hostname requires a value, got: None'
+    assert msg == expect_and_capture_ansible_exception(my_obj.na_helper.check_and_set_parameters, 'fail', my_obj.module)['msg']
+
+    # force a value different than None
+    my_obj.module.params['hostname'] = 1
+    my_params = my_obj.na_helper.check_and_set_parameters(my_obj.module)
+    assert set(my_params.keys()) == set(['hostname', 'https', 'validate_certs', 'use_rest'])
+
+
+def test_sanitize_wwn_no_action():
+    """ no change """
+    initiator = 'tEsT'
+    expected = initiator
+    my_obj = na_helper()
+    result = my_obj.sanitize_wwn(initiator)
+    assert result == expected
+
+
+def test_sanitize_wwn_no_action_valid_iscsi():
+    """ no change """
+    initiator = 'iqn.1995-08.com.eXaMpLe:StRiNg'
+    expected = initiator
+    my_obj = na_helper()
+    result = my_obj.sanitize_wwn(initiator)
+    assert result == expected
+
+
+def test_sanitize_wwn_no_action_valid_wwn():
+    """ no change """
+    # a well-formed WWN is normalized to lower case
+    initiator = '01:02:03:04:0A:0b:0C:0d'
+    expected = initiator.lower()
+    my_obj = na_helper()
+    result = my_obj.sanitize_wwn(initiator)
+    assert result == expected
+
+
+def test_filter_empty_dict():
+    """ empty dict return empty dict """
+    my_obj = na_helper()
+    arg = {}
+    result = my_obj.filter_out_none_entries(arg)
+    assert arg == result
+
+
+def test_filter_empty_list():
+    """ empty list return empty list """
+    my_obj = na_helper()
+    arg = []
+    result = my_obj.filter_out_none_entries(arg)
+    assert arg == result
+
+
+def test_filter_typeerror_on_none():
+    """ filter_out_none_entries only accepts a list or a dict """
+    my_obj = na_helper()
+    arg = None
+    with pytest.raises(TypeError) as exc:
+        my_obj.filter_out_none_entries(arg)
+    if sys.version_info[:2] < (3, 0):
+        # the assert fails on 2.x
+        return
+    msg = "unexpected type "
+    assert exc.value.args[0] == msg
+
+
+def test_filter_typeerror_on_str():
+    """ filter_out_none_entries only accepts a list or a dict """
+    my_obj = na_helper()
+    arg = ""
+    with pytest.raises(TypeError) as exc:
+        my_obj.filter_out_none_entries(arg)
+    if sys.version_info[:2] < (3, 0):
+        # the assert fails on 2.x
+        return
+    msg = "unexpected type "
+    assert exc.value.args[0] == msg
+
+
+def test_filter_simple_dict():
+    """ simple dict return simple dict """
+    my_obj = na_helper()
+    arg = dict(a=None, b=1, c=None, d=2, e=3)
+    expected = dict(b=1, d=2, e=3)
+    result = my_obj.filter_out_none_entries(arg)
+    assert expected == result
+
+
+def test_filter_simple_list():
+    """ simple list return simple list """
+    my_obj = na_helper()
+    arg = [None, 2, 3, None, 5]
+    expected = [2, 3, 5]
+    result = my_obj.filter_out_none_entries(arg)
+    assert expected == result
+
+
+def test_filter_dict_dict():
+    """ simple dict return simple dict """
+    my_obj = na_helper()
+    arg = dict(a=None, b=dict(u=1, v=None, w=2), c={}, d=2, e=3)
+    expected = dict(b=dict(u=1, w=2), d=2, e=3)
+    result = my_obj.filter_out_none_entries(arg)
+    assert expected == result
+
+
+def test_filter_list_list():
+    """ simple list return simple list """
+    my_obj = na_helper()
+    arg = [None, [1, None, 3], 3, None, 5]
+    expected = [[1, 3], 3, 5]
+    result = my_obj.filter_out_none_entries(arg)
+    assert expected == result
+
+
+def test_filter_dict_list_dict():
+    """ simple dict return simple dict """
+    my_obj = na_helper()
+    arg = dict(a=None, b=[dict(u=1, v=None, w=2), 5, None, dict(x=6, y=None)], c={}, d=2, e=3)
+    expected = dict(b=[dict(u=1, w=2), 5, dict(x=6)], d=2, e=3)
+    result = my_obj.filter_out_none_entries(arg)
+    assert expected == result
+
+
+def test_filter_list_dict_list():
+    """ simple list return simple list """
+    my_obj = na_helper()
+    arg = [None, [1, None, 3], dict(a=None, b=[7, None, 9], c=None, d=dict(u=None, v=10)), None, 5]
+    expected = [[1, 3], dict(b=[7, 9], d=dict(v=10)), 5]
+    result = my_obj.filter_out_none_entries(arg)
+    assert expected == result
+
+
+def test_convert_value():
+    """ positive tests """
+    my_obj = na_helper()
+    for value, convert_to, expected in [
+        ('any', None, 'any'),
+        (12345, None, 12345),
+        ('12345', int, 12345),
+        ('any', str, 'any'),
+        ('true', bool, True),
+        ('false', bool, False),
+        ('online', 'bool_online', True),
+        ('any', 'bool_online', False),
+    ]:
+        result, error = my_obj.convert_value(value, convert_to)
+        assert error is None
+        assert expected == result
+
+
+def test_convert_value_with_error():
+ """ negative tests """ + my_obj = na_helper() + for value, convert_to, expected in [ + (12345, 'any', "Unexpected type:"), + ('any', int, "Unexpected value for int: any"), + ('any', bool, "Unexpected value: any received from ZAPI for boolean attribute"), + ]: + result, error = my_obj.convert_value(value, convert_to) + print(value, convert_to, result, '"%s"' % expected, '"%s"' % error) + assert result is None + assert expected in error + + +def test_convert_value_with_exception(): + """ negative tests """ + my_obj = create_ontap_module({'hostname': None}) + expect_and_capture_ansible_exception(my_obj.na_helper.convert_value, 'fail', 'any', 'any') + + +def get_zapi_info(): + return { + 'a': {'b': '12345', 'bad_stuff': ['a', 'b'], 'none_stuff': None} + } + + +def get_zapi_na_element(zapi_info): + na_element, valid = build_zapi_response(zapi_info) + if valid != 'valid' and sys.version_info[:2] < (2, 7): + pytest.skip('Skipping Unit Tests on 2.6 as netapp-lib is not available') + assert valid == 'valid' + return na_element + + +def test_zapi_get_value(): + na_element = get_zapi_na_element(get_zapi_info()) + my_obj = na_helper() + assert my_obj.zapi_get_value(na_element, ['a', 'b'], convert_to=int) == 12345 + # missing key returns None if sparse dict is allowed (default) + assert my_obj.zapi_get_value(na_element, ['a', 'c'], convert_to=int) is None + # missing key returns 'default' - note, no conversion - if sparse dict is allowed (default) + assert my_obj.zapi_get_value(na_element, ['a', 'c'], convert_to=int, default='default') == 'default' + + +def test_zapi_get_value_with_exception(): + na_element = get_zapi_na_element(get_zapi_info()) + my_obj = create_ontap_module({'hostname': None}) + # KeyError + error = expect_and_capture_ansible_exception(my_obj.na_helper.zapi_get_value, 'fail', na_element, ['a', 'c'], required=True)['msg'] + assert 'No element by given name c.' 
in error + + +def test_safe_get(): + na_element = get_zapi_na_element(get_zapi_info()) + my_obj = na_helper() + assert my_obj.safe_get(na_element, ['a', 'b']) == '12345' + assert my_obj.safe_get(na_element, ['a', 'c']) is None + assert my_obj.safe_get(get_zapi_info(), ['a', 'b']) == '12345' + assert my_obj.safe_get(get_zapi_info(), ['a', 'c']) is None + assert my_obj.safe_get(get_zapi_info(), ['a', 'none_stuff', 'extra']) is None # TypeError on None + + +def test_safe_get_dict_of_list(): + my_obj = na_helper() + my_dict = {'a': ['b', 'c', {'d': ['e']}]} + assert my_obj.safe_get(my_dict, ['a', 0]) == 'b' + assert my_obj.safe_get(my_dict, ['a', 2, 'd', 0]) == 'e' + assert my_obj.safe_get(my_dict, ['a', 3]) is None + + +def test_safe_get_with_exception(): + na_element = get_zapi_na_element(get_zapi_info()) + my_obj = create_ontap_module({'hostname': None}) + # KeyError + error = expect_and_capture_ansible_exception(my_obj.na_helper.safe_get, KeyError, na_element, ['a', 'c'], allow_sparse_dict=False) + assert 'No element by given name c.' in error + error = expect_and_capture_ansible_exception(my_obj.na_helper.safe_get, KeyError, get_zapi_info(), ['a', 'c'], allow_sparse_dict=False) + assert 'c' == error + # IndexError + error = expect_and_capture_ansible_exception(my_obj.na_helper.safe_get, IndexError, get_zapi_info(), ['a', 'bad_stuff', 4], allow_sparse_dict=False) + print('EXC', error) + if ut_utilities.is_indexerror_exception_formatted(): + assert 'list index out of range' in str(error) + error = expect_and_capture_ansible_exception(my_obj.na_helper.safe_get, IndexError, get_zapi_info(), ['a', 'bad_stuff', -4], allow_sparse_dict=False) + print('EXC', error) + if ut_utilities.is_indexerror_exception_formatted(): + assert 'list index out of range' in str(error) + # TypeError - not sure I can build a valid ZAPI NaElement that can give a type error, but using a dict worked. 
+ error = expect_and_capture_ansible_exception(my_obj.na_helper.safe_get, TypeError, get_zapi_info(), ['a', 'bad_stuff', 'extra'], allow_sparse_dict=False) + # 'list indices must be integers, not str' with 2.7 + # 'list indices must be integers or slices, not str' with 3.x + assert 'list indices must be integers' in error + error = expect_and_capture_ansible_exception(my_obj.na_helper.safe_get, TypeError, get_zapi_info(), ['a', 'none_stuff', 'extra'], allow_sparse_dict=False) + # 'NoneType' object has no attribute '__getitem__' with 2.7 + # 'NoneType' object is not subscriptable with 3.x + assert "'NoneType' object " in error + + +def test_get_value_for_bool(): + my_obj = na_helper() + for value, from_zapi, expected in [ + (None, 'any', None), + ('true', True, True), + ('false', True, False), + ('any', True, False), # no error checking if key is not present + (True, False, 'true'), + (False, False, 'false'), + ('any', False, 'true'), # no error checking if key is not present + ]: + result = my_obj.get_value_for_bool(from_zapi, value) + print(value, from_zapi, result) + assert result == expected + + +def test_get_value_for_bool_with_exception(): + na_element = get_zapi_na_element(get_zapi_info()) + my_obj = create_ontap_module({'hostname': None}) + # Error with from_zapi=True if key is present + error = expect_and_capture_ansible_exception(my_obj.na_helper.get_value_for_bool, TypeError, True, 1234, 'key') + assert "expecting 'str' type for 'key': 1234" in error + error = expect_and_capture_ansible_exception(my_obj.na_helper.get_value_for_bool, ValueError, True, 'any', 'key') + assert "Unexpected value: 'any' received from ZAPI for boolean attribute: 'key'" == error + # TypeError - expecting a bool + error = expect_and_capture_ansible_exception(my_obj.na_helper.get_value_for_bool, TypeError, False, 'any', 'key') + assert "expecting 'bool' type for 'key': 'any'" in error + + +def test_get_value_for_int(): + my_obj = na_helper() + for value, from_zapi, expected in [ + 
def test_get_value_for_int_with_exception():
    na_element = get_zapi_na_element(get_zapi_info())
    my_obj = create_ontap_module({'hostname': None})
    # Error with from_zapi=True if key is present
    error = expect_and_capture_ansible_exception(my_obj.na_helper.get_value_for_int, TypeError, True, 1234, 'key')
    assert "expecting 'str' type for 'key': 1234" in error
    error = expect_and_capture_ansible_exception(my_obj.na_helper.get_value_for_int, ValueError, True, 'any', 'key')
    assert "invalid literal for int() with base 10: 'any'" == error
    # TypeError - expecting a int
    error = expect_and_capture_ansible_exception(my_obj.na_helper.get_value_for_int, TypeError, False, 'any', 'key')
    assert "expecting 'int' type for 'key': 'any'" in error


def test_get_value_for_list():
    my_obj = na_helper()
    zapi_info = {
        'a': [{'b': 'a1'}, {'b': 'a2'}, {'b': 'a3'}]
    }
    for from_zapi, zapi_parent, zapi_child, data, expected in [
        (True, None, None, None, []),
        (True, get_zapi_na_element(zapi_info), None, None, [None]),
        (True, get_zapi_na_element(get_zapi_info()).get_child_by_name('a'), None, None, ['12345', None, None]),
        (True, get_zapi_na_element(zapi_info).get_child_by_name('a'), None, None, ['a1', 'a2', 'a3']),
        (False, 'parent', 'child', [], b''),
        (False, 'parent', 'child', ['1', '1'], b'11'),
    ]:
        result = my_obj.get_value_for_list(from_zapi, zapi_parent, zapi_child, data)
        print(from_zapi, expected, result)
        if from_zapi:
            if zapi_parent:
                print(zapi_parent.to_string())
            # ordering maybe different with 3.5 compared to 3.9 or 2.7
            assert set(result) == set(expected)
        else:
            print(result.to_string())
            assert result.to_string() == expected


def test_zapi_get_attrs():
    my_obj = na_helper()
    zapi_info = {
        'a': {'b': 'a1', 'c': 'a2', 'd': 'a3', 'int': '123'}
    }
    naelement = get_zapi_na_element(zapi_info)
    attr_dict = {
        'first': {'key_list': ['a', 'b']}
    }
    result = {}
    my_obj.zapi_get_attrs(naelement, attr_dict, result)
    assert result == {'first': 'a1'}

    # if element not found return None, unless omitnone is True
    attr_dict = {
        'none': {'key_list': ['a', 'z'], 'omitnone': True}
    }
    my_obj.zapi_get_attrs(naelement, attr_dict, result)
    assert result == {'first': 'a1'}

    # if element not found return None when required and omitnone are False
    attr_dict = {
        'none': {'key_list': ['a', 'z']}
    }
    my_obj.zapi_get_attrs(naelement, attr_dict, result)
    assert result == {'first': 'a1', 'none': None}

    # if element not found return default
    result = {}
    attr_dict = {
        'none': {'key_list': ['a', 'z'], 'default': 'some_default'}
    }
    my_obj.zapi_get_attrs(naelement, attr_dict, result)
    assert result == {'none': 'some_default'}

    # convert to int
    result = {}
    attr_dict = {
        'int': {'key_list': ['a', 'int'], 'convert_to': int}
    }
    my_obj.zapi_get_attrs(naelement, attr_dict, result)
    assert result == {'int': 123}

    # if element not found return None, unless required is True
    my_obj = create_ontap_module({'hostname': 'abc'})
    attr_dict = {
        'none': {'key_list': ['a', 'z'], 'required': True}
    }
    # the contents of to_string() may be in a different sequence depending on the python version
    assert expect_and_capture_ansible_exception(my_obj.na_helper.zapi_get_attrs, 'fail', naelement, attr_dict, result)['msg'].startswith((
        "Error reading ['a', 'z'] from b'",     # python 3.x
        "Error reading ['a', 'z'] from "        # python 2.7
    ))


def test_set_parameters():
    my_obj = na_helper()
    adict = dict((x, x * x) for x in range(10))
    assert my_obj.set_parameters(adict) == adict
    assert my_obj.parameters == adict
    assert len(my_obj.parameters) == 10

    # None values are not copied
    adict[3] = None
    assert my_obj.set_parameters(adict) != adict
    assert my_obj.parameters != adict
    assert len(my_obj.parameters) == 9
def test_get_caller():
    """ get_caller(depth) walks the stack to report the calling function's name """
    assert na_helper.get_caller(0) == 'get_caller'
    assert na_helper.get_caller(1) == 'test_get_caller'

    def one(depth):
        return na_helper.get_caller(depth)
    assert one(1) == 'one'

    def two():
        return one(2)
    assert two() == 'two'

    def three():
        return two(), one(3)
    assert three() == ('two', 'test_get_caller')


@patch('traceback.extract_stack')
def test_get_caller_2_7(mock_frame):
    # python 2.7 returns plain tuples rather than FrameSummary objects
    frame = ('first', 'second', 'function_name')
    mock_frame.return_value = [frame]
    assert na_helper.get_caller(0) == 'function_name'


@patch('traceback.extract_stack')
def test_get_caller_bad_format(mock_frame):
    # a malformed frame tuple is reported rather than raising
    frame = ('first', 'second')
    mock_frame.return_value = [frame]
    assert na_helper.get_caller(0) == "Error retrieving function name: tuple index out of range - [('first', 'second')]"


def test_fail_on_error():
    my_obj = create_ontap_module({'hostname': 'abc'})
    assert my_obj.na_helper.fail_on_error(None) is None
    assert expect_and_capture_ansible_exception(my_obj.na_helper.fail_on_error, 'fail', 'error_msg')['msg'] ==\
        'Error in expect_and_capture_ansible_exception: error_msg'
    assert expect_and_capture_ansible_exception(my_obj.na_helper.fail_on_error, 'fail', 'error_msg', 'api')['msg'] ==\
        'Error in expect_and_capture_ansible_exception: calling api: api: error_msg'
    previous_errors = ['some_errror']
    exc = expect_and_capture_ansible_exception(my_obj.na_helper.fail_on_error, 'fail', 'error_msg', 'api', previous_errors=previous_errors)
    assert exc['msg'] == 'Error in expect_and_capture_ansible_exception: calling api: api: error_msg'
    # NOTE(review): compares the reported previous_errors against the first list element only
    # - confirm against fail_on_error's contract (it may flatten a single-element list)
    assert exc['previous_errors'] == previous_errors[0]
    exc = expect_and_capture_ansible_exception(my_obj.na_helper.fail_on_error, 'fail', 'error_msg', 'api', True)
    assert exc['msg'] == 'Error in expect_and_capture_ansible_exception: calling api: api: error_msg'
    assert exc['stack']
    delattr(my_obj.na_helper, 'ansible_module')
    assert expect_and_capture_ansible_exception(my_obj.na_helper.fail_on_error, AttributeError, 'error_message') ==\
        "Expecting self.ansible_module to be set when reporting {'msg': 'Error in expect_and_capture_ansible_exception: error_message'}"


def test_cmp():
    assert na_cmp(None, 'any') == -1
    # string comparison ignores case
    assert na_cmp('ABC', 'abc') == 0
    assert na_cmp('abcd', 'abc') == 1
    assert na_cmp('abd', 'abc') == 1
    assert na_cmp(['abd', 'abc'], ['abc', 'abd']) == 0
    # list comparison ignores case
    assert na_cmp(['ABD', 'abc'], ['abc', 'abd']) == 0
    # but not duplicates
    assert na_cmp(['ABD', 'ABD', 'abc'], ['abc', 'abd']) == 1


def test_fall_back_to_zapi():
    my_obj = create_ontap_module({'hostname': 'abc'}, version=2)
    # use_rest: never - no fallback needed, no warning
    parameters = {'use_rest': 'never'}
    assert my_obj.na_helper.fall_back_to_zapi(my_obj.na_helper.ansible_module, 'some message', parameters) is None
    assert_no_warnings()

    # use_rest: auto - falls back with a warning
    parameters = {'use_rest': 'auto'}
    assert my_obj.na_helper.fall_back_to_zapi(my_obj.na_helper.ansible_module, 'some message', parameters) is False
    assert_warning_was_raised('Falling back to ZAPI: some message')

    # use_rest: always - cannot fall back, module fails
    parameters = {'use_rest': 'always'}
    clear_warnings()
    assert 'Error: some message' in expect_and_capture_ansible_exception(
        my_obj.na_helper.fall_back_to_zapi, 'fail', my_obj.na_helper.ansible_module, 'some message', parameters)['msg']
    assert_no_warnings()


def test_module_deprecated():
    my_obj = create_ontap_module({'hostname': 'abc'})
    assert my_obj.na_helper.module_deprecated(my_obj.na_helper.ansible_module) is None
    assert_warning_was_raised('This module only supports ZAPI and is deprecated.  It will no longer work with newer versions of ONTAP.  '
                              'The final ONTAP version to support ZAPI is ONTAP 9.12.1.')


def test_module_replaces():
    my_obj = create_ontap_module({'hostname': 'abc'})
    new_module = 'na_ontap_new_modules'
    assert my_obj.na_helper.module_replaces(new_module, my_obj.na_helper.ansible_module) is None
    assert_warning_was_raised('netapp.ontap.%s should be used instead.' % new_module)


def test_compare_chmod_value():
    """ compare_chmod_value matches an octal mode string against an ls-style permission string """
    myobj = na_helper()
    assert myobj.compare_chmod_value("0777", "---rwxrwxrwx") is True
    assert myobj.compare_chmod_value("777", "---rwxrwxrwx") is True
    assert myobj.compare_chmod_value("7777", "sstrwxrwxrwx") is True
    assert myobj.compare_chmod_value("4555", "s--r-xr-xr-x") is True
    assert myobj.compare_chmod_value(None, "---rwxrwxrwx") is False
    assert myobj.compare_chmod_value("755", "rwxrwxrwxrwxr") is False
    assert myobj.compare_chmod_value("777", "---ssxrwxrwx") is False
    assert myobj.compare_chmod_value("7777", "rwxrwxrwxrwx") is False
    assert myobj.compare_chmod_value("7777", "7777") is True


def test_ignore_missing_vserver_on_delete():
    my_obj = create_ontap_module({'hostname': 'abc'})
    # only applies when state is absent
    assert not my_obj.na_helper.ignore_missing_vserver_on_delete('error')
    my_obj.na_helper.parameters['state'] = 'absent'
    error = 'Internal error, vserver name is required, when processing error: error_msg'
    assert error in expect_and_capture_ansible_exception(my_obj.na_helper.ignore_missing_vserver_on_delete, 'fail', 'error_msg')['msg']
    my_obj.na_helper.parameters['vserver'] = 'svm'
    error = 'Internal error, error should contain "message" key, found:'
    assert error in expect_and_capture_ansible_exception(my_obj.na_helper.ignore_missing_vserver_on_delete, 'fail', {'error_msg': 'error'})['msg']
    error = 'Internal error, error should be str or dict, found:'
    assert error in expect_and_capture_ansible_exception(my_obj.na_helper.ignore_missing_vserver_on_delete, 'fail', ['error_msg'])['msg']
    assert not my_obj.na_helper.ignore_missing_vserver_on_delete('error')
    assert my_obj.na_helper.ignore_missing_vserver_on_delete({'message': 'SVM "svm" does not exist.'})
def test_remove_hal_links():
    """ remove_hal_links strips '_links' keys recursively from dicts and lists; non-container input is a no-op """
    my_obj = create_ontap_module({'hostname': 'abc'})
    assert my_obj.na_helper.remove_hal_links(None) is None
    assert my_obj.na_helper.remove_hal_links('string') is None
    adict = {
        '_links': 'whatever',
        'other': 'other'
    }
    # dict
    test_object = copy.deepcopy(adict)
    assert my_obj.na_helper.remove_hal_links(test_object) is None
    assert '_links' not in test_object
    # list of dicts
    test_object = [copy.deepcopy(adict)] * 5
    assert my_obj.na_helper.remove_hal_links(test_object) is None
    assert all('_links' not in elem for elem in test_object)
    # dict of dicts
    test_object = {'a': copy.deepcopy(adict), 'b': copy.deepcopy(adict)}
    assert my_obj.na_helper.remove_hal_links(test_object) is None
    assert all('_links' not in value for value in test_object.values())
    # list of list of dicts
    items = [copy.deepcopy(adict)] * 5
    test_object = [items, items]
    assert my_obj.na_helper.remove_hal_links(test_object) is None
    assert all('_links' not in elem for elems in test_object for elem in elems)


# ---------------------------------------------------------------------------
# NOTE: patch file boundary - the content below is the start of a new file:
# tests/unit/plugins/module_utils/test_netapp_rest.py
# ---------------------------------------------------------------------------
# Copyright (c) 2018-2022 NetApp
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

''' unit tests for module_utils netapp.py - REST features '''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os.path
import pytest
import sys
import tempfile

from ansible.module_utils import basic
from ansible_collections.netapp.ontap.tests.unit.compat.mock import call, patch

# pylint: disable=unused-import
from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \
    assert_no_warnings, assert_warning_was_raised, create_module, expect_and_capture_ansible_exception, patch_ansible, print_warnings
from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses
from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils

# requests is an optional dependency; unit tests cannot run without it on python 2.6
if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7):
    pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available')

# canned ONTAP version payload used across the tests
VERSION = {'version': {
    'full': '9.8.45',
    'generation': 9,
    'major': 8,
    'minor': 45
}}

# canned REST responses, keyed by scenario name: (status_code, body, error)
SRR = rest_responses({
    'vservers_with_admin': (200, {
        'records': [
            {'vserver': 'vserver1', 'type': 'data '},
            {'vserver': 'vserver2', 'type': 'data '},
            {'vserver': 'cserver', 'type': 'admin'}
        ]}, None),
    'vservers_without_admin': (200, {
        'records': [
            {'vserver': 'vserver1', 'type': 'data '},
            {'vserver': 'vserver2', 'type': 'data '},
        ]}, None),
    'vservers_single': (200, {
        'records': [
            {'vserver': 'single', 'type': 'data '},
        ]}, None),
    'vservers_empty': (200, {}, None),
    'vservers_error': (200, {
        'records': [
            {'vserver': 'single', 'type': 'data '},
        ]}, 'some error'),
    'nodes': (200, {
        'records': [
            VERSION,
            {'node': 'node2', 'version': 'version'},
        ]}, None),
    'precluster_error': (400, {}, {'message': 'are available in precluster.'}),
})

# basic authentication arguments
DEFAULT_ARGS = {
    'hostname': 'test',
    'username': 'test_user',
    'password': 'test_pass!',
    'cert_filepath': None,
    'key_filepath': None,
}

# certificate authentication arguments
CERT_ARGS = {
    'hostname': 'test',
    'cert_filepath': 'test_pem.pem',
    'key_filepath': 'test_key.key'
}


class MockONTAPModule:
    # minimal module wrapper so OntapRestAPI can be built from an argument spec
    def __init__(self):
        self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec())
def create_restapi_object(default_args, module_args=None):
    # helper: build an OntapRestAPI instance from the given module arguments
    module = create_module(MockONTAPModule, default_args, module_args)
    return netapp_utils.OntapRestAPI(module.module)


def test_write_to_file():
    ''' check error and debug logs can be written to disk '''
    rest_api = create_restapi_object(DEFAULT_ARGS)
    # logging an error also add a debug record
    rest_api.log_error(404, '404 error')
    print(rest_api.errors)
    print(rest_api.debug_logs)
    # logging a debug record only
    rest_api.log_debug(501, '501 error')
    print(rest_api.errors)
    print(rest_api.debug_logs)

    try:
        tempdir = tempfile.TemporaryDirectory()
        filepath = os.path.join(tempdir.name, 'log.txt')
    except AttributeError:
        # python 2.7 does not support tempfile.TemporaryDirectory
        # we're taking a small chance that there is a race condition
        filepath = '/tmp/deleteme354.txt'
    rest_api.write_debug_log_to_file(filepath=filepath, append=False)
    with open(filepath, 'r') as log:
        lines = log.readlines()
        assert len(lines) == 4
        assert lines[0].strip() == 'Debug: 404'
        assert lines[2].strip() == 'Debug: 501'

    # Idempotent, as append is False
    rest_api.write_debug_log_to_file(filepath=filepath, append=False)
    with open(filepath, 'r') as log:
        lines = log.readlines()
        assert len(lines) == 4
        assert lines[0].strip() == 'Debug: 404'
        assert lines[2].strip() == 'Debug: 501'

    # Duplication, as append is True
    rest_api.write_debug_log_to_file(filepath=filepath, append=True)
    with open(filepath, 'r') as log:
        lines = log.readlines()
        assert len(lines) == 8
        assert lines[0].strip() == 'Debug: 404'
        assert lines[2].strip() == 'Debug: 501'
        assert lines[4].strip() == 'Debug: 404'
        assert lines[6].strip() == 'Debug: 501'

    rest_api.write_errors_to_file(filepath=filepath, append=False)
    with open(filepath, 'r') as log:
        lines = log.readlines()
        assert len(lines) == 1
        assert lines[0].strip() == 'Error: 404 error'

    # Idempotent, as append is False
    rest_api.write_errors_to_file(filepath=filepath, append=False)
    with open(filepath, 'r') as log:
        lines = log.readlines()
        assert len(lines) == 1
        assert lines[0].strip() == 'Error: 404 error'

    # Duplication, as append is True
    rest_api.write_errors_to_file(filepath=filepath, append=True)
    with open(filepath, 'r') as log:
        lines = log.readlines()
        assert len(lines) == 2
        assert lines[0].strip() == 'Error: 404 error'
        assert lines[1].strip() == 'Error: 404 error'

    # Empty data
    rest_api.write_to_file(tag='atag', filepath=filepath, append=False)
    with open(filepath, 'r') as log:
        lines = log.readlines()
        assert len(lines) == 1
        assert lines[0].strip() == 'atag'

    builtins = 'builtins' if sys.version_info > (3, 0) else '__builtin__'

    # open() failures propagate to the caller; default filepath is /tmp/ontap_log
    with patch('%s.open' % builtins) as mock_open:
        mock_open.side_effect = KeyError('Open error')
        exc = expect_and_capture_ansible_exception(rest_api.write_to_file, KeyError, tag='atag')
        assert str(exc) == 'Open error'
        print(mock_open.mock_calls)
        assert call('/tmp/ontap_log', 'a') in mock_open.mock_calls


def test_is_rest_true():
    ''' is_rest is expected to return True '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest']),
    ])
    rest_api = create_restapi_object(DEFAULT_ARGS)
    is_rest = rest_api.is_rest()
    print(rest_api.errors)
    print(rest_api.debug_logs)
    assert is_rest


def test_is_rest_false():
    ''' is_rest is expected to return False '''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
    ])
    rest_api = create_restapi_object(DEFAULT_ARGS)
    is_rest = rest_api.is_rest()
    print(rest_api.errors)
    print(rest_api.debug_logs)
    assert not is_rest
    assert rest_api.errors[0] == SRR['is_zapi'][2]
    assert rest_api.debug_logs[0][0] == SRR['is_zapi'][0]    # status_code
    assert rest_api.debug_logs[0][1] == SRR['is_zapi'][2]    # error


def test_is_rest_false_9_5():
    ''' is_rest is expected to return False on ONTAP 9.5 (no error is logged) '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_95']),
    ])
    rest_api = create_restapi_object(DEFAULT_ARGS)
    is_rest = rest_api.is_rest()
    print(rest_api.errors)
    print(rest_api.debug_logs)
    assert not is_rest
    assert not rest_api.errors
    assert not rest_api.debug_logs


def test_is_rest_true_9_6():
    ''' is_rest is expected to return True on ONTAP 9.6 '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_96']),
    ])
    rest_api = create_restapi_object(DEFAULT_ARGS)
    is_rest = rest_api.is_rest()
    print(rest_api.errors)
    print(rest_api.debug_logs)
    assert is_rest
    assert not rest_api.errors
    assert not rest_api.debug_logs


def test_fail_has_username_password_and_cert():
    ''' failure case in auth_method '''
    module_args = dict(cert_filepath='dummy')
    msg = 'Error: cannot have both basic authentication (username/password) and certificate authentication (cert/key files)'
    assert expect_and_capture_ansible_exception(create_restapi_object, 'fail', DEFAULT_ARGS, module_args)['msg'] == msg


def test_fail_has_username_password_and_key():
    ''' failure case in auth_method '''
    module_args = dict(key_filepath='dummy')
    msg = 'Error: cannot have both basic authentication (username/password) and certificate authentication (cert/key files)'
    assert expect_and_capture_ansible_exception(create_restapi_object, 'fail', DEFAULT_ARGS, module_args)['msg'] == msg


def test_fail_has_username_and_cert():
    ''' failure case in auth_method '''
    args = dict(DEFAULT_ARGS)
    module_args = dict(cert_filepath='dummy')
    del args['password']
    msg = 'Error: username and password have to be provided together and cannot be used with cert or key files'
    assert expect_and_capture_ansible_exception(create_restapi_object, 'fail', args, module_args)['msg'] == msg


def test_fail_has_password_and_cert():
    ''' failure case in auth_method '''
    args = dict(DEFAULT_ARGS)
    module_args = dict(cert_filepath='dummy')
    del args['username']
    msg = 'Error: username and password have to be provided together and cannot be used with cert or key files'
    assert expect_and_capture_ansible_exception(create_restapi_object, 'fail', args, module_args)['msg'] == msg
def test_has_username_password():
    ''' auth_method reports expected value '''
    rest_api = create_restapi_object(DEFAULT_ARGS)
    assert rest_api.auth_method == 'speedy_basic_auth'


def test_has_cert_no_key():
    ''' auth_method reports expected value '''
    args = dict(CERT_ARGS)
    del args['key_filepath']
    rest_api = create_restapi_object(args)
    assert rest_api.auth_method == 'single_cert'


def test_has_cert_and_key():
    ''' auth_method reports expected value '''
    rest_api = create_restapi_object(CERT_ARGS)
    assert rest_api.auth_method == 'cert_key'


def test_get_cserver():
    ''' using REST to get cserver - not sure if it's needed '''
    register_responses([
        ('GET', 'private/cli/vserver', SRR['vservers_with_admin']),
        ('GET', 'private/cli/vserver', SRR['vservers_without_admin']),
        ('GET', 'private/cli/vserver', SRR['vservers_single']),
        ('GET', 'private/cli/vserver', SRR['vservers_empty']),
        ('GET', 'private/cli/vserver', SRR['vservers_error']),
    ])
    rest_api = create_restapi_object(DEFAULT_ARGS)
    # admin vserver found, no admin vserver, single vserver, empty response, error response
    assert netapp_utils.get_cserver(rest_api, is_rest=True) == 'cserver'
    assert netapp_utils.get_cserver(rest_api, is_rest=True) is None
    assert netapp_utils.get_cserver(rest_api, is_rest=True) == 'single'
    assert netapp_utils.get_cserver(rest_api, is_rest=True) is None
    assert netapp_utils.get_cserver(rest_api, is_rest=True) is None


def test_ontaprestapi_init():
    # http_port, when set, is appended to the base URL
    module_args = {'http_port': 123}
    rest_api = create_restapi_object(DEFAULT_ARGS)
    assert rest_api.url == 'https://%s/api/' % DEFAULT_ARGS['hostname']
    rest_api = create_restapi_object(DEFAULT_ARGS, module_args)
    assert rest_api.url == 'https://%s:%d/api/' % (DEFAULT_ARGS['hostname'], module_args['http_port'])


@patch('logging.basicConfig')
def test_ontaprestapi_logging(mock_config):
    # logging is only configured when the trace_apis feature flag is set
    create_restapi_object(DEFAULT_ARGS)
    assert not mock_config.mock_calls
    module_args = {'feature_flags': {'trace_apis': True}}
    create_restapi_object(DEFAULT_ARGS, module_args)
    assert len(mock_config.mock_calls) == 1


def test_requires_ontap_9_6():
    rest_api = create_restapi_object(DEFAULT_ARGS)
    assert rest_api.requires_ontap_9_6('module_name') == 'module_name only supports REST, and requires ONTAP 9.6 or later.'


def test_requires_ontap_version():
    rest_api = create_restapi_object(DEFAULT_ARGS)
    assert rest_api.requires_ontap_version('module_name', '9.1.2') == 'module_name only supports REST, and requires ONTAP 9.1.2 or later.'


def test_options_require_ontap_version():
    rest_api = create_restapi_object(DEFAULT_ARGS)
    base = 'using %s requires ONTAP 9.1.2 or later and REST must be enabled'
    msg = base % 'option_name'
    msg_m = base % "any of ['op1', 'op2', 'op3']"
    assert rest_api.options_require_ontap_version('option_name', '9.1.2') == '%s.' % msg
    assert rest_api.options_require_ontap_version('option_name', '9.1.2', use_rest=True) == '%s - using REST.' % msg
    assert rest_api.options_require_ontap_version('option_name', '9.1.2', use_rest=False) == '%s - using ZAPI.' % msg
    assert rest_api.options_require_ontap_version(['op1', 'op2', 'op3'], '9.1.2') == '%s.' % msg_m
    # once the version is known, it is reported in the message
    rest_api.set_version(VERSION)
    assert rest_api.options_require_ontap_version(['option_name'], '9.1.2') == '%s - ONTAP version: %s.' % (msg, VERSION['version']['full'])
    assert rest_api.options_require_ontap_version(['op1', 'op2', 'op3'], '9.1.2', use_rest=True) ==\
        '%s - ONTAP version: %s - using REST.' % (msg_m, VERSION['version']['full'])


def test_meets_rest_minimum_version():
    rest_api = create_restapi_object(DEFAULT_ARGS)
    rest_api.set_version(VERSION)
    assert rest_api.meets_rest_minimum_version(True, VERSION['version']['generation'], VERSION['version']['major'])
    assert rest_api.meets_rest_minimum_version(True, VERSION['version']['generation'], VERSION['version']['major'] - 1)
    assert not rest_api.meets_rest_minimum_version(True, VERSION['version']['generation'], VERSION['version']['major'] + 1)
    assert not rest_api.meets_rest_minimum_version(True, VERSION['version']['generation'], VERSION['version']['major'], VERSION['version']['minor'] + 1)


def test_fail_if_not_rest_minimum_version():
    register_responses([
        ('GET', 'cluster', SRR['is_rest_96']),
        ('GET', 'cluster', SRR['is_rest_96']),
        ('GET', 'cluster', SRR['generic_error']),
        ('GET', 'cluster', SRR['is_rest_96']),
        ('GET', 'cluster', SRR['is_rest_96']),
    ])
    rest_api = create_restapi_object(DEFAULT_ARGS)
    rest_api.use_rest = 'never'
    # validate consistency bug in fail_if_not_rest_minimum_version
    assert expect_and_capture_ansible_exception(rest_api.fail_if_not_rest_minimum_version, 'fail', 'module_name', 9, 6)['msg'] ==\
        'Error: REST is required for this module, found: "use_rest: never".'
    # never
    rest_api = create_restapi_object(DEFAULT_ARGS, {'use_rest': 'never'})
    assert expect_and_capture_ansible_exception(rest_api.fail_if_not_rest_minimum_version, 'fail', 'module_name', 9, 6)['msg'] ==\
        'Error: REST is required for this module, found: "use_rest: never".'
    # REST error
    rest_api = create_restapi_object(DEFAULT_ARGS, {'use_rest': 'auto'})
    assert expect_and_capture_ansible_exception(rest_api.fail_if_not_rest_minimum_version, 'fail', 'module_name', 9, 6)['msg'] ==\
        'Error using REST for version, error: Expected error.  Error using REST for version, status_code: 400.'
+ # version mismatch + assert expect_and_capture_ansible_exception(rest_api.fail_if_not_rest_minimum_version, 'fail', 'module_name', 9, 7)['msg'] ==\ + 'Error: module_name only supports REST, and requires ONTAP 9.7.0 or later. Found: 9.6.0.' + # version match + assert rest_api.fail_if_not_rest_minimum_version('module_name', 9, 6) is None + + +def test_check_required_library(): + rest_api = create_restapi_object(DEFAULT_ARGS) + msg = 'Failed to import the required Python library (requests)' + with patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_REQUESTS', False): + assert expect_and_capture_ansible_exception(rest_api.check_required_library, 'fail')['msg'].startswith(msg) + + +def test_build_headers(): + rest_api = create_restapi_object(DEFAULT_ARGS) + app_version = 'basic.py/%s' % netapp_utils.COLLECTION_VERSION + assert rest_api.build_headers() == {'X-Dot-Client-App': app_version} + assert rest_api.build_headers(accept='accept') == {'X-Dot-Client-App': app_version, 'accept': 'accept'} + assert rest_api.build_headers(vserver_name='vserver_name') == {'X-Dot-Client-App': app_version, 'X-Dot-SVM-Name': 'vserver_name'} + assert rest_api.build_headers(vserver_uuid='vserver_uuid') == {'X-Dot-Client-App': app_version, 'X-Dot-SVM-UUID': 'vserver_uuid'} + assert len(rest_api.build_headers(accept='accept', vserver_name='name', vserver_uuid='uuid')) == 4 + + +def test_get_method(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + assert create_restapi_object(DEFAULT_ARGS).get('cluster') == (SRR['is_rest_96'][1], None) + + +def test_post_method(): + register_responses([ + ('POST', 'cluster', SRR['is_rest_96']), + ]) + assert create_restapi_object(DEFAULT_ARGS).post('cluster', None) == (SRR['is_rest_96'][1], None) + + +def test_patch_method(): + register_responses([ + ('PATCH', 'cluster', SRR['is_rest_96']), + ]) + assert create_restapi_object(DEFAULT_ARGS).patch('cluster', None) == (SRR['is_rest_96'][1], None) + + +def 
test_delete_method(): + register_responses([ + ('DELETE', 'cluster', SRR['is_rest_96']), + ]) + assert create_restapi_object(DEFAULT_ARGS).delete('cluster', None) == (SRR['is_rest_96'][1], None) + + +def test_options_method(): + register_responses([ + ('OPTIONS', 'cluster', SRR['is_rest_96']), + ]) + assert create_restapi_object(DEFAULT_ARGS).options('cluster', None) == (SRR['is_rest_96'][1], None) + + +def test_get_node_version_using_rest(): + register_responses([ + ('GET', 'cluster/nodes', SRR['nodes']), + ]) + assert create_restapi_object(DEFAULT_ARGS).get_node_version_using_rest() == (200, SRR['nodes'][1]['records'][0], None) + + +def test_get_ontap_version_using_rest(): + register_responses([ + ('GET', 'cluster', SRR['precluster_error']), + ('GET', 'cluster/nodes', SRR['nodes']), + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + assert rest_api.get_ontap_version_using_rest() == 200 + assert rest_api.ontap_version['major'] == VERSION['version']['major'] + assert rest_api.ontap_version['valid'] + + +def test__is_rest(): + if not sys.version_info > (3, 0): + return + rest_api = create_restapi_object(DEFAULT_ARGS) + rest_api.use_rest = 'invalid' + msg = "use_rest must be one of: never, always, auto. Got: 'invalid'" + assert rest_api._is_rest() == (False, msg) + # testing always with used_unsupported_rest_properties + rest_api.use_rest = 'always' + msg = "REST API currently does not support 'xyz'" + assert rest_api._is_rest(used_unsupported_rest_properties=['xyz']) == (True, msg) + # testing never + rest_api.use_rest = 'never' + assert rest_api._is_rest() == (False, None) + # we need the version + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + # testing always unconditionnally and with partially_supported_rest_properties + rest_api.use_rest = 'always' + msg = 'Error: Minimum version of ONTAP for xyz is (9, 7). Current version: (9, 6, 0).' 
+ assert rest_api._is_rest(partially_supported_rest_properties=[('xyz', (9, 7))], parameters=['xyz']) == (True, msg) + # No error when version requirement is matched + assert rest_api._is_rest(partially_supported_rest_properties=[('xyz', (9, 6))], parameters=['xyz']) == (True, None) + # No error when parameter is not used + assert rest_api._is_rest(partially_supported_rest_properties=[('abc', (9, 6))], parameters=['xyz']) == (True, None) + # testing auto with used_unsupported_rest_properties + rest_api.use_rest = 'auto' + assert rest_api._is_rest(used_unsupported_rest_properties=['xyz']) == (False, None) + # TODO: check warning + + +def test_is_rest_supported_properties(): + rest_api = create_restapi_object(DEFAULT_ARGS) + rest_api.use_rest = 'always' + assert expect_and_capture_ansible_exception(rest_api.is_rest_supported_properties, 'fail', ['xyz'], ['xyz'])['msg'] ==\ + "REST API currently does not support 'xyz'" + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + assert rest_api.is_rest_supported_properties(['abc'], ['xyz']) + assert rest_api.is_rest_supported_properties(['abc'], ['xyz'], report_error=True) == (True, None) + + +def test_is_rest_partially_supported_properties(): + if not sys.version_info > (3, 0): + return + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + rest_api.use_rest = 'auto' + assert not rest_api.is_rest_supported_properties(['xyz'], None, [('xyz', (9, 8, 1))]) + assert_warning_was_raised('Falling back to ZAPI because of unsupported option(s) or option value(s) "xyz" in REST require (9, 8, 1)') + rest_api = create_restapi_object(DEFAULT_ARGS) + rest_api.use_rest = 'auto' + assert rest_api.is_rest_supported_properties(['xyz'], None, [('xyz', (9, 8, 1))]) + + +def test_is_rest(): + rest_api = create_restapi_object(DEFAULT_ARGS) + # testing always with used_unsupported_rest_properties + rest_api.use_rest = 
'always' + msg = "REST API currently does not support 'xyz'" + assert rest_api.is_rest(used_unsupported_rest_properties=['xyz']) == (True, msg) + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + assert rest_api.is_rest() + + +def test_set_version(): + rest_api = create_restapi_object(DEFAULT_ARGS) + rest_api.set_version(VERSION) + print('VERSION', rest_api.ontap_version) + assert rest_api.ontap_version['generation'] == VERSION['version']['generation'] + assert rest_api.ontap_version['valid'] + rest_api.set_version({}) + assert not rest_api.ontap_version['valid'] + + +def test_force_ontap_version_local(): + """ test get_ontap_version_from_params in isolation """ + rest_api = create_restapi_object(DEFAULT_ARGS) + rest_api.set_version(VERSION) + print('VERSION', rest_api.ontap_version) + assert rest_api.ontap_version['generation'] == VERSION['version']['generation'] + # same version + rest_api.force_ontap_version = VERSION['version']['full'] + assert not rest_api.get_ontap_version_from_params() + # different versions + rest_api.force_ontap_version = '10.8.1' + warning = rest_api.get_ontap_version_from_params() + assert rest_api.ontap_version['generation'] != VERSION['version']['generation'] + assert rest_api.ontap_version['generation'] == 10 + assert 'Forcing ONTAP version to 10.8.1 but current version is 9.8.45' in warning + # version could not be read + rest_api.set_version({}) + rest_api.force_ontap_version = '10.8' + warning = rest_api.get_ontap_version_from_params() + assert rest_api.ontap_version['generation'] != VERSION['version']['generation'] + assert rest_api.ontap_version['generation'] == 10 + assert rest_api.ontap_version['minor'] == 0 + assert 'Forcing ONTAP version to 10.8, unable to read current version:' in warning + + +def test_negative_force_ontap_version_local(): + """ test get_ontap_version_from_params in isolation """ + rest_api = create_restapi_object(DEFAULT_ARGS) + # non numeric + rest_api.force_ontap_version = '9.8P4' + 
error = 'Error: unexpected format in force_ontap_version, expecting G.M.m or G.M, as in 9.10.1, got: 9.8P4,' + assert error in expect_and_capture_ansible_exception(rest_api.get_ontap_version_from_params, 'fail')['msg'] + # too short + rest_api.force_ontap_version = '9' + error = 'Error: unexpected format in force_ontap_version, expecting G.M.m or G.M, as in 9.10.1, got: 9,' + assert error in expect_and_capture_ansible_exception(rest_api.get_ontap_version_from_params, 'fail')['msg'] + # too long + rest_api.force_ontap_version = '9.1.2.3' + error = 'Error: unexpected format in force_ontap_version, expecting G.M.m or G.M, as in 9.10.1, got: 9.1.2.3,' + assert error in expect_and_capture_ansible_exception(rest_api.get_ontap_version_from_params, 'fail')['msg'] + + +def test_force_ontap_version_rest_call(): + """ test get_ontap_version_using_rest with force_ontap_version option """ + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['generic_error']), + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + # same version + rest_api.force_ontap_version = '9.7' + assert rest_api.get_ontap_version_using_rest() == 200 + assert_no_warnings() + # different versions + rest_api.force_ontap_version = '10.8.1' + assert rest_api.get_ontap_version_using_rest() == 200 + assert rest_api.ontap_version['generation'] == 10 + assert_warning_was_raised('Forcing ONTAP version to 10.8.1 but current version is dummy_9_9_0') + # version could not be read + assert rest_api.get_ontap_version_using_rest() == 200 + assert_warning_was_raised('Forcing ONTAP version to 10.8.1, unable to read current version: error: Expected error, status_code: 400') + assert rest_api.ontap_version['generation'] == 10 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_send_request.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_send_request.py new file mode 100644 
index 000000000..f6ae38f21 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_send_request.py @@ -0,0 +1,271 @@ +# Copyright (c) 2018 NetApp +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils netapp.py ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +from ansible.module_utils import basic +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + create_module, expect_and_capture_ansible_exception + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'cert_filepath': None, + 'key_filepath': None, +} + +SINGLE_CERT_ARGS = { + 'hostname': 'test', + 'username': None, + 'password': None, + 'cert_filepath': 'cert_file', + 'key_filepath': None, +} + +CERT_KEY_ARGS = { + 'hostname': 'test', + 'username': None, + 'password': None, + 'cert_filepath': 'cert_file', + 'key_filepath': 'key_file', +} + + +class MockONTAPModule: + def __init__(self): + self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec()) + + +def create_restapi_object(default_args): + module = create_module(MockONTAPModule, default_args) + return netapp_utils.OntapRestAPI(module.module) + + +class mockResponse: + def __init__(self, json_data, status_code, raise_action=None, headers=None, text=None): + self.json_data = json_data + self.status_code = status_code + self.content = json_data + self.raise_action = raise_action + self.headers = headers or {} + self.text = text + + def 
raise_for_status(self): + if self.status_code >= 400 and self.status_code < 600: + raise netapp_utils.requests.exceptions.HTTPError('status_code: %s' % self.status_code, response=self) + + def json(self): + if self.raise_action == 'bad_json': + raise ValueError(self.raise_action) + return self.json_data + + +@patch('requests.request') +def test_empty_get_sent_bad_json(mock_request): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data='anything', status_code=200, raise_action='bad_json') + rest_api = create_restapi_object(DEFAULT_ARGS) + message, error = rest_api.get('api', None) + assert error + assert 'Expecting json, got: anything' in error + print('errors:', rest_api.errors) + print('debug:', rest_api.debug_logs) + + +@patch('requests.request') +def test_empty_get_sent_bad_but_empty_json(mock_request): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data='', status_code=200, raise_action='bad_json') + rest_api = create_restapi_object(DEFAULT_ARGS) + message, error = rest_api.get('api', None) + assert not error + + +def test_wait_on_job_bad_url(): + ''' URL format error ''' + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'testme' + job = dict(_links=dict(self=dict(href=api))) + message, error = rest_api.wait_on_job(job) + msg = "URL Incorrect format: list index out of range - Job: {'_links': {'self': {'href': 'testme'}}}" + assert msg in error + + +@patch('time.sleep') +@patch('requests.request') +def test_wait_on_job_timeout(mock_request, sleep_mock): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data='', status_code=200, raise_action='bad_json') + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + job = dict(_links=dict(self=dict(href=api))) + message, error = rest_api.wait_on_job(job) + msg = 'Timeout error: Process still running' + assert msg in error + + +@patch('time.sleep') +@patch('requests.request') +def test_wait_on_job_job_error(mock_request, 
sleep_mock): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data=dict(error='Job error message'), status_code=200) + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + job = dict(_links=dict(self=dict(href=api))) + message, error = rest_api.wait_on_job(job) + msg = 'Job error message' + assert msg in error + + +@patch('time.sleep') +@patch('requests.request') +def test_wait_on_job_job_failure(mock_request, dont_sleep): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data=dict(error='Job error message', state='failure', message='failure message'), status_code=200) + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + job = dict(_links=dict(self=dict(href=api))) + message, error = rest_api.wait_on_job(job) + msg = 'failure message' + assert msg in error + assert not message + + +@patch('time.sleep') +@patch('requests.request') +def test_wait_on_job_timeout_running(mock_request, sleep_mock): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data=dict(error='Job error message', state='running', message='any message'), status_code=200) + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + job = dict(_links=dict(self=dict(href=api))) + message, error = rest_api.wait_on_job(job) + msg = 'Timeout error: Process still running' + assert msg in error + assert message == 'any message' + + +@patch('time.sleep') +@patch('requests.request') +def test_wait_on_job(mock_request, dont_sleep): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data=dict(error='Job error message', state='other', message='any message'), status_code=200) + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + job = dict(_links=dict(self=dict(href=api))) + message, error = rest_api.wait_on_job(job) + msg = 'Job error message' + assert msg in error + assert message == 'any message' + + +@patch('requests.request') +def 
test_get_auth_single_cert(mock_request): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data='', status_code=200) + rest_api = create_restapi_object(SINGLE_CERT_ARGS) + api = 'api/testme' + # rest_api.auth_method = 'single_cert' + message, error = rest_api.get(api, None) + print(mock_request.mock_calls) + assert rest_api.auth_method == 'single_cert' + assert "cert='cert_file'" in str(mock_request.mock_calls[0]) + + +@patch('requests.request') +def test_get_auth_cert_key(mock_request): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data='', status_code=200) + rest_api = create_restapi_object(CERT_KEY_ARGS) + api = 'api/testme' + # rest_api.auth_method = 'single_cert' + message, error = rest_api.get(api, None) + print(mock_request.mock_calls) + assert rest_api.auth_method == 'cert_key' + assert "cert=('cert_file', 'key_file')" in str(mock_request.mock_calls[0]) + + +def test_get_auth_method_keyerror(): + my_cx = create_restapi_object(CERT_KEY_ARGS) + my_cx.auth_method = 'invalid_method' + args = ('method', 'api', 'params') + msg = 'xxxx' + assert expect_and_capture_ansible_exception(my_cx.send_request, KeyError, *args) == 'invalid_method' + + +@patch('requests.request') +def test_http_error_no_json(mock_request): + ''' get raises HTTPError ''' + mock_request.return_value = mockResponse(json_data={}, status_code=400) + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + message, error = rest_api.get(api) + assert error == 'status_code: 400' + + +@patch('requests.request') +def test_http_error_with_json_error_field(mock_request): + ''' get raises HTTPError ''' + mock_request.return_value = mockResponse(json_data=dict(state='other', message='any message', error='error_message'), status_code=400) + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + message, error = rest_api.get(api) + assert error == 'error_message' + + +@patch('requests.request') +def 
test_http_error_attribute_error(mock_request): + ''' get raises HTTPError ''' + mock_request.return_value = mockResponse(json_data='bad_data', status_code=400) + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + message, error = rest_api.get(api) + assert error == 'status_code: 400' + + +@patch('requests.request') +def test_connection_error(mock_request): + ''' get raises HTTPError ''' + mock_request.side_effect = netapp_utils.requests.exceptions.ConnectionError('connection_error') + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + message, error = rest_api.get(api) + # print(rest_api.errors) + assert error == 'connection_error' + # assert False + + +@patch('requests.request') +def test_options_allow_in_header(mock_request): + ''' OPTIONS returns Allow key ''' + mock_request.return_value = mockResponse(json_data={}, headers={'Allow': 'allowed'}, status_code=200) + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + message, error = rest_api.options(api) + assert error is None + assert message == {'Allow': 'allowed'} + + +@patch('requests.request') +def test_formdata_in_response(mock_request): + ''' GET return formdata ''' + mock_request.return_value = mockResponse( + json_data={}, headers={'Content-Type': 'multipart/form-data'}, raise_action='bad_json', status_code=200, text='testme') + rest_api = create_restapi_object(DEFAULT_ARGS) + api = 'api/testme' + message, error = rest_api.get(api) + assert error is None + assert message == {'text': 'testme'} diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_sf.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_sf.py new file mode 100644 index 000000000..99c74242b --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_sf.py @@ -0,0 +1,85 @@ +# Copyright (c) 2018-2022 NetApp +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils netapp.py - solidfire related methods ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.module_utils import basic +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + patch_ansible, create_module, expect_and_capture_ansible_exception +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_sf_sdk(): + pytestmark = pytest.mark.skip("skipping as missing required solidfire") + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', +} + + +class MockONTAPModule: + def __init__(self): + self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec()) + + +def create_ontap_module(default_args=None): + return create_module(MockONTAPModule, default_args).module + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_SF_SDK', 'dummy') +def test_has_sf_sdk(): + assert netapp_utils.has_sf_sdk() == 'dummy' + + +@patch('solidfire.factory.ElementFactory.create') +def test_create_sf_connection(mock_sf_create): + module = create_ontap_module(DEFAULT_ARGS) + mock_sf_create.return_value = 'dummy' + assert netapp_utils.create_sf_connection(module) == 'dummy' + + +@patch('solidfire.factory.ElementFactory.create') +def test_negative_create_sf_connection_exception(mock_sf_create): + module = create_ontap_module(DEFAULT_ARGS) + mock_sf_create.side_effect = KeyError('dummy') + assert str(expect_and_capture_ansible_exception(netapp_utils.create_sf_connection, Exception, module)) == "Unable to create SF connection: 'dummy'" + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_SF_SDK', False) +def 
test_negative_create_sf_connection_no_sdk(): + module = create_ontap_module(DEFAULT_ARGS) + assert expect_and_capture_ansible_exception(netapp_utils.create_sf_connection, 'fail', module)['msg'] == 'the python SolidFire SDK module is required' + + +def test_negative_create_sf_connection_no_options(): + module = create_ontap_module(DEFAULT_ARGS) + peer_options = {} + assert expect_and_capture_ansible_exception(netapp_utils.create_sf_connection, 'fail', module, host_options=peer_options)['msg'] ==\ + 'hostname, username, password are required for ElementSW connection.' + + +def test_negative_create_sf_connection_missing_and_extra_options(): + module = create_ontap_module(DEFAULT_ARGS) + peer_options = {'hostname': 'host', 'username': 'user'} + assert expect_and_capture_ansible_exception(netapp_utils.create_sf_connection, 'fail', module, host_options=peer_options)['msg'] ==\ + 'password is required for ElementSW connection.' + peer_options = {'hostname': 'host', 'username': 'user', 'cert_filepath': 'cert'} + assert expect_and_capture_ansible_exception(netapp_utils.create_sf_connection, 'fail', module, host_options=peer_options)['msg'] ==\ + 'password is required for ElementSW connection. cert_filepath is not supported for ElementSW connection.' + + +def test_negative_create_sf_connection_extra_options(): + module = create_ontap_module(DEFAULT_ARGS) + peer_options = {'hostname': 'host', 'username': 'user'} + assert expect_and_capture_ansible_exception(netapp_utils.create_sf_connection, 'fail', module, host_options=peer_options)['msg'] ==\ + 'password is required for ElementSW connection.' + peer_options = {'hostname': 'host', 'username': 'user', 'password': 'pass', 'cert_filepath': 'cert', 'key_filepath': 'key'} + assert expect_and_capture_ansible_exception(netapp_utils.create_sf_connection, 'fail', module, host_options=peer_options)['msg'] ==\ + 'cert_filepath, key_filepath are not supported for ElementSW connection.' 
diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_zapi.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_zapi.py new file mode 100644 index 000000000..b3a09f5cb --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_netapp_zapi.py @@ -0,0 +1,374 @@ +# Copyright (c) 2018-2022 NetApp +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils netapp.py - ZAPI related features ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +from ansible.module_utils import basic +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + patch_ansible, create_module, expect_and_capture_ansible_exception, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_raw_xml_response, build_zapi_error, zapi_responses +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip("skipping as missing required netapp_lib") + +if not hasattr(netapp_utils.ssl, 'create_default_context') or not hasattr(netapp_utils.ssl, 'SSLContext'): + pytestmark = pytest.mark.skip("skipping as missing required ssl package with SSLContext support") + +ZRR = zapi_responses({ + 'error_no_vserver': build_zapi_error(12345, 'Vserver API missing vserver parameter.'), + 'error_connection_error': build_zapi_error(12345, 'URLError'), + 'error_other_error': build_zapi_error(12345, 'Some other error message.'), +}) + 
+ +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'cert_filepath': None, + 'key_filepath': None, +} + +CERT_ARGS = { + 'hostname': 'test', + 'cert_filepath': 'test_pem.pem', + 'key_filepath': 'test_key.key' +} + + +class MockONTAPModule: + def __init__(self): + self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec()) + + +def create_ontap_module(default_args, module_args=None): + return create_module(MockONTAPModule, default_args, module_args).module + + +def create_ontapzapicx_object(default_args, module_args=None): + ontap_mock = create_module(MockONTAPModule, default_args, module_args) + my_args = {'module': ontap_mock.module} + for key in 'hostname', 'username', 'password', 'cert_filepath', 'key_filepath': + if key in ontap_mock.module.params: + my_args[key] = ontap_mock.module.params[key] + return netapp_utils.OntapZAPICx(**my_args) + + +def test_get_cserver(): + ''' validate cluster vserser name is correctly retrieved ''' + register_responses([ + ('vserver-get-iter', ZRR['cserver']), + ]) + server = netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS)) + cserver = netapp_utils.get_cserver(server) + assert cserver == 'cserver' + + +def test_get_cserver_none(): + ''' validate cluster vserser name is correctly retrieved ''' + register_responses([ + ('vserver-get-iter', ZRR['empty']), + ]) + server = netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS)) + cserver = netapp_utils.get_cserver(server) + assert cserver is None + + +def test_negative_get_cserver(): + ''' validate NaApiError is correctly reported ''' + register_responses([ + ('vserver-get-iter', ZRR['error']), + ]) + server = netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS)) + assert expect_and_capture_ansible_exception(netapp_utils.get_cserver, netapp_utils.zapi.NaApiError, server) + + +def test_negative_get_cserver_connection_error(): + ''' validate NaApiError error 
is correctly ignore for connection or autorization issues ''' + register_responses([ + ('vserver-get-iter', ZRR['error_connection_error']), + ]) + server = netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS)) + cserver = netapp_utils.get_cserver(server) + assert cserver is None + + +def test_setup_na_ontap_zapi_logging(): + module_args = {'feature_flags': {'trace_apis': False}} + server = netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS, module_args)) + assert not server._trace + module_args = {'feature_flags': {'trace_apis': True}} + server = netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS, module_args)) + assert server._trace + + +def test_setup_na_ontap_zapi_auth_method_and_https(): + module_args = {'feature_flags': {'trace_apis': False}} + server = netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS, module_args)) + assert server._auth_style == server.STYLE_LOGIN_PASSWORD + assert server.get_port() == '80' + module_args = {'feature_flags': {'trace_apis': True}} + server = netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(CERT_ARGS, module_args)) + assert server._auth_style == server.STYLE_CERTIFICATE + assert server.get_port() == '443' + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_NETAPP_LIB', False) +def test_negative_setup_na_ontap_zapi(): + error = 'Error: the python NetApp-Lib module is required. 
Import error: None' + assert expect_and_capture_ansible_exception(netapp_utils.setup_na_ontap_zapi, 'fail', create_ontap_module(DEFAULT_ARGS))['msg'] == error + + +def test_set_zapi_port_and_transport(): + server = netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS)) + netapp_utils.set_zapi_port_and_transport(server, True, None, False) + assert server.get_port() == '443' + assert server.get_transport_type() == 'https' + netapp_utils.set_zapi_port_and_transport(server, False, None, False) + assert server.get_port() == '80' + assert server.get_transport_type() == 'http' + + +@patch('ssl.SSLContext.load_cert_chain') +def test_certificate_method_zapi(mock_ssl): + ''' should fail when trying to read the certificate file ''' + zapi_cx = create_ontapzapicx_object(CERT_ARGS) + assert isinstance(zapi_cx._create_certificate_auth_handler(), netapp_utils.zapi.urllib.request.HTTPSHandler) + assert zapi_cx._get_url() == 'http://test:80/servlets/netapp.servlets.admin.XMLrequest_filer' + + +def test_certificate_method_zapi_missing_files(): + ''' should fail when trying to read the certificate file ''' + zapi_cx = create_ontapzapicx_object(CERT_ARGS) + msg1 = 'Cannot load SSL certificate, check files exist.' + # for python 2,6 :( + msg2 = 'SSL certificate authentication requires python 2.7 or later.' + assert expect_and_capture_ansible_exception(zapi_cx._create_certificate_auth_handler, 'fail')['msg'].startswith((msg1, msg2)) + assert zapi_cx._get_url() == 'http://test:80/servlets/netapp.servlets.admin.XMLrequest_filer' + + +@patch('ssl.create_default_context') +def test_negative_certificate_method_zapi(mock_ssl): + ''' should fail when trying to set context ''' + mock_ssl.side_effect = AttributeError('for test purpose') + zapi_cx = create_ontapzapicx_object(CERT_ARGS) + # AttributeError('for test purpose') with 3.x but AttributeError('for test purpose',) with 2.7 + error = "SSL certificate authentication requires python 2.7 or later. 
More info: AttributeError('for test purpose'" + assert expect_and_capture_ansible_exception(zapi_cx._create_certificate_auth_handler, 'fail')['msg'].startswith(error) + + +def test_classify_zapi_exception_cluster_only(): + ''' verify output matches expectations ''' + code = 13005 + message = 'Unable to find API: diagnosis-alert-get-iter on data vserver trident_svm' + zapi_exception = netapp_utils.zapi.NaApiError(code, message) + kind, new_message = netapp_utils.classify_zapi_exception(zapi_exception) + assert kind == 'missing_vserver_api_error' + assert new_message.endswith("%d:%s" % (code, message)) + + +def test_classify_zapi_exception_rpc_error(): + ''' verify output matches expectations ''' + code = 13001 + message = "RPC: Couldn't make connection [from mgwd on node \"laurentn-vsim1\" (VSID: -1) to mgwd at 172.32.78.223]" + error_message = 'NetApp API failed. Reason - %d:%s' % (code, message) + zapi_exception = netapp_utils.zapi.NaApiError(code, message) + kind, new_message = netapp_utils.classify_zapi_exception(zapi_exception) + assert kind == 'rpc_error' + assert new_message == error_message + + +def test_classify_zapi_exception_other_error(): + ''' verify output matches expectations ''' + code = 13008 + message = 'whatever' + error_message = 'NetApp API failed. 
Reason - %d:%s' % (code, message) + zapi_exception = netapp_utils.zapi.NaApiError(code, message) + kind, new_message = netapp_utils.classify_zapi_exception(zapi_exception) + assert kind == 'other_error' + assert new_message == error_message + + +def test_classify_zapi_exception_attributeerror(): + ''' verify output matches expectations ''' + zapi_exception = 'invalid' + kind, new_message = netapp_utils.classify_zapi_exception(zapi_exception) + assert kind == 'other_error' + assert new_message == zapi_exception + + +def test_zapi_parse_response_sanitized(): + ''' should not fail when trying to read invalid XML characters (\x08) ''' + zapi_cx = create_ontapzapicx_object(DEFAULT_ARGS) + response = b"\n\n" + response += b"\n" + response += b" (cluster log-forwarding create)\n\n" + response += b"Testing network connectivity to the destination host 10.10.10.10. \x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\n\n" + response += b"Error: command failed: Cannot contact destination host (10.10.10.10) from node\n" + response += b" "laurentn-vsim1". Verify connectivity to desired host or skip the\n" + response += b" connectivity check with the "-force" parameter." + response += b"0\n" + # Manually extract cli-output contents + cli_output = response.split(b'')[1] + cli_output = cli_output.split(b'')[0] + cli_output = cli_output.replace(b'"', b'"') + # the XML parser would chole on \x08, zapi_cx._parse_response replaces them with '.' 
+ cli_output = cli_output.replace(b'\x08', b'.') + # Use xml parser to extract cli-output contents + xml = zapi_cx._parse_response(response) + results = xml.get_child_by_name('results') + new_cli_output = results.get_child_content('cli-output') + assert cli_output.decode() == new_cli_output + + +def test_zapi_parse_response_unsanitized(): + ''' should fail when trying to read invalid XML characters (\x08) ''' + # use feature_flags to disable sanitization + module_args = {'feature_flags': {'sanitize_xml': False}} + zapi_cx = create_ontapzapicx_object(DEFAULT_ARGS, module_args) + response = b"\n\n" + response += b"\n" + response += b" (cluster log-forwarding create)\n\n" + response += b"Testing network connectivity to the destination host 10.10.10.10. \x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\n\n" + response += b"Error: command failed: Cannot contact destination host (10.10.10.10) from node\n" + response += b" "laurentn-vsim1". Verify connectivity to desired host or skip the\n" + response += b" connectivity check with the "-force" parameter." 
+ response += b"0\n" + with pytest.raises(netapp_utils.zapi.etree.XMLSyntaxError) as exc: + zapi_cx._parse_response(response) + msg = 'PCDATA invalid Char value 8' + assert exc.value.msg.startswith(msg) + + +def test_zapi_cx_add_auth_header(): + ''' should add header ''' + module = create_ontap_module(DEFAULT_ARGS) + zapi_cx = netapp_utils.setup_na_ontap_zapi(module) + assert isinstance(zapi_cx, netapp_utils.OntapZAPICx) + assert zapi_cx.base64_creds is not None + request, dummy = zapi_cx._create_request(netapp_utils.zapi.NaElement('dummy_tag')) + assert "Authorization" in [x[0] for x in request.header_items()] + + +def test_zapi_cx_add_auth_header_explicit(): + ''' should add header ''' + module_args = {'feature_flags': {'classic_basic_authorization': False}} + module = create_ontap_module(DEFAULT_ARGS, module_args) + zapi_cx = netapp_utils.setup_na_ontap_zapi(module) + assert isinstance(zapi_cx, netapp_utils.OntapZAPICx) + assert zapi_cx.base64_creds is not None + request, dummy = zapi_cx._create_request(netapp_utils.zapi.NaElement('dummy_tag')) + assert "Authorization" in [x[0] for x in request.header_items()] + + +def test_zapi_cx_no_auth_header(): + ''' should add header ''' + module_args = {'feature_flags': {'classic_basic_authorization': True, 'always_wrap_zapi': False}} + module = create_ontap_module(DEFAULT_ARGS, module_args) + zapi_cx = netapp_utils.setup_na_ontap_zapi(module) + assert not isinstance(zapi_cx, netapp_utils.OntapZAPICx) + request, dummy = zapi_cx._create_request(netapp_utils.zapi.NaElement('dummy_tag')) + assert "Authorization" not in [x[0] for x in request.header_items()] + + +def test_is_zapi_connection_error(): + message = 'URLError' + assert netapp_utils.is_zapi_connection_error(message) + if sys.version_info >= (3, 5, 0): + # not defined in python 2.7 + message = (ConnectionError(), '') + assert netapp_utils.is_zapi_connection_error(message) + message = [] + assert not netapp_utils.is_zapi_connection_error(message) + + +def 
test_is_zapi_write_access_error(): + message = 'Insufficient privileges: XXXXXXX does not have write access' + assert netapp_utils.is_zapi_write_access_error(message) + message = 'URLError' + assert not netapp_utils.is_zapi_write_access_error(message) + message = [] + assert not netapp_utils.is_zapi_write_access_error(message) + + +def test_is_zapi_missing_vserver_error(): + message = 'Vserver API missing vserver parameter.' + assert netapp_utils.is_zapi_missing_vserver_error(message) + message = 'URLError' + assert not netapp_utils.is_zapi_missing_vserver_error(message) + message = [] + assert not netapp_utils.is_zapi_missing_vserver_error(message) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.IMPORT_EXCEPTION', 'test_exc') +def test_netapp_lib_is_required(): + msg = 'Error: the python NetApp-Lib module is required. Import error: %s' % 'test_exc' + assert netapp_utils.netapp_lib_is_required() == msg + + +def test_warn_when_rest_is_not_supported_http(): + assert netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS, {'use_rest': 'always'})) + print_warnings() + assert_warning_was_raised("Using ZAPI for basic.py, ignoring 'use_rest: always'. 
Note: https is set to false.") + + +def test_warn_when_rest_is_not_supported_https(): + assert netapp_utils.setup_na_ontap_zapi(module=create_ontap_module(DEFAULT_ARGS, {'use_rest': 'always', 'https': True})) + print_warnings() + assert_warning_was_raised("Using ZAPI for basic.py, ignoring 'use_rest: always'.") + + +def test_sanitize_xml(): + zapi_cx = create_ontapzapicx_object(CERT_ARGS) + xml = build_raw_xml_response({'test_key': 'test_Value'}) + print('XML', xml) + assert zapi_cx.sanitize_xml(xml) == xml + + # these tests require that 'V' is not used, and 3.x because of bytes + if sys.version_info > (3, 0): + test_xml = zapi_cx.sanitize_xml(xml.replace(b'V', bytes([8]))) + sanitized_xml = xml.replace(b'V', b'.') + assert zapi_cx.sanitize_xml(test_xml) == sanitized_xml + + with patch('builtins.bytes') as mock_bytes: + # forcing bytes to return some unexpected value to force the older paths + mock_bytes.return_value = 0 + assert zapi_cx.sanitize_xml(test_xml) == sanitized_xml + with patch('builtins.chr') as mock_chr: + # forcing python 2.7 behavior + mock_chr.return_value = b'\x08' + assert zapi_cx.sanitize_xml(test_xml) == sanitized_xml + + +def test_parse_response_exceptions_single(): + zapi_cx = create_ontapzapicx_object(CERT_ARGS) + exc = expect_and_capture_ansible_exception(zapi_cx._parse_response, netapp_utils.zapi.etree.XMLSyntaxError, b'response') + print(exc.value) + assert str(exc.value).startswith('Start tag expected') + + +@patch('netapp_lib.api.zapi.zapi.NaServer._parse_response') +def test_parse_response_exceptions_double(mock_parse_response): + xml_exc = netapp_utils.zapi.etree.XMLSyntaxError('UT', 'code', 101, 22, 'filename') + mock_parse_response.side_effect = [xml_exc, KeyError('second exception')] + zapi_cx = create_ontapzapicx_object(CERT_ARGS) + exc = expect_and_capture_ansible_exception(zapi_cx._parse_response, netapp_utils.zapi.etree.XMLSyntaxError, 'response') + print(exc) + assert str(exc.value) == 'UT. 
Received: response (filename, line 101)' + + # force an exception while processing exception + delattr(xml_exc, 'msg') + mock_parse_response.side_effect = [xml_exc, KeyError('second exception')] + zapi_cx = create_ontapzapicx_object(CERT_ARGS) + exc = expect_and_capture_ansible_exception(zapi_cx._parse_response, netapp_utils.zapi.etree.XMLSyntaxError, 'response') + print(exc) + assert str(exc.value) == 'None (filename, line 101)' diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_response_helper.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_response_helper.py new file mode 100644 index 000000000..21bb3c187 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_response_helper.py @@ -0,0 +1,156 @@ +# Copyright (c) 2022 NetApp +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils rest_generic.py - REST features ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible_collections.netapp.ontap.plugins.module_utils import rest_response_helpers + +RECORD = {'key': 'value'} + +RESPONSES = { + 'empty': {}, + 'zero_record': {'num_records': 0}, + 'empty_records': {'records': []}, + 'one_record': {'records': [RECORD], 'num_records': 1}, + 'one_record_no_num_records': {'records': [RECORD]}, + 'one_record_no_num_records_no_records': RECORD, + 'two_records': {'records': [RECORD, RECORD], 'num_records': 2}, +} + + +def test_check_for_0_or_1_records(): + # no records --> None + response_in, error_in, response_out, error_out = RESPONSES['zero_record'], None, None, None + assert rest_response_helpers.check_for_0_or_1_records('cluster', response_in, error_in) == (response_out, error_out) + response_in, error_in, response_out, error_out = RESPONSES['empty_records'], None, None, None + assert rest_response_helpers.check_for_0_or_1_records('cluster', response_in, 
error_in) == (response_out, error_out) + + # one record + response_in, error_in, response_out, error_out = RESPONSES['one_record'], None, RECORD, None + assert rest_response_helpers.check_for_0_or_1_records('cluster', response_in, error_in) == (response_out, error_out) + response_in, error_in, response_out, error_out = RESPONSES['one_record_no_num_records'], None, RECORD, None + assert rest_response_helpers.check_for_0_or_1_records('cluster', response_in, error_in) == (response_out, error_out) + response_in, error_in, response_out, error_out = RESPONSES['one_record_no_num_records_no_records'], None, RECORD, None + assert rest_response_helpers.check_for_0_or_1_records('cluster', response_in, error_in) == (response_out, error_out) + + +def test_check_for_0_or_1_records_errors(): + # bad input + response_in, error_in, response_out, error_out = None, None, None, 'calling: cluster: no response None.' + assert rest_response_helpers.check_for_0_or_1_records('cluster', response_in, error_in) == (response_out, error_out) + response_in, error_in, response_out, error_out = RESPONSES['empty'], None, None, 'calling: cluster: no response {}.' + assert rest_response_helpers.check_for_0_or_1_records('cluster', response_in, error_in) == (response_out, error_out) + + # error in + response_in, error_in, response_out, error_out = None, 'some_error', None, 'calling: cluster: got some_error.' 
+ assert rest_response_helpers.check_for_0_or_1_records('cluster', response_in, error_in) == (response_out, error_out) + + # more than 1 record + response_in, error_in, response_out, error_out = RESPONSES['two_records'], None, RESPONSES['two_records'], 'calling: cluster: unexpected response' + response, error = rest_response_helpers.check_for_0_or_1_records('cluster', response_in, error_in) + assert response == response_out + assert error.startswith(error_out) + assert 'for query' not in error + response, error = rest_response_helpers.check_for_0_or_1_records('cluster', response_in, error_in, query=RECORD) + assert response == response_out + assert error.startswith(error_out) + expected = 'for query: %s' % RECORD + assert expected in error + + +def test_check_for_0_or_more_records(): + # no records --> None + response_in, error_in, response_out, error_out = RESPONSES['zero_record'], None, None, None + assert rest_response_helpers.check_for_0_or_more_records('cluster', response_in, error_in) == (response_out, error_out) + response_in, error_in, response_out, error_out = RESPONSES['empty_records'], None, None, None + assert rest_response_helpers.check_for_0_or_more_records('cluster', response_in, error_in) == (response_out, error_out) + + # one record + response_in, error_in, response_out, error_out = RESPONSES['one_record'], None, [RECORD], None + assert rest_response_helpers.check_for_0_or_more_records('cluster', response_in, error_in) == (response_out, error_out) + response_in, error_in, response_out, error_out = RESPONSES['one_record_no_num_records'], None, [RECORD], None + assert rest_response_helpers.check_for_0_or_more_records('cluster', response_in, error_in) == (response_out, error_out) + + # more than 1 record + response_in, error_in, response_out, error_out = RESPONSES['two_records'], None, [RECORD, RECORD], None + assert rest_response_helpers.check_for_0_or_more_records('cluster', response_in, error_in) == (response_out, error_out) + + +def 
test_check_for_0_or_more_records_errors(): + # bad input + response_in, error_in, response_out, error_out = None, None, None, 'calling: cluster: no response None.' + assert rest_response_helpers.check_for_0_or_more_records('cluster', response_in, error_in) == (response_out, error_out) + response_in, error_in, response_out, error_out = RESPONSES['empty'], None, None, 'calling: cluster: no response {}.' + assert rest_response_helpers.check_for_0_or_more_records('cluster', response_in, error_in) == (response_out, error_out) + error = "calling: cluster: got No \"records\" key in {'key': 'value'}." + response_in, error_in, response_out, error_out = RESPONSES['one_record_no_num_records_no_records'], None, None, error + assert rest_response_helpers.check_for_0_or_more_records('cluster', response_in, error_in) == (response_out, error_out) + + # error in + response_in, error_in, response_out, error_out = None, 'some_error', None, 'calling: cluster: got some_error.' + assert rest_response_helpers.check_for_0_or_more_records('cluster', response_in, error_in) == (response_out, error_out) + + +class MockOntapRestAPI: + def __init__(self, job_response=None, error=None, raise_if_called=False): + self.job_response, self.error, self.raise_if_called = job_response, error, raise_if_called + + def wait_on_job(self, job): + if self.raise_if_called: + raise AttributeError('wait_on_job should not be called in this test!') + return self.job_response, self.error + + +def test_check_for_error_and_job_results_no_job(): + rest_api = MockOntapRestAPI(raise_if_called=True) + response_in, error_in, response_out, error_out = None, None, None, None + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api) == (response_out, error_out) + + response_in, error_in, response_out, error_out = 'any', None, 'any', None + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api) == (response_out, error_out) + + 
response = {'no_job': 'entry'} + response_in, error_in, response_out, error_out = response, None, response, None + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api) == (response_out, error_out) + + +def test_check_for_error_and_job_results_with_job(): + rest_api = MockOntapRestAPI(job_response='job_response', error=None) + response = {'job': 'job_entry'} + expected_response = {'job': 'job_entry', 'job_response': 'job_response'} + response_in, error_in, response_out, error_out = response, None, expected_response, None + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api) == (response_out, error_out) + + response = {'jobs': ['job_entry'], 'num_records': 1} + expected_response = {'jobs': ['job_entry'], 'num_records': 1, 'job_response': 'job_response'} + response_in, error_in, response_out, error_out = response, None, expected_response, None + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api) == (response_out, error_out) + + +def test_negative_check_for_error_and_job_results_error_in(): + rest_api = MockOntapRestAPI(raise_if_called=True) + response_in, error_in, response_out, error_out = None, 'forced_error', None, 'calling: cluster: got forced_error.' 
+ assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api) == (response_out, error_out) + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api, raw_error=False) == (response_out, error_out) + error_out = 'forced_error' + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api, raw_error=True) == (response_out, error_out) + + +def test_negative_check_for_error_and_job_results_job_error(): + rest_api = MockOntapRestAPI(job_response='job_response', error='job_error') + response = {'job': 'job_entry'} + response_in, error_in, response_out, error_out = response, None, response, "job reported error: job_error, received {'job': 'job_entry'}." + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api) == (response_out, error_out) + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api, raw_error=False) == (response_out, error_out) + error_out = 'job_error' + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api, raw_error=True) == (response_out, error_out) + + +def test_negative_check_for_error_and_job_results_multiple_jobs_error(): + rest_api = MockOntapRestAPI(raise_if_called=True) + response = {'jobs': 'job_entry', 'num_records': 3} + response_in, error_in, response_out, error_out = response, None, response, "multiple jobs in progress, can't check status" + assert rest_response_helpers.check_for_error_and_job_results('cluster', response_in, error_in, rest_api) == (response_out, error_out) diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_application.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_application.py new file mode 100644 index 000000000..346114ebb --- /dev/null +++ 
b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_application.py @@ -0,0 +1,346 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2022, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +""" unit tests for module_utils rest_vserver.py + + Provides wrappers for svm/svms REST APIs +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +from ansible.module_utils import basic +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import create_module, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.module_utils import rest_application + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'app_uuid': (200, {"records": [{"uuid": "test_uuid"}], "num_records": 1}, None), + 'app_details': (200, {"details": "test_details"}, None), + 'app_components': (200, {"records": [{"component": "test_component", "uuid": "component_uuid"}], "num_records": 1}, None), + 'app_component_details': (200, {"component": "test_component", "uuid": "component_uuid", 'backing_storage': 'backing_storage'}, None), + 'unexpected_argument': (200, None, 'Unexpected argument: exclude_aggregates'), +}) + + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'cert_filepath': None, + 'key_filepath': None, +} + + +class MockONTAPModule: + def __init__(self): + self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec()) + + +def create_restapi_object(default_args, module_args=None): + module = create_module(MockONTAPModule, default_args, module_args) + return 
netapp_utils.OntapRestAPI(module.module) + + +def create_app(svm_name='vserver_name', app_name='application_name'): + rest_api = create_restapi_object(DEFAULT_ARGS) + return rest_application.RestApplication(rest_api, svm_name, app_name) + + +def test_successfully_create_object(): + register_responses([ + # ('GET', 'svm/svms', SRR['svm_uuid']), + # ('GET', 'svm/svms', SRR['zero_records']), + ]) + assert create_app().svm_name == 'vserver_name' + + +def test_successfully_get_application_uuid(): + register_responses([ + ('GET', 'application/applications', SRR['zero_records']), + ('GET', 'application/applications', SRR['app_uuid']), + ]) + my_app = create_app() + assert my_app.get_application_uuid() == (None, None) + assert my_app.get_application_uuid() == ('test_uuid', None) + # UUID is cached if not None, so no API call + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.get_application_uuid() == ('test_uuid', None) + + +def test_negative_get_application_uuid(): + register_responses([ + ('GET', 'application/applications', SRR['generic_error']), + ]) + my_app = create_app() + assert my_app.get_application_uuid() == (None, rest_error_message('', 'application/applications')) + + +def test_successfully_get_application_details(): + register_responses([ + ('GET', 'application/applications', SRR['zero_records']), + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid', SRR['app_details']), + ('GET', 'application/applications/test_uuid', SRR['app_details']), + ('GET', 'application/applications/test_uuid', SRR['app_details']), + ]) + my_app = create_app() + assert my_app.get_application_details() == (None, None) + assert my_app.get_application_details() == (SRR['app_details'][1], None) + # UUID is cached if not None, so no API call + assert my_app.get_application_details(template='test') == (SRR['app_details'][1], None) + assert my_app.get_application_details() == (SRR['app_details'][1], None) + + +def 
test_negative_get_application_details(): + register_responses([ + ('GET', 'application/applications', SRR['generic_error']), + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid', SRR['generic_error']), + ]) + my_app = create_app() + assert my_app.get_application_details() == (None, rest_error_message('', 'application/applications')) + assert my_app.get_application_details() == (None, rest_error_message('', 'application/applications/test_uuid')) + + +def test_successfully_create_application(): + register_responses([ + ('POST', 'application/applications', SRR['success']), + ]) + my_app = create_app() + assert my_app.create_application({'option': 'option'}) == ({}, None) + + +def test_negative_create_application(): + register_responses([ + ('POST', 'application/applications', SRR['generic_error']), + ('POST', 'application/applications', SRR['unexpected_argument']), + # third call, create fails if app already exists + ('GET', 'application/applications', SRR['app_uuid']), + ]) + my_app = create_app() + assert my_app.create_application({'option': 'option'}) == (None, rest_error_message('', 'application/applications')) + assert my_app.create_application({'option': 'option'}) == ( + None, 'calling: application/applications: got Unexpected argument: exclude_aggregates. 
"exclude_aggregates" requires ONTAP 9.9.1 GA or later.') + + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.create_application({'option': 'option'}) ==\ + (None, 'function create_application should not be called when application uuid is set: test_uuid.') + + +def test_successfully_patch_application(): + register_responses([ + ('GET', 'application/applications', SRR['app_uuid']), + ('PATCH', 'application/applications/test_uuid', SRR['success']), + ]) + my_app = create_app() + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.patch_application({'option': 'option'}) == ({}, None) + + +def test_negative_patch_application(): + register_responses([ + # first call, patch fails if app does not exist + # second call + ('GET', 'application/applications', SRR['app_uuid']), + ('PATCH', 'application/applications/test_uuid', SRR['generic_error']), + ]) + my_app = create_app() + assert my_app.patch_application({'option': 'option'}) == (None, 'function should not be called before application uuid is set.') + + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.patch_application({'option': 'option'}) == (None, rest_error_message('', 'application/applications/test_uuid')) + + +def test_successfully_delete_application(): + register_responses([ + ('GET', 'application/applications', SRR['app_uuid']), + ('DELETE', 'application/applications/test_uuid', SRR['success']), + ]) + my_app = create_app() + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.delete_application() == ({}, None) + + +def test_negative_delete_application(): + register_responses([ + # first call, delete fails if app does not exist + # second call + ('GET', 'application/applications', SRR['app_uuid']), + ('DELETE', 'application/applications/test_uuid', SRR['generic_error']), + ]) + my_app = create_app() + assert my_app.delete_application() == (None, 'function should not be called before application uuid is set.') + 
+ assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.delete_application() == (None, rest_error_message('', 'application/applications/test_uuid')) + + +def test_successfully_get_application_components(): + register_responses([ + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid/components', SRR['zero_records']), + ('GET', 'application/applications/test_uuid/components', SRR['app_components']), + ('GET', 'application/applications/test_uuid/components', SRR['app_components']), + ]) + my_app = create_app() + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.get_application_components() == (None, None) + assert my_app.get_application_components() == (SRR['app_components'][1]['records'], None) + assert my_app.get_application_components() == (SRR['app_components'][1]['records'], None) + + +def test_negative_get_application_components(): + register_responses([ + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid/components', SRR['generic_error']), + ]) + my_app = create_app() + assert my_app.get_application_components() == (None, 'function should not be called before application uuid is set.') + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.get_application_components() == (None, rest_error_message('', 'application/applications/test_uuid/components')) + + +def test_successfully_get_application_component_uuid(): + register_responses([ + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid/components', SRR['zero_records']), + ('GET', 'application/applications/test_uuid/components', SRR['app_components']), + ('GET', 'application/applications/test_uuid/components', SRR['app_components']), + ]) + my_app = create_app() + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.get_application_component_uuid() == (None, None) + assert 
my_app.get_application_component_uuid() == ('component_uuid', None) + assert my_app.get_application_component_uuid() == ('component_uuid', None) + + +def test_negative_get_application_component_uuid(): + register_responses([ + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid/components', SRR['generic_error']), + ]) + my_app = create_app() + assert my_app.get_application_component_uuid() == (None, 'function should not be called before application uuid is set.') + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.get_application_component_uuid() == (None, rest_error_message('', 'application/applications/test_uuid/components')) + + +def test_successfully_get_application_component_details(): + register_responses([ + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid/components', SRR['app_components']), + ('GET', 'application/applications/test_uuid/components/component_uuid', SRR['app_components']), + ]) + my_app = create_app() + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.get_application_component_details() == (SRR['app_components'][1]['records'][0], None) + + +def test_negative_get_application_component_details(): + register_responses([ + # first call, fail as UUID not set + # second call, fail to retrieve UUID + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid/components', SRR['zero_records']), + # fail to retrieve UUID + ('GET', 'application/applications/test_uuid/components', SRR['generic_error']), + # fail to retrieve component_details + ('GET', 'application/applications/test_uuid/components', SRR['app_components']), + ('GET', 'application/applications/test_uuid/components/component_uuid', SRR['generic_error']), + ]) + my_app = create_app() + assert my_app.get_application_component_details() == (None, 'function should not be called before application uuid is 
set.') + # second call, set UUI first + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.get_application_component_details() == (None, 'no component for application application_name') + # third call + assert my_app.get_application_component_details() == (None, rest_error_message('', 'application/applications/test_uuid/components')) + # fourth call + assert my_app.get_application_component_details() == (None, rest_error_message('', 'application/applications/test_uuid/components/component_uuid')) + + +def test_successfully_get_application_component_backing_storage(): + register_responses([ + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid/components', SRR['app_components']), + ('GET', 'application/applications/test_uuid/components/component_uuid', SRR['app_component_details']), + ]) + my_app = create_app() + assert my_app.get_application_uuid() == ('test_uuid', None) + assert my_app.get_application_component_backing_storage() == ('backing_storage', None) + + +def test_negative_get_application_component_backing_storage(): + register_responses([ + # first call, fail as UUID not set + # second call, fail to retrieve UUID + ('GET', 'application/applications', SRR['app_uuid']), + ('GET', 'application/applications/test_uuid/components', SRR['zero_records']), + # fail to retrieve UUID + ('GET', 'application/applications/test_uuid/components', SRR['generic_error']), + # fail to retrieve component_backing_storage + ('GET', 'application/applications/test_uuid/components', SRR['app_components']), + ('GET', 'application/applications/test_uuid/components/component_uuid', SRR['generic_error']), + ]) + my_app = create_app() + assert my_app.get_application_component_backing_storage() == (None, 'function should not be called before application uuid is set.') + # second call, set UUI first + assert my_app.get_application_uuid() == ('test_uuid', None) + assert 
my_app.get_application_component_backing_storage() == (None, 'no component for application application_name') + # third call + assert my_app.get_application_component_backing_storage() == (None, rest_error_message('', 'application/applications/test_uuid/components')) + # fourth call + assert my_app.get_application_component_backing_storage() == (None, rest_error_message('', 'application/applications/test_uuid/components/component_uuid')) + + +def test_create_application_body(): + my_app = create_app() + body = { + 'name': my_app.app_name, + 'svm': {'name': my_app.svm_name}, + 'smart_container': True, + 'tname': 'tbody' + } + assert my_app.create_application_body('tname', 'tbody') == (body, None) + body['smart_container'] = False + assert my_app.create_application_body('tname', 'tbody', False) == (body, None) + assert my_app.create_application_body('tname', 'tbody', 'False') == (None, 'expecting bool value for smart_container, got: False') diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_generic.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_generic.py new file mode 100644 index 000000000..b2b42ed97 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_generic.py @@ -0,0 +1,492 @@ +# Copyright (c) 2022 NetApp +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils rest_generic.py - REST features ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +from ansible.module_utils import basic +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible, create_module +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import 
patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils import rest_generic + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'vservers_with_admin': (200, { + 'records': [ + {'vserver': 'vserver1', 'type': 'data '}, + {'vserver': 'vserver2', 'type': 'data '}, + {'vserver': 'cserver', 'type': 'admin'} + ]}, None), + 'vservers_single': (200, { + 'records': [ + {'vserver': 'single', 'type': 'data '}, + ]}, None), + 'accepted_response': (202, { + 'job': { + 'uuid': 'd0b3eefe-cd59-11eb-a170-005056b338cd', + '_links': {'self': {'href': '/api/cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd'}} + }}, None), + 'job_in_progress': (200, { + 'job': { + 'uuid': 'a1b2c3_job', + '_links': {'self': {'href': 'api/some_link'}} + }}, None), + 'job_success': (200, { + 'state': 'success', + 'message': 'success_message', + 'job': { + 'uuid': 'a1b2c3_job', + '_links': {'self': {'href': 'some_link'}} + }}, None), + 'job_failed': (200, { + 'state': 'error', + 'message': 'error_message', + 'job': { + 'uuid': 'a1b2c3_job', + '_links': {'self': {'href': 'some_link'}} + }}, None), +}) + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'cert_filepath': None, + 'key_filepath': None, +} + +CERT_ARGS = { + 'hostname': 'test', + 'cert_filepath': 'test_pem.pem', + 'key_filepath': 'test_key.key' +} + + +class MockONTAPModule: + def __init__(self): + self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec()) + + +def create_restapi_object(default_args, module_args=None): + module = create_module(MockONTAPModule, default_args, module_args) + return 
netapp_utils.OntapRestAPI(module.module) + + +def test_build_query_with_fields(): + assert rest_generic.build_query_with_fields(None, None) is None + assert rest_generic.build_query_with_fields(query=None, fields=None) is None + assert rest_generic.build_query_with_fields(query={'aaa': 'vvv'}, fields=None) == {'aaa': 'vvv'} + assert rest_generic.build_query_with_fields(query=None, fields='aaa,bbb') == {'fields': 'aaa,bbb'} + assert rest_generic.build_query_with_fields(query={'aaa': 'vvv'}, fields='aaa,bbb') == {'aaa': 'vvv', 'fields': 'aaa,bbb'} + + +def test_build_query_with_timeout(): + assert rest_generic.build_query_with_timeout(query=None, timeout=30) == {'return_timeout': 30} + + # when timeout is 0, return_timeout is not added + assert rest_generic.build_query_with_timeout(query=None, timeout=0) is None + assert rest_generic.build_query_with_timeout(query={'aaa': 'vvv'}, timeout=0) == {'aaa': 'vvv'} + + # when return_timeout is in the query, it has precedence + query = {'return_timeout': 55} + assert rest_generic.build_query_with_timeout(query, timeout=0) == query + assert rest_generic.build_query_with_timeout(query, timeout=20) == query + query = {'aaa': 'vvv', 'return_timeout': 55} + assert rest_generic.build_query_with_timeout(query, timeout=0) == query + assert rest_generic.build_query_with_timeout(query, timeout=20) == query + + +def test_successful_get_one_record_no_records_field(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster') + assert error is None + assert record == SRR['is_rest_9_10_1'][1] + + +def test_successful_get_one_record(): + register_responses([ + ('GET', 'cluster', SRR['vservers_single']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster') + assert error is None + assert record == SRR['vservers_single'][1]['records'][0] + + +def 
test_successful_get_one_record_no_record(): + register_responses([ + ('GET', 'cluster', SRR['zero_records']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster') + assert error is None + assert record is None + + +def test_successful_get_one_record_NN(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster', query=None, fields=None) + assert error is None + assert record == SRR['is_rest_9_10_1'][1] + + +def test_successful_get_one_record_NV(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster', query=None, fields='aaa,bbb') + assert error is None + assert record == SRR['is_rest_9_10_1'][1] + + +def test_successful_get_one_record_VN(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster', query={'aaa': 'value'}, fields=None) + assert error is None + assert record == SRR['is_rest_9_10_1'][1] + + +def test_successful_get_one_record_VV(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster', query={'aaa': 'value'}, fields='aaa,bbb') + assert error is None + assert record == SRR['is_rest_9_10_1'][1] + + +def test_error_get_one_record_empty(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster', query=None, fields=None) + assert error == 'calling: cluster: no response {}.' 
+ assert record is None + + +def test_error_get_one_record_multiple(): + register_responses([ + ('GET', 'cluster', SRR['vservers_with_admin']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster', query={'aaa': 'vvv'}, fields=None) + assert "calling: cluster: unexpected response {'records':" in error + assert "for query: {'aaa': 'vvv'}" in error + assert record == SRR['vservers_with_admin'][1] + + +def test_error_get_one_record_rest_error(): + register_responses([ + ('GET', 'cluster', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record, error = rest_generic.get_one_record(rest_api, 'cluster', query=None, fields=None) + assert error == 'calling: cluster: got Expected error.' + assert record is None + + +def test_successful_get_0_or_more_records(): + register_responses([ + ('GET', 'cluster', SRR['vservers_with_admin']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + records, error = rest_generic.get_0_or_more_records(rest_api, 'cluster') + assert error is None + assert records == SRR['vservers_with_admin'][1]['records'] + + +def test_successful_get_0_or_more_records_NN(): + register_responses([ + ('GET', 'cluster', SRR['vservers_with_admin']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + records, error = rest_generic.get_0_or_more_records(rest_api, 'cluster', query=None, fields=None) + assert error is None + assert records == SRR['vservers_with_admin'][1]['records'] + + +def test_successful_get_0_or_more_records_NV(): + register_responses([ + ('GET', 'cluster', SRR['vservers_with_admin']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + records, error = rest_generic.get_0_or_more_records(rest_api, 'cluster', query=None, fields='aaa,bbb') + assert error is None + assert records == SRR['vservers_with_admin'][1]['records'] + + +def test_successful_get_0_or_more_records_VN(): + register_responses([ + ('GET', 'cluster', SRR['vservers_with_admin']) + 
]) + rest_api = create_restapi_object(DEFAULT_ARGS) + records, error = rest_generic.get_0_or_more_records(rest_api, 'cluster', query={'aaa': 'value'}, fields=None) + assert error is None + assert records == SRR['vservers_with_admin'][1]['records'] + + +def test_successful_get_0_or_more_records_VV(): + register_responses([ + ('GET', 'cluster', SRR['vservers_with_admin']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + records, error = rest_generic.get_0_or_more_records(rest_api, 'cluster', query={'aaa': 'value'}, fields='aaa,bbb') + assert error is None + assert records == SRR['vservers_with_admin'][1]['records'] + + +def test_successful_get_0_or_more_records_VV_1_record(): + register_responses([ + ('GET', 'cluster', SRR['vservers_single']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + records, error = rest_generic.get_0_or_more_records(rest_api, 'cluster', query={'aaa': 'value'}, fields='aaa,bbb') + assert error is None + assert records == SRR['vservers_single'][1]['records'] + + +def test_successful_get_0_or_more_records_VV_0_record(): + register_responses([ + ('GET', 'cluster', SRR['zero_records']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + records, error = rest_generic.get_0_or_more_records(rest_api, 'cluster', query={'aaa': 'value'}, fields='aaa,bbb') + assert error is None + assert records is None + + +def test_error_get_0_or_more_records(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + records, error = rest_generic.get_0_or_more_records(rest_api, 'cluster', query=None, fields=None) + assert error == 'calling: cluster: no response {}.' 
+ assert records is None + + +def test_error_get_0_or_more_records_rest_error(): + register_responses([ + ('GET', 'cluster', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + records, error = rest_generic.get_0_or_more_records(rest_api, 'cluster', query=None, fields=None) + assert error == 'calling: cluster: got Expected error.' + assert records is None + + +def test_successful_post_async(): + register_responses([ + ('POST', 'cluster', SRR['vservers_single']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.post_async(rest_api, 'cluster', {}) + assert error is None + assert response == SRR['vservers_single'][1] + + +def test_error_post_async(): + register_responses([ + ('POST', 'cluster', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.post_async(rest_api, 'cluster', {}) + assert error == 'calling: cluster: got Expected error.' + assert response is None + + +@patch('time.sleep') +def test_successful_post_async_with_job(dont_sleep): + register_responses([ + ('POST', 'cluster', SRR['accepted_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_success']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.post_async(rest_api, 'cluster', {}) + assert error is None + assert 'job_response' in response + assert response['job_response'] == 'success_message' + + +@patch('time.sleep') +def test_successful_post_async_with_job_failure(dont_sleep): + register_responses([ + ('POST', 'cluster', SRR['accepted_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 
'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_failed']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.post_async(rest_api, 'cluster', {}) + assert error is None + assert 'job_response' in response + assert response['job_response'] == 'error_message' + + +@patch('time.sleep') +def test_error_post_async_with_job(dont_sleep): + register_responses([ + ('POST', 'cluster', SRR['accepted_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.post_async(rest_api, 'cluster', {}) + assert 'job reported error: Expected error - Expected error - Expected error - Expected error, received' in error + assert response == SRR['accepted_response'][1] + + +def test_successful_patch_async(): + register_responses([ + ('PATCH', 'cluster/uuid', SRR['vservers_single']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.patch_async(rest_api, 'cluster', 'uuid', {}) + assert error is None + assert response == SRR['vservers_single'][1] + + +def test_error_patch_async(): + register_responses([ + ('PATCH', 'cluster/uuid', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.patch_async(rest_api, 'cluster', 'uuid', {}) + assert error == 'calling: cluster/uuid: got Expected error.' 
+ assert response is None + + +@patch('time.sleep') +def test_successful_patch_async_with_job(dont_sleep): + register_responses([ + ('PATCH', 'cluster/uuid', SRR['accepted_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_success']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.patch_async(rest_api, 'cluster', 'uuid', {}) + assert error is None + assert 'job_response' in response + assert response['job_response'] == 'success_message' + + +@patch('time.sleep') +def test_successful_patch_async_with_job_failure(dont_sleep): + register_responses([ + ('PATCH', 'cluster/uuid', SRR['accepted_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_failed']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.patch_async(rest_api, 'cluster', 'uuid', {}) + assert error is None + assert 'job_response' in response + assert response['job_response'] == 'error_message' + + +@patch('time.sleep') +def test_error_patch_async_with_job(dont_sleep): + register_responses([ + ('PATCH', 'cluster/uuid', SRR['accepted_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.patch_async(rest_api, 'cluster', 'uuid', {}) + assert 'job reported error: Expected error - Expected error - Expected error - Expected error, 
received' in error + assert response == SRR['accepted_response'][1] + + +def test_successful_delete_async(): + register_responses([ + ('DELETE', 'cluster/uuid', SRR['vservers_single']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.delete_async(rest_api, 'cluster', 'uuid') + assert error is None + assert response == SRR['vservers_single'][1] + + +def test_error_delete_async(): + register_responses([ + ('DELETE', 'cluster/uuid', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.delete_async(rest_api, 'cluster', 'uuid') + assert error == 'calling: cluster/uuid: got Expected error.' + assert response is None + + +@patch('time.sleep') +def test_successful_delete_async_with_job(dont_sleep): + register_responses([ + ('DELETE', 'cluster/uuid', SRR['accepted_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_success']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.delete_async(rest_api, 'cluster', 'uuid') + assert error is None + assert 'job_response' in response + assert response['job_response'] == 'success_message' + + +@patch('time.sleep') +def test_successful_delete_async_with_job_failure(dont_sleep): + register_responses([ + ('DELETE', 'cluster/uuid', SRR['accepted_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_failed']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.delete_async(rest_api, 'cluster', 'uuid') + assert error is None + assert 'job_response' in response + assert response['job_response'] == 'error_message' + + +@patch('time.sleep') +def test_error_delete_async_with_job(dont_sleep): + register_responses([ + ('DELETE', 'cluster/uuid', 
SRR['accepted_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_in_progress']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + response, error = rest_generic.delete_async(rest_api, 'cluster', 'uuid') + assert 'job reported error: Expected error - Expected error - Expected error - Expected error, received' in error + assert response == SRR['accepted_response'][1] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_owning_resource.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_owning_resource.py new file mode 100644 index 000000000..a7465e8d2 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_owning_resource.py @@ -0,0 +1,98 @@ +# Copyright (c) 2022 NetApp +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils rest_owning_resource.py - REST features ''' +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +from ansible.module_utils import basic +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import create_module, expect_and_capture_ansible_exception, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +import
ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.module_utils import rest_owning_resource + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'get_uuid_policy_id_export_policy': ( + 200, + { + "records": [{ + "svm": { + "uuid": "uuid", + "name": "svm"}, + "id": 123, + "name": "ansible" + }], + "num_records": 1}, None), + 'get_uuid_from_volume': ( + 200, + { + "records": [{ + "svm": { + "uuid": "uuid", + "name": "svm"}, + "uuid": "028baa66-41bd-11e9-81d5-00a0986138f7" + }] + }, None + ) +}) + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', +} + + +class MockONTAPModule: + def __init__(self): + self.module = basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec()) + + +def create_restapi_object(default_args, module_args=None): + module = create_module(MockONTAPModule, default_args, module_args) + return netapp_utils.OntapRestAPI(module.module) + + +def test_get_policy_id(): + register_responses([ + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record = rest_owning_resource.get_export_policy_id(rest_api, 'ansible', 'svm', rest_api.module) + assert record == SRR['get_uuid_policy_id_export_policy'][1]['records'][0]['id'] + + +def test_error_get_policy_id(): + register_responses([ + ('GET', 'protocols/nfs/export-policies', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + error = 'Could not find export policy ansible on SVM svm' + assert error in expect_and_capture_ansible_exception(rest_owning_resource.get_export_policy_id, 'fail', rest_api, 'ansible', 'svm', rest_api.module)['msg'] + + +def test_get_volume_uuid(): + register_responses([ + ('GET', 'storage/volumes', SRR['get_uuid_from_volume']) 
+ ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + record = rest_owning_resource.get_volume_uuid(rest_api, 'ansible', 'svm', rest_api.module) + assert record == SRR['get_uuid_from_volume'][1]['records'][0]['uuid'] + + +def test_error_get_volume_uuid(): + register_responses([ + ('GET', 'storage/volumes', SRR['generic_error']) + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + error = 'Could not find volume ansible on SVM svm' + assert error in expect_and_capture_ansible_exception(rest_owning_resource.get_volume_uuid, 'fail', rest_api, 'ansible', 'svm', rest_api.module)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_volume.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_volume.py new file mode 100644 index 000000000..0c1a77e7f --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_volume.py @@ -0,0 +1,233 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2021, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" unit tests for module_utils rest_volume.py + + Provides wrappers for storage/volumes REST APIs +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import pytest +import sys + +# from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, call +from ansible_collections.netapp.ontap.plugins.module_utils import rest_volume +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')],
num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'one_volume_record': (200, dict(records=[ + dict(uuid='a1b2c3', + name='test', + svm=dict(name='vserver'), + ) + ], num_records=1), None), + 'three_volume_records': (200, dict(records=[ + dict(uuid='a1b2c3_1', + name='test1', + svm=dict(name='vserver'), + ), + dict(uuid='a1b2c3_2', + name='test2', + svm=dict(name='vserver'), + ), + dict(uuid='a1b2c3_3', + name='test3', + svm=dict(name='vserver'), + ) + ], num_records=3), None), + 'job': (200, { + 'job': { + 'uuid': 'a1b2c3_job', + '_links': {'self': {'href': 'api/some_link'}} + }}, None), + 'job_bad_url': (200, { + 'job': { + 'uuid': 'a1b2c3_job', + '_links': {'self': {'href': 'some_link'}} + }}, None), + 'job_status_success': (200, { + 'state': 'success', + 'message': 'success_message', + 'job': { + 'uuid': 'a1b2c3_job', + '_links': {'self': {'href': 'some_link'}} + }}, None), +} + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +class MockModule(object): + ''' rough mock for an Ansible module class ''' + def __init__(self): + self.params = dict( + username='my_username', + password='my_password', + hostname='my_hostname', + use_rest='my_use_rest', + cert_filepath=None, + key_filepath=None, + validate_certs='my_validate_certs', + http_port=None, + feature_flags=None, + ) + + def fail_json(self, *args, **kwargs): # pylint: disable=unused-argument + """function to simulate fail_json: package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_get_volumes_none(mock_request): + module = MockModule() + rest_api = netapp_utils.OntapRestAPI(module) + mock_request.side_effect = [ + SRR['zero_record'], + SRR['end_of_sequence']] + volumes, error = 
rest_volume.get_volumes(rest_api) + assert error is None + assert volumes is None + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_get_volumes_one(mock_request): + module = MockModule() + rest_api = netapp_utils.OntapRestAPI(module) + mock_request.side_effect = [ + SRR['one_volume_record'], + SRR['end_of_sequence']] + volumes, error = rest_volume.get_volumes(rest_api, 'vserver', 'name') + assert error is None + assert volumes == [SRR['one_volume_record'][1]['records'][0]] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_get_volumes_three(mock_request): + module = MockModule() + rest_api = netapp_utils.OntapRestAPI(module) + mock_request.side_effect = [ + SRR['three_volume_records'], + SRR['end_of_sequence']] + volumes, error = rest_volume.get_volumes(rest_api) + assert error is None + assert volumes == [SRR['three_volume_records'][1]['records'][x] for x in (0, 1, 2)] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_get_volume_not_found(mock_request): + module = MockModule() + rest_api = netapp_utils.OntapRestAPI(module) + mock_request.side_effect = [ + SRR['zero_record'], + SRR['end_of_sequence']] + volume, error = rest_volume.get_volume(rest_api, 'name', 'vserver') + assert error is None + assert volume is None + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_get_volume_found(mock_request): + module = MockModule() + rest_api = netapp_utils.OntapRestAPI(module) + mock_request.side_effect = [ + SRR['one_volume_record'], + SRR['end_of_sequence']] + volume, error = rest_volume.get_volume(rest_api, 'name', 'vserver') + assert error is None + assert volume == SRR['one_volume_record'][1]['records'][0] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def 
test_get_volume_too_many(mock_request): + module = MockModule() + rest_api = netapp_utils.OntapRestAPI(module) + mock_request.side_effect = [ + SRR['three_volume_records'], + SRR['end_of_sequence']] + dummy, error = rest_volume.get_volume(rest_api, 'name', 'vserver') + expected = "calling: storage/volumes: unexpected response" + assert expected in error + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_patch_volume_async(mock_request): + module = MockModule() + rest_api = netapp_utils.OntapRestAPI(module) + mock_request.side_effect = [ + copy.deepcopy(SRR['job']), # deepcopy as job is modified in place! + SRR['job_status_success'], + SRR['end_of_sequence']] + body = dict(a1=1, a2=True, a3='str') + response, error = rest_volume.patch_volume(rest_api, 'uuid', body) + job = dict(SRR['job'][1]) # deepcopy as job is modified in place! + job['job_response'] = SRR['job_status_success'][1]['message'] + assert error is None + assert response == job + expected = call('PATCH', 'storage/volumes/uuid', {'return_timeout': 30}, json=body, headers=None, files=None) + assert expected in mock_request.mock_calls + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_patch_volume_async_with_query(mock_request): + module = MockModule() + rest_api = netapp_utils.OntapRestAPI(module) + mock_request.side_effect = [ + copy.deepcopy(SRR['job']), # deepcopy as job is modified in place! + SRR['job_status_success'], + SRR['end_of_sequence']] + body = dict(a1=1, a2=True, a3='str') + query = dict(return_timeout=20) + response, error = rest_volume.patch_volume(rest_api, 'uuid', body, query) + job = dict(SRR['job'][1]) # deepcopy as job is modified in place! 
+ job['job_response'] = SRR['job_status_success'][1]['message'] + assert error is None + assert response == job + expected = call('PATCH', 'storage/volumes/uuid', {'return_timeout': 20}, json=body, headers=None, files=None) + assert expected in mock_request.mock_calls diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_vserver.py b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_vserver.py new file mode 100644 index 000000000..c646abed2 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/module_utils/test_rest_vserver.py @@ -0,0 +1,120 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2022, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" unit tests for module_utils rest_vserver.py + + Provides wrappers for svm/svms REST APIs +""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +from ansible.module_utils import basic +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import create_module, expect_and_capture_ansible_exception, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.module_utils import rest_vserver + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'svm_uuid': (200, {"records": [{"uuid": "test_uuid"}], "num_records": 1}, None), +}) + + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'cert_filepath': None, + 'key_filepath': None, +} + + +class MockONTAPModule: + def __init__(self): + self.module = 
basic.AnsibleModule(netapp_utils.na_ontap_host_argument_spec()) + + +def create_restapi_object(default_args, module_args=None): + module = create_module(MockONTAPModule, default_args, module_args) + return netapp_utils.OntapRestAPI(module.module) + + +def test_successfully_get_vserver(): + register_responses([ + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'svm/svms', SRR['zero_records']), + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + assert rest_vserver.get_vserver(rest_api, 'svm_name') == ({'uuid': 'test_uuid'}, None) + assert rest_vserver.get_vserver(rest_api, 'svm_name') == (None, None) + + +def test_negative_get_vserver(): + register_responses([ + ('GET', 'svm/svms', SRR['generic_error']), + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + assert rest_vserver.get_vserver(rest_api, 'svm_name') == (None, rest_error_message('', 'svm/svms')) + + +def test_successfully_get_vserver_uuid(): + register_responses([ + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'svm/svms', SRR['zero_records']), + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + assert rest_vserver.get_vserver_uuid(rest_api, 'svm_name') == ('test_uuid', None) + assert rest_vserver.get_vserver_uuid(rest_api, 'svm_name') == (None, None) + + +def test_negative_get_vserver_uuid(): + register_responses([ + ('GET', 'svm/svms', SRR['generic_error']), + ('GET', 'svm/svms', SRR['generic_error']), + ('GET', 'svm/svms', SRR['zero_records']), + ('GET', 'svm/svms', SRR['zero_records']), + ]) + rest_api = create_restapi_object(DEFAULT_ARGS) + assert rest_vserver.get_vserver_uuid(rest_api, 'svm_name') == (None, rest_error_message('', 'svm/svms')) + assert expect_and_capture_ansible_exception(rest_vserver.get_vserver_uuid, 'fail', rest_api, 'svm_name', rest_api.module)['msg'] ==\ + rest_error_message('Error fetching vserver svm_name', 'svm/svms') + assert rest_vserver.get_vserver_uuid(rest_api, 'svm_name', error_on_none=True) == (None, 'vserver svm_name does not exist or is not a data 
vserver.') + assert expect_and_capture_ansible_exception(rest_vserver.get_vserver_uuid, 'fail', rest_api, 'svm_name', rest_api.module, error_on_none=True)['msg'] ==\ + 'Error vserver svm_name does not exist or is not a data vserver.' diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/.gitignore b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/.gitignore new file mode 100644 index 000000000..bc1a1f616 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/.gitignore @@ -0,0 +1,2 @@ +# Created by pytest automatically. +* diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/CACHEDIR.TAG b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/CACHEDIR.TAG new file mode 100644 index 000000000..fce15ad7e --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/CACHEDIR.TAG @@ -0,0 +1,4 @@ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. +# For information about cache directory tags, see: +# https://bford.info/cachedir/spec.html diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/README.md b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/README.md new file mode 100644 index 000000000..b89018ced --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/README.md @@ -0,0 +1,8 @@ +# pytest cache directory # + +This directory contains data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information. 
diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/lastfailed b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/lastfailed new file mode 100644 index 000000000..ba7b58d20 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/lastfailed @@ -0,0 +1,3 @@ +{ + "test_na_ontap_lun_rest.py": true +} \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/nodeids b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/nodeids new file mode 100644 index 000000000..ca22cf9ee --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/nodeids @@ -0,0 +1,6 @@ +[ + "test_na_ontap_lun.py::TestMyModule::test_create_error_missing_param", + "test_na_ontap_lun.py::TestMyModule::test_module_fail_when_required_args_missing", + "test_na_ontap_lun_rest.py::TestMyModule::test_create_error_missing_param", + "test_na_ontap_lun_rest.py::TestMyModule::test_successful_create_appli" +] \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/stepwise b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/stepwise new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/.pytest_cache/v/cache/stepwise @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_active_directory.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_active_directory.py new file mode 100644 index 000000000..7e108b081 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_active_directory.py @@ -0,0 +1,311 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP Ansible module na_ontap_active_directory ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + set_module_args, AnsibleExitJson, AnsibleFailJson, patch_ansible, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import zapi_responses, build_zapi_response +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_active_directory \ + import NetAppOntapActiveDirectory as my_module, main as my_main # module under test +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +# not available on 2.6 anymore +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +def default_args(use_rest='never'): + return { + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'account_name': 'account_name', + 'vserver': 'vserver', + 'admin_password': 'admin_password', + 'admin_username': 'admin_username', + 'use_rest': use_rest + } + + +ad_info = { + 'attributes-list': { + 'active-directory-account-config': { + 'account-name': 'account_name', + 'domain': 'current.domain', + 'organizational-unit': 'current.ou', + } + } +} + + +ZRR = zapi_responses( + {'ad': build_zapi_response(ad_info, 1)} +) + +SRR = rest_responses({ + 'ad_1': (200, 
{"records": [{ + "fqdn": "server1.com", + "name": "account_name", + "organizational_unit": "CN=Test", + "svm": {"name": "svm1", "uuid": "02c9e252"} + }], "num_records": 1}, None), + 'ad_2': (200, {"records": [{ + "fqdn": "server2.com", + "name": "account_name", + "organizational_unit": "CN=Test", + "svm": {"name": "svm1", "uuid": "02c9e252"} + }], "num_records": 1}, None) +}) + + +def test_success_create(): + ''' test get''' + args = dict(default_args()) + args['domain'] = 'some.domain' + args['force_account_overwrite'] = True + args['organizational_unit'] = 'some.OU' + set_module_args(args) + register_responses([ + # list of tuples: (expected ZAPI, response) + ('active-directory-account-get-iter', ZRR['success']), + ('active-directory-account-create', ZRR['success']), + ]) + + with pytest.raises(AnsibleExitJson) as exc: + my_main() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] + + +def test_fail_create_zapi_error(): + ''' test get''' + args = dict(default_args()) + set_module_args(args) + register_responses([ + # list of tuples: (expected ZAPI, response) + ('active-directory-account-get-iter', ZRR['success']), + ('active-directory-account-create', ZRR['error']), + ]) + + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'Error creating vserver Active Directory account_name: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' + assert msg == exc.value.args[0]['msg'] + + +def test_success_delete(): + ''' test get''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + register_responses([ + # list of tuples: (expected ZAPI, response) + ('active-directory-account-get-iter', ZRR['ad']), + ('active-directory-account-delete', ZRR['success']), + ]) + + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] + + +def test_fail_delete_zapi_error(): + ''' test get''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + register_responses([ + # list of tuples: (expected ZAPI, response) + ('active-directory-account-get-iter', ZRR['ad']), + ('active-directory-account-delete', ZRR['error']), + ]) + + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'Error deleting vserver Active Directory account_name: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' + assert msg == exc.value.args[0]['msg'] + + +def test_success_modify(): + ''' test get''' + args = dict(default_args()) + args['domain'] = 'some.other.domain' + args['force_account_overwrite'] = True + set_module_args(args) + register_responses([ + # list of tuples: (expected ZAPI, response) + ('active-directory-account-get-iter', ZRR['ad']), + ('active-directory-account-modify', ZRR['success']), + ]) + + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] + + +def test_fail_modify_zapi_error(): + ''' test get''' + args = dict(default_args()) + args['domain'] = 'some.other.domain' + set_module_args(args) + register_responses([ + # list of tuples: (expected ZAPI, response) + ('active-directory-account-get-iter', ZRR['ad']), + ('active-directory-account-modify', ZRR['error']), + ]) + + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'Error modifying vserver Active Directory account_name: NetApp API failed. Reason - 12345:synthetic error for UT purpose' + assert msg == exc.value.args[0]['msg'] + + +def test_fail_modify_on_ou(): + ''' test get''' + args = dict(default_args()) + args['organizational_unit'] = 'some.other.OU' + set_module_args(args) + register_responses([ + # list of tuples: (expected ZAPI, response) + ('active-directory-account-get-iter', ZRR['ad']), + ]) + + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = "Error: organizational_unit cannot be modified; found {'organizational_unit': 'some.other.OU'}." 
+ assert msg == exc.value.args[0]['msg'] + + +def test_fail_on_get_zapi_error(): + ''' test get''' + args = dict(default_args()) + set_module_args(args) + register_responses([ + # list of tuples: (expected ZAPI, response) + ('active-directory-account-get-iter', ZRR['error']), + ]) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'Error searching for Active Directory account_name: NetApp API failed. Reason - 12345:synthetic error for UT purpose' + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + ''' test get''' + args = dict(default_args()) + set_module_args(args) + mock_has_netapp_lib.return_value = False + with pytest.raises(AnsibleFailJson) as exc: + my_module() + assert 'Error: the python NetApp-Lib module is required. Import error: None' == exc.value.args[0]['msg'] + + +def test_fail_on_rest(): + ''' test error with rest versions less than 9.12.1''' + args = dict(default_args('always')) + set_module_args(args) + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_0']) + ]) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + assert 'Error: REST requires ONTAP 9.12.1 or later' in exc.value.args[0]['msg'] + + +def test_success_create_rest(): + ''' test create''' + args = dict(default_args('always')) + args['domain'] = 'server1.com' + args['force_account_overwrite'] = True + args['organizational_unit'] = 'CN=Test' + set_module_args(args) + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/active-directory', SRR['empty_records']), + ('POST', 'protocols/active-directory', SRR['success']), + ]) + + with pytest.raises(AnsibleExitJson) as exc: + my_main() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] + + +def test_success_delete_rest(): + ''' test delete rest''' + 
args = dict(default_args('always')) + args['state'] = 'absent' + set_module_args(args) + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/active-directory', SRR['ad_1']), + ('DELETE', 'protocols/active-directory/02c9e252', SRR['success']), + ]) + + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] + + +def test_success_modify_rest(): + ''' test modify rest''' + args = dict(default_args('always')) + args['domain'] = 'some.other.domain' + args['force_account_overwrite'] = True + set_module_args(args) + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/active-directory', SRR['ad_1']), + ('PATCH', 'protocols/active-directory/02c9e252', SRR['success']), + ]) + + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/active-directory', SRR['generic_error']), + ('POST', 'protocols/active-directory', SRR['generic_error']), + ('PATCH', 'protocols/active-directory/02c9e252', SRR['generic_error']), + ('DELETE', 'protocols/active-directory/02c9e252', SRR['generic_error']) + ]) + ad_obj = create_module(my_module, default_args('always')) + ad_obj.svm_uuid = '02c9e252' + assert 'Error searching for Active Directory' in expect_and_capture_ansible_exception(ad_obj.get_active_directory_rest, 'fail')['msg'] + assert 'Error creating vserver Active Directory' in expect_and_capture_ansible_exception(ad_obj.create_active_directory_rest, 'fail')['msg'] + assert 'Error modifying vserver Active Directory' in expect_and_capture_ansible_exception(ad_obj.modify_active_directory_rest, 'fail')['msg'] + assert 'Error deleting vserver Active Directory' in 
expect_and_capture_ansible_exception(ad_obj.delete_active_directory_rest, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_active_directory_domain_controllers.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_active_directory_domain_controllers.py new file mode 100644 index 000000000..cedbe0519 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_active_directory_domain_controllers.py @@ -0,0 +1,177 @@ +# Copyright: NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_active_directory_preferred_domain_controllers """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import create_and_apply,\ + patch_ansible, call_main +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_active_directory_domain_controllers \ + import NetAppOntapActiveDirectoryDC as my_module, main as my_main # module under test + +# REST API canned responses when mocking send_request +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'DC_record': ( + 200, + { + "records": [ + { + "fqdn": "example.com", + "server_ip": "10.10.10.10", + 'svm': {"uuid": "3d52ad89-c278-11ed-a7b0-005056b3ed56"}, + } + ], + 
"num_records": 1 + }, None + ), + 'svm_record': ( + 200, + { + "records": [ + { + "uuid": "3d52ad89-c278-11ed-a7b0-005056b3ed56", + } + ], + "num_records": 1 + }, None + ), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + + +ARGS_REST = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always', + 'vserver': 'ansible', + 'fqdn': 'example.com', + 'server_ip': '10.10.10.10' +} + + +def test_rest_error_get_svm(): + '''Test error rest get svm''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_0']), + ('GET', 'svm/svms', SRR['generic_error']), + ]) + error = call_main(my_main, ARGS_REST, fail=True)['msg'] + msg = "Error fetching vserver ansible: calling: svm/svms: got Expected error." + assert msg in error + + +def test_rest_error_get(): + '''Test error rest get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_0']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers', SRR['generic_error']), + ]) + error = call_main(my_main, ARGS_REST, fail=True)['msg'] + msg = "Error on fetching Active Directory preferred DC configuration of an SVM:" + assert msg in error + + +def test_rest_error_create_active_directory_preferred_domain_controllers(): + '''Test error rest create active_directory preferred domain_controllers''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_0']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers', SRR['empty_records']), + ('POST', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers', SRR['generic_error']), + ]) + error = call_main(my_main, ARGS_REST, fail=True)['msg'] + msg = "Error on adding Active Directory preferred DC configuration to an SVM:" + assert msg in error + + +def 
test_rest_create_active_directory_preferred_domain_controllers(): + '''Test rest create active_directory preferred domain_controllers''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_0']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers', SRR['empty_records']), + ('POST', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers', SRR['empty_good']), + ]) + module_args = { + 'fqdn': 'example.com', + 'server_ip': '10.10.10.10' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_rest_delete_active_directory_preferred_domain_controllers(): + '''Test rest delete active_directory preferred domain_controllers''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_0']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers', SRR['DC_record']), + ('DELETE', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers/example.com/10.10.10.10', SRR['empty_good']), + ]) + module_args = { + 'state': 'absent' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_error_delete_active_directory_preferred_domain_controllers(): + '''Test error rest delete active_directory preferred domain_controllers''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_0']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers', SRR['DC_record']), + ('DELETE', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers/example.com/10.10.10.10', + SRR['generic_error']), + ]) + module_args = { + 'fqdn': 'example.com', + 'server_ip': '10.10.10.10', + 'state': 'absent' + } + error = call_main(my_main, ARGS_REST, 
module_args, fail=True)['msg'] + msg = "Error on deleting Active Directory preferred DC configuration of an SVM:" + assert msg in error + + +def test_create_idempotent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_0']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers', SRR['DC_record']), + ]) + module_args = { + 'state': 'present', + 'fqdn': 'example.com', + 'server_ip': '10.10.10.10' + } + assert not call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_delete_idempotent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_0']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'protocols/active-directory/3d52ad89-c278-11ed-a7b0-005056b3ed56/preferred-domain-controllers', SRR['empty_records']), + ]) + module_args = { + 'state': 'absent' + } + assert not call_main(my_main, ARGS_REST, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate.py new file mode 100644 index 000000000..c93260dcf --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate.py @@ -0,0 +1,627 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_aggregate """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + patch_ansible, create_and_apply, create_module, 
expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses, build_zapi_error + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_aggregate \ + import NetAppOntapAggregate as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +AGGR_NAME = 'aggr_name' +OS_NAME = 'abc' + +aggr_info = {'num-records': 3, + 'attributes-list': + {'aggr-attributes': + {'aggregate-name': AGGR_NAME, + 'aggr-raid-attributes': { + 'state': 'online', + 'disk-count': '4', + 'encrypt-with-aggr-key': 'true'}, + 'aggr-snaplock-attributes': {'snaplock-type': 'snap_t'}} + }, + } + +object_store_info = {'num-records': 1, + 'attributes-list': + {'object-store-information': {'object-store-name': OS_NAME}} + } + +disk_info = {'num-records': 1, + 'attributes-list': [ + {'disk-info': + {'disk-name': '1', + 'disk-raid-info': + {'disk-aggregate-info': + {'plex-name': 'plex0'} + }}}, + {'disk-info': + {'disk-name': '2', + 'disk-raid-info': + {'disk-aggregate-info': + {'plex-name': 'plex0'} + }}}, + {'disk-info': + {'disk-name': '3', + 'disk-raid-info': + {'disk-aggregate-info': + {'plex-name': 'plexM'} + }}}, + {'disk-info': + {'disk-name': '4', + 'disk-raid-info': + {'disk-aggregate-info': + {'plex-name': 'plexM'} + }}}, + ]} + +ZRR = zapi_responses({ + 'aggr_info': build_zapi_response(aggr_info), + 'object_store_info': build_zapi_response(object_store_info), + 'disk_info': build_zapi_response(disk_info), + 'error_disk_add': build_zapi_error(13003, 'disk add operation is in progress'), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': AGGR_NAME, + 'use_rest': 'never', + 'feature_flags': 
{'no_cserver_ems': True} +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + error = create_module(my_module, fail=True)['msg'] + print('Info: %s' % error) + assert 'missing required arguments:' in error + assert 'name' in error + + +def test_create(): + register_responses([ + ('aggr-get-iter', ZRR['empty']), + ('aggr-create', ZRR['empty']), + ('aggr-get-iter', ZRR['empty']), + ]) + module_args = { + 'disk_type': 'ATA', + 'raid_type': 'raid_dp', + 'snaplock_type': 'non_snaplock', + # 'spare_pool': 'Pool0', + 'disk_count': 4, + 'raid_size': 5, + 'disk_size': 10, + # 'disk_size_with_unit': 'dsize_unit', + 'is_mirrored': True, + 'ignore_pool_checks': True, + 'encryption': True, + 'nodes': ['node1', 'node2'] + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('aggr-destroy', ZRR['empty']) + ]) + module_args = { + 'state': 'absent', + 'disk_count': 3 + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_with_spare_pool(): + register_responses([ + ('aggr-get-iter', ZRR['empty']), + ('aggr-create', ZRR['empty']), + ('aggr-get-iter', ZRR['empty']), + ]) + module_args = { + 'disk_type': 'ATA', + 'raid_type': 'raid_dp', + 'snaplock_type': 'non_snaplock', + 'spare_pool': 'Pool0', + 'disk_count': 2, + 'raid_size': 5, + 'disk_size_with_unit': '10m', + # 'disk_size_with_unit': 'dsize_unit', + 'ignore_pool_checks': True, + 'encryption': True, + 'nodes': ['node1', 'node2'] + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_with_disks(): + register_responses([ + ('aggr-get-iter', ZRR['empty']), + ('aggr-create', ZRR['empty']), + ('aggr-get-iter', ZRR['empty']), + ]) + module_args = { + 'disk_type': 'ATA', + 'raid_type': 'raid_dp', + 'snaplock_type': 'non_snaplock', + 'disks': [1, 2], + 'mirror_disks': [11, 12], + 
'raid_size': 5, + 'disk_size_with_unit': '10m', + 'ignore_pool_checks': True, + 'encryption': True, + 'nodes': ['node1', 'node2'] + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_create_wait_for_completion(mock_time): + register_responses([ + ('aggr-get-iter', ZRR['empty']), + ('aggr-create', ZRR['empty']), + ('aggr-get-iter', ZRR['empty']), + ('aggr-get-iter', ZRR['empty']), + ('aggr-get-iter', ZRR['aggr_info']), + ]) + module_args = { + 'disk_count': '2', + 'is_mirrored': 'true', + 'wait_for_online': 'true' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_with_object_store(): + register_responses([ + ('aggr-get-iter', ZRR['empty']), + ('aggr-create', ZRR['empty']), + ('aggr-get-iter', ZRR['empty']), + ('aggr-object-store-attach', ZRR['empty']), + ]) + module_args = { + 'disk_class': 'capacity', + 'disk_count': '2', + 'is_mirrored': 'true', + 'object_store_name': 'abc', + 'allow_flexgroups': True + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_is_mirrored(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ]) + module_args = { + 'disk_count': '4', + 'is_mirrored': 'true', + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_disks_list(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ]) + module_args = { + 'disks': ['1', '2'], + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_mirror_disks(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ]) + module_args = { + 'disks': ['1', '2'], + 'mirror_disks': ['3', '4'] + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_spare_pool(): + register_responses([ + ('aggr-get-iter', 
ZRR['aggr_info']), + ]) + module_args = { + 'disk_count': '4', + 'spare_pool': 'Pool1' + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_modify_encryption(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ]) + module_args = { + 'encryption': False + } + exc = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + msg = 'Error: modifying encryption is not supported with ZAPI.' + assert msg in exc['msg'] + + +def test_rename(): + register_responses([ + ('aggr-get-iter', ZRR['empty']), # target does not exist + ('aggr-get-iter', ZRR['aggr_info']), # from exists + ('aggr-rename', ZRR['empty']), + ]) + module_args = { + 'from_name': 'test_name2' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rename_error_no_from(): + register_responses([ + ('aggr-get-iter', ZRR['empty']), # target does not exist + ('aggr-get-iter', ZRR['empty']), # from does not exist + ]) + module_args = { + 'from_name': 'test_name2' + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + msg = 'Error renaming aggregate %s: no aggregate with from_name %s.' 
% (AGGR_NAME, module_args['from_name']) + assert msg in exception['msg'] + + +def test_rename_with_add_object_store(): # TODO: + register_responses([ + ('aggr-get-iter', ZRR['empty']), # target does not exist + ('aggr-get-iter', ZRR['aggr_info']), # from exists + ('aggr-object-store-get-iter', ZRR['empty']), # from does not have an OS + ('aggr-rename', ZRR['empty']), + ('aggr-object-store-attach', ZRR['empty']), + ]) + module_args = { + 'from_name': 'test_name2', + 'object_store_name': 'abc', + 'allow_flexgroups': False + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_object_store_present(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('aggr-object-store-get-iter', ZRR['object_store_info']), + ]) + module_args = { + 'object_store_name': 'abc' + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_object_store_create(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('aggr-object-store-get-iter', ZRR['empty']), # object_store is not attached + ('aggr-object-store-attach', ZRR['empty']), + ]) + module_args = { + 'object_store_name': 'abc', + 'allow_flexgroups': True + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_object_store_modify(): + ''' not supported ''' + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('aggr-object-store-get-iter', ZRR['object_store_info']), + ]) + module_args = { + 'object_store_name': 'def' + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + msg = 'Error: object store %s is already associated with aggregate %s.' 
% (OS_NAME, AGGR_NAME) + assert msg in exception['msg'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('aggr-get-iter', ZRR['error']), + ('aggr-online', ZRR['error']), + ('aggr-offline', ZRR['error']), + ('aggr-create', ZRR['error']), + ('aggr-destroy', ZRR['error']), + ('aggr-rename', ZRR['error']), + ('aggr-get-iter', ZRR['error']), + ]) + module_args = { + 'service_state': 'online', + 'unmount_volumes': 'True', + 'from_name': 'test_name2', + } + + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + error = expect_and_capture_ansible_exception(my_obj.aggr_get_iter, 'fail', module_args.get('name'))['msg'] + assert 'Error getting aggregate: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.aggregate_online, 'fail')['msg'] + assert 'Error changing the state of aggregate' in error + + error = expect_and_capture_ansible_exception(my_obj.aggregate_offline, 'fail')['msg'] + assert 'Error changing the state of aggregate' in error + + error = expect_and_capture_ansible_exception(my_obj.create_aggr, 'fail')['msg'] + assert 'Error provisioning aggregate' in error + + error = expect_and_capture_ansible_exception(my_obj.delete_aggr, 'fail')['msg'] + assert 'Error removing aggregate' in error + + error = expect_and_capture_ansible_exception(my_obj.rename_aggregate, 'fail')['msg'] + assert 'Error renaming aggregate' in error + + my_obj.asup_log_for_cserver = Mock(return_value=None) + error = expect_and_capture_ansible_exception(my_obj.apply, 'fail')['msg'] + assert '12345:synthetic error for UT purpose' in error + + +def test_disks_bad_mapping(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ]) + module_args = { + 'disks': ['0'], + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + msg = "Error mapping disks for aggregate %s: cannot match disks with current 
aggregate disks." % AGGR_NAME + assert exception['msg'].startswith(msg) + + +def test_disks_overlapping_mirror(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ]) + module_args = { + 'disks': ['1', '2', '3'], + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + msg = "Error mapping disks for aggregate %s: found overlapping plexes:" % AGGR_NAME + assert exception['msg'].startswith(msg) + + +def test_disks_removing_disk(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ]) + module_args = { + 'disks': ['1'], + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + msg = "Error removing disks is not supported. Aggregate %s: these disks cannot be removed: ['2']." % AGGR_NAME + assert exception['msg'].startswith(msg) + + +def test_disks_removing_mirror_disk(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ]) + module_args = { + 'disks': ['1', '2'], + 'mirror_disks': ['4', '6'] + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + msg = "Error removing disks is not supported. Aggregate %s: these disks cannot be removed: ['3']." 
% AGGR_NAME + assert exception['msg'].startswith(msg) + + +def test_disks_add(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ]) + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ('aggr-add', ZRR['empty']), + ]) + module_args = { + 'disks': ['1', '2', '5'], + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_disks_add_and_offline(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ('aggr-add', ZRR['empty']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['success']), + # error if max tries attempted. + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ('aggr-add', ZRR['empty']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']), + ('aggr-offline', ZRR['error_disk_add']) + ]) + module_args = { + 'disks': ['1', '2', '5'], 'service_state': 'offline' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert 'disk add operation is in progres' in create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_mirror_disks_add(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ('aggr-add', ZRR['empty']), + ]) + module_args = { + 'disks': ['1', '2', '5'], + 'mirror_disks': ['3', '4', '6'] + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] 
+ + +def test_mirror_disks_add_unbalanced(): + register_responses([ + ('aggr-get-iter', ZRR['aggr_info']), + ('storage-disk-get-iter', ZRR['disk_info']), + ]) + module_args = { + 'disks': ['1', '2'], + 'mirror_disks': ['3', '4', '6'] + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + msg = "Error cannot add mirror disks ['6'] without adding disks for aggregate %s." % AGGR_NAME + assert exception['msg'].startswith(msg) + + +def test_map_plex_to_primary_and_mirror_error_overlap(): + my_obj = create_module(my_module, DEFAULT_ARGS) + kwargs = { + 'plex_disks': {'plex1': [1, 2, 3], 'plex2': [4, 5, 6]}, + 'disks': [1, 4, 5], + 'mirror_disks': [] + } + error = expect_and_capture_ansible_exception(my_obj.map_plex_to_primary_and_mirror, 'fail', **kwargs)['msg'] + msg = "Error mapping disks for aggregate aggr_name: found overlapping plexes:" + assert error.startswith(msg) + + +def test_map_plex_to_primary_and_mirror_error_overlap_mirror(): + my_obj = create_module(my_module, DEFAULT_ARGS) + kwargs = { + 'plex_disks': {'plex1': [1, 2, 3], 'plex2': [4, 5, 6]}, + 'disks': [1, 4, 5], + 'mirror_disks': [1, 4, 5] + } + error = expect_and_capture_ansible_exception(my_obj.map_plex_to_primary_and_mirror, 'fail', **kwargs)['msg'] + msg = "Error mapping disks for aggregate aggr_name: found overlapping mirror plexes:" + error.startswith(msg) + + +def test_map_plex_to_primary_and_mirror_error_no_match(): + my_obj = create_module(my_module, DEFAULT_ARGS) + kwargs = { + 'plex_disks': {'plex1': [1, 2, 3], 'plex2': [4, 5, 6]}, + 'disks': [7, 8, 9], + 'mirror_disks': [10, 11, 12] + } + error = expect_and_capture_ansible_exception(my_obj.map_plex_to_primary_and_mirror, 'fail', **kwargs)['msg'] + msg = ("Error mapping disks for aggregate aggr_name: cannot match disks with current aggregate disks, " + "and cannot match mirror_disks with current aggregate disks.") + assert error.startswith(msg) + + 
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_missing_netapp_lib(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + msg = 'Error: the python NetApp-Lib module is required. Import error: None' + assert msg == create_module(my_module, DEFAULT_ARGS, fail=True)['msg'] + + +def test_disk_get_iter_error(): + register_responses([ + ('storage-disk-get-iter', ZRR['error']), + ]) + msg = 'Error getting disks: NetApp API failed. Reason - 12345:synthetic error for UT purpose' + assert msg == expect_and_capture_ansible_exception(create_module(my_module, DEFAULT_ARGS).disk_get_iter, 'fail', 'name')['msg'] + + +def test_object_store_get_iter_error(): + register_responses([ + ('aggr-object-store-get-iter', ZRR['error']), + ]) + msg = 'Error getting object store: NetApp API failed. Reason - 12345:synthetic error for UT purpose' + assert msg == expect_and_capture_ansible_exception(create_module(my_module, DEFAULT_ARGS).object_store_get_iter, 'fail', 'name')['msg'] + + +def test_attach_object_store_to_aggr_error(): + register_responses([ + ('aggr-object-store-attach', ZRR['error']), + ]) + module_args = { + 'object_store_name': 'os12', + } + msg = 'Error attaching object store os12 to aggregate aggr_name: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' + assert msg == expect_and_capture_ansible_exception(create_module(my_module, DEFAULT_ARGS, module_args).attach_object_store_to_aggr, 'fail')['msg'] + + +def test_add_disks_all_options_class(): + register_responses([ + ('aggr-add', ZRR['empty']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['ignore_pool_checks'] = True + my_obj.parameters['disk_class'] = 'performance' + assert my_obj.add_disks(count=2, disks=['1', '2'], disk_size=1, disk_size_with_unit='12GB') is None + + +def test_add_disks_all_options_type(): + register_responses([ + ('aggr-add', ZRR['empty']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['ignore_pool_checks'] = True + my_obj.parameters['disk_type'] = 'SSD' + assert my_obj.add_disks(count=2, disks=['1', '2'], disk_size=1, disk_size_with_unit='12GB') is None + + +def test_add_disks_error(): + register_responses([ + ('aggr-add', ZRR['error']), + ]) + msg = 'Error adding additional disks to aggregate aggr_name: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' + assert msg == expect_and_capture_ansible_exception(create_module(my_module, DEFAULT_ARGS).add_disks, 'fail')['msg'] + + +def test_modify_aggr_offline(): + register_responses([ + ('aggr-offline', ZRR['empty']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + assert my_obj.modify_aggr({'service_state': 'offline'}) is None + + +def test_modify_aggr_online(): + register_responses([ + ('aggr-online', ZRR['empty']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + assert my_obj.modify_aggr({'service_state': 'online'}) is None diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate_rest.py new file mode 100644 index 000000000..1fc6bfbf2 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_aggregate_rest.py @@ -0,0 +1,616 @@ + +# (c) 2022-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_aggregate when using REST """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_aggregate \ + import NetAppOntapAggregate as my_module, main as my_main 
# module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +# REST API canned responses when mocking send_request. +# The rest_factory provides default responses shared across testcases. +SRR = rest_responses({ + # module specific responses + 'one_record': (200, {'records': [ + {'uuid': 'ansible', '_tags': ['resource:cloud', 'main:aggr'], + 'block_storage': {'primary': {'disk_count': 5}}, + 'state': 'online', 'snaplock_type': 'snap'} + ]}, None), + 'two_records': (200, {'records': [ + {'uuid': 'ansible', + 'block_storage': {'primary': {'disk_count': 5}}, + 'state': 'online', 'snaplock_type': 'snap'}, + {'uuid': 'ansible', + 'block_storage': {'primary': {'disk_count': 5}}, + 'state': 'online', 'snaplock_type': 'snap'}, + ]}, None), + 'no_uuid': (200, {'records': [ + {'block_storage': {'primary': {'disk_count': 5}}, + 'state': 'online', 'snaplock_type': 'snap'}, + ]}, None), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'aggr_name' +} + + +def test_validate_options(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']) + ], 'test_validate_options') + # no error! + my_obj = create_module(my_module, DEFAULT_ARGS) + assert my_obj.validate_options() is None + + my_obj.parameters['nodes'] = [1, 2] + + msg = 'Error when validating options: only one node can be specified when using rest' + assert msg in expect_and_capture_ansible_exception(my_obj.validate_options, 'fail')['msg'] + + my_obj.parameters['disk_count'] = 7 + my_obj.parameters.pop('nodes') + msg = 'Error when validating options: nodes is required when disk_count is present.' 
+ assert msg in expect_and_capture_ansible_exception(my_obj.validate_options, 'fail')['msg'] + + my_obj.use_rest = False + my_obj.parameters['mirror_disks'] = [1, 2] + msg = 'Error when validating options: mirror_disks require disks options to be set.' + assert msg in expect_and_capture_ansible_exception(my_obj.validate_options, 'fail')['msg'] + + +def test_get_disk_size(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + + my_obj.parameters['disk_size'] = 1 + assert my_obj.get_disk_size() == 4096 + my_obj.parameters['disk_size'] = 1000 + assert my_obj.get_disk_size() == 4096000 + + my_obj.parameters.pop('disk_size') + my_obj.parameters['disk_size_with_unit'] = '1567' + assert my_obj.get_disk_size() == 1567 + my_obj.parameters['disk_size_with_unit'] = '1567K' + assert my_obj.get_disk_size() == 1567 * 1024 + my_obj.parameters['disk_size_with_unit'] = '1567gb' + assert my_obj.get_disk_size() == 1567 * 1024 * 1024 * 1024 + my_obj.parameters['disk_size_with_unit'] = '15.67gb' + assert my_obj.get_disk_size() == int(15.67 * 1024 * 1024 * 1024) + + my_obj.parameters['disk_size_with_unit'] = '1567rb' + error = expect_and_capture_ansible_exception(my_obj.get_disk_size, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error: unexpected unit in disk_size_with_unit: 1567rb' == error + + my_obj.parameters['disk_size_with_unit'] = 'error' + error = expect_and_capture_ansible_exception(my_obj.get_disk_size, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error: unexpected value in disk_size_with_unit: error' == error + + +def test_get_aggr_rest_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['generic_error']) + ]) + error = expect_and_capture_ansible_exception(create_module(my_module, DEFAULT_ARGS).get_aggr_rest, 'fail', 'aggr1')['msg'] + print('Info: %s' % error) + assert 'Error: failed to get aggregate aggr1: calling: storage/aggregates: got 
Expected error.' == error + + +def test_get_aggr_rest_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']) + ]) + assert create_module(my_module, DEFAULT_ARGS).get_aggr_rest(None) is None + + +def test_get_aggr_rest_one_record(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['one_record']) + ]) + assert create_module(my_module, DEFAULT_ARGS).get_aggr_rest('aggr1') is not None + + +def test_get_aggr_rest_not_found(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['empty_records']), + ]) + assert create_module(my_module, DEFAULT_ARGS).get_aggr_rest('aggr1') is None + + +def test_create_aggr(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'storage/aggregates', SRR['empty_good']) + ]) + assert create_module(my_module, DEFAULT_ARGS).create_aggr_rest() is None + assert get_mock_record().is_record_in_json({'name': 'aggr_name'}, 'POST', 'storage/aggregates') + + +def test_aggr_tags(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_13_1']), + ('GET', 'storage/aggregates', SRR['zero_records']), + ('POST', 'storage/aggregates', SRR['empty_good']), + # idempotent check + ('GET', 'cluster', SRR['is_rest_9_13_1']), + ('GET', 'storage/aggregates', SRR['one_record']), + # modify tags + ('GET', 'cluster', SRR['is_rest_9_13_1']), + ('GET', 'storage/aggregates', SRR['one_record']), + ('PATCH', 'storage/aggregates/ansible', SRR['success']) + ]) + args = {'tags': ['resource:cloud', 'main:aggr']} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert create_and_apply(my_module, DEFAULT_ARGS, {'tags': ['main:aggr']})['changed'] + + +def test_create_aggr_all_options(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'storage/aggregates', SRR['empty_good']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + 
my_obj.parameters['disk_class'] = 'capacity' + my_obj.parameters['disk_count'] = 12 + my_obj.parameters['disk_size_with_unit'] = '1567gb' + my_obj.parameters['is_mirrored'] = True + my_obj.parameters['nodes'] = ['node1'] + my_obj.parameters['raid_size'] = 4 + my_obj.parameters['raid_type'] = 'raid5' + my_obj.parameters['encryption'] = True + my_obj.parameters['snaplock_type'] = 'snap' + + assert my_obj.create_aggr_rest() is None + assert get_mock_record().is_record_in_json( + {'block_storage': {'primary': {'disk_class': 'capacity', 'disk_count': 12, 'raid_size': 4, 'raid_type': 'raid5'}, 'mirror': {'enabled': True}}}, + 'POST', 'storage/aggregates') + + +def test_create_aggr_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'storage/aggregates', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['disk_count'] = 12 + my_obj.parameters['disk_size_with_unit'] = '1567gb' + my_obj.parameters['is_mirrored'] = False + my_obj.parameters['nodes'] = ['node1'] + my_obj.parameters['raid_size'] = 4 + my_obj.parameters['raid_type'] = 'raid5' + my_obj.parameters['encryption'] = True + + error = expect_and_capture_ansible_exception(my_obj.create_aggr_rest, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error: failed to create aggregate: calling: storage/aggregates: got Expected error.' 
== error + + +def test_delete_aggr(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('DELETE', 'storage/aggregates/aggr_uuid', SRR['empty_good']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['state'] = 'absent' + my_obj.uuid = 'aggr_uuid' + assert my_obj.delete_aggr_rest() is None + + +def test_delete_aggr_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('DELETE', 'storage/aggregates/aggr_uuid', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['state'] = 'absent' + my_obj.parameters['disk_size_with_unit'] = '1567gb' + my_obj.parameters['is_mirrored'] = False + my_obj.parameters['nodes'] = ['node1'] + my_obj.parameters['raid_size'] = 4 + my_obj.parameters['raid_type'] = 'raid5' + my_obj.parameters['encryption'] = True + my_obj.uuid = 'aggr_uuid' + + error = expect_and_capture_ansible_exception(my_obj.delete_aggr_rest, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error: failed to delete aggregate: calling: storage/aggregates/aggr_uuid: got Expected error.' == error + + +def test_patch_aggr(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/aggregates/aggr_uuid', SRR['empty_good']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.uuid = 'aggr_uuid' + my_obj.patch_aggr_rest('act on', {'key': 'value'}) + assert get_mock_record().is_record_in_json({'key': 'value'}, 'PATCH', 'storage/aggregates/aggr_uuid') + + +def test_patch_aggr_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/aggregates/aggr_uuid', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.uuid = 'aggr_uuid' + + error = expect_and_capture_ansible_exception(my_obj.patch_aggr_rest, 'fail', 'act on', {'key': 'value'})['msg'] + print('Info: %s' % error) + assert 'Error: failed to act on aggregate: calling: storage/aggregates/aggr_uuid: got Expected error.' 
== error + + +def test_set_disk_count(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + current = {'disk_count': 2} + modify = {'disk_count': 5} + my_obj.set_disk_count(current, modify) + assert modify['disk_count'] == 3 + + +def test_set_disk_count_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + + current = {'disk_count': 9} + modify = {'disk_count': 5} + error = expect_and_capture_ansible_exception(my_obj.set_disk_count, 'fail', current, modify)['msg'] + print('Info: %s' % error) + assert 'Error: specified disk_count is less than current disk_count. Only adding disks is allowed.' == error + + +def test_add_disks(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/aggregates/aggr_uuid', SRR['empty_good']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['disk_class'] = 'performance' + my_obj.parameters['disk_count'] = 12 + my_obj.uuid = 'aggr_uuid' + my_obj.add_disks_rest(count=2) + assert get_mock_record().is_record_in_json({'block_storage': {'primary': {'disk_count': 12}}}, 'PATCH', 'storage/aggregates/aggr_uuid') + + +def test_add_disks_error_local(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.uuid = 'aggr_uuid' + + error = expect_and_capture_ansible_exception(my_obj.add_disks_rest, 'fail', disks=[1, 2])['msg'] + print('Info: %s' % error) + assert 'Error: disks or mirror disks are mot supported with rest: [1, 2], None.' 
== error + + +def test_add_disks_error_remote(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/aggregates/aggr_uuid', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['disk_count'] = 12 + my_obj.uuid = 'aggr_uuid' + + error = expect_and_capture_ansible_exception(my_obj.add_disks_rest, 'fail', count=2)['msg'] + print('Info: %s' % error) + assert 'Error: failed to increase disk count for aggregate: calling: storage/aggregates/aggr_uuid: got Expected error.' == error + + +def test_rename_aggr(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/aggregates/aggr_uuid', SRR['empty_good']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.uuid = 'aggr_uuid' + my_obj.rename_aggr_rest() + assert get_mock_record().is_record_in_json({'name': 'aggr_name'}, 'PATCH', 'storage/aggregates/aggr_uuid') + + +def test_offline_online_aggr_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/aggregates/aggr_uuid', SRR['generic_error']), + ('PATCH', 'storage/aggregates/aggr_uuid', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.uuid = 'aggr_uuid' + error = 'Error: failed to make service state online for aggregate' + assert error in expect_and_capture_ansible_exception(my_obj.aggregate_online, 'fail')['msg'] + error = 'Error: failed to make service state offline for aggregate' + assert error in expect_and_capture_ansible_exception(my_obj.aggregate_offline, 'fail')['msg'] + + +def test_rename_aggr_error_remote(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/aggregates/aggr_uuid', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.uuid = 'aggr_uuid' + + error = expect_and_capture_ansible_exception(my_obj.rename_aggr_rest, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error: failed to rename aggregate: calling: 
storage/aggregates/aggr_uuid: got Expected error.' == error + + +def test_get_object_store(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates/aggr_uuid/cloud-stores', SRR['one_record']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.uuid = 'aggr_uuid' + record = my_obj.get_object_store_rest() + assert record + + +def test_get_object_store_error_remote(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates/aggr_uuid/cloud-stores', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.uuid = 'aggr_uuid' + + error = expect_and_capture_ansible_exception(my_obj.get_object_store_rest, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error: failed to get cloud stores for aggregate: calling: storage/aggregates/aggr_uuid/cloud-stores: got Expected error.' == error + + +def test_get_cloud_target_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cloud/targets', SRR['one_record']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['object_store_name'] = 'os12' + my_obj.uuid = 'aggr_uuid' + record = my_obj.get_cloud_target_uuid_rest() + assert record + + +def test_get_cloud_target_uuid_error_remote(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cloud/targets', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['object_store_name'] = 'os12' + my_obj.uuid = 'aggr_uuid' + + error = expect_and_capture_ansible_exception(my_obj.get_cloud_target_uuid_rest, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error: failed to find cloud store with name os12: calling: cloud/targets: got Expected error.' 
== error + + +def test_attach_object_store_to_aggr(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cloud/targets', SRR['one_record']), # get object store UUID + ('POST', 'storage/aggregates/aggr_uuid/cloud-stores', SRR['empty_good']) # attach (POST) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['object_store_name'] = 'os12' + my_obj.parameters['allow_flexgroups'] = True + my_obj.uuid = 'aggr_uuid' + assert my_obj.attach_object_store_to_aggr_rest() == {} + + +def test_attach_object_store_to_aggr_error_remote(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cloud/targets', SRR['one_record']), # get object store UUID + ('POST', 'storage/aggregates/aggr_uuid/cloud-stores', SRR['generic_error']) # attach (POST) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['object_store_name'] = 'os12' + my_obj.uuid = 'aggr_uuid' + + error = expect_and_capture_ansible_exception(my_obj.attach_object_store_to_aggr_rest, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error: failed to attach cloud store with name os12: calling: storage/aggregates/aggr_uuid/cloud-stores: got Expected error.' 
== error + + +def test_apply_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['empty_records']), # get + ('POST', 'storage/aggregates', SRR['empty_good']), # create (POST) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + assert get_mock_record().is_record_in_json({'name': 'aggr_name'}, 'POST', 'storage/aggregates') + + +def test_apply_create_and_modify_service_state(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'storage/aggregates', SRR['empty_records']), # get + ('POST', 'storage/aggregates', SRR['empty_good']), # create (POST) + ('PATCH', 'storage/aggregates', SRR['success']), # modify service state + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'service_state': 'offline'})['changed'] + assert get_mock_record().is_record_in_json({'name': 'aggr_name'}, 'POST', 'storage/aggregates') + + +def test_apply_create_fail_to_read_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['empty_records']), # get + ('POST', 'storage/aggregates', SRR['two_records']), # create (POST) + ]) + msg = 'Error: failed to parse create aggregate response: calling: storage/aggregates: unexpected response' + assert msg in create_and_apply(my_module, DEFAULT_ARGS, fail=True)['msg'] + + +def test_apply_create_fail_to_read_uuid_key_missing(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['empty_records']), # get + ('POST', 'storage/aggregates', SRR['no_uuid']), # create (POST) + ]) + msg = 'Error: failed to parse create aggregate response: uuid key not present in' + assert msg in create_and_apply(my_module, DEFAULT_ARGS, fail=True)['msg'] + + +def test_apply_create_with_object_store(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['empty_records']), # get + ('POST', 'storage/aggregates', SRR['one_record']), # create (POST) 
+ ('GET', 'cloud/targets', SRR['one_record']), # get object store uuid + ('POST', 'storage/aggregates/ansible/cloud-stores', SRR['empty_good']), # attach (POST) + ]) + module_args = { + 'object_store_name': 'os12' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert get_mock_record().is_record_in_json({'name': 'aggr_name'}, 'POST', 'storage/aggregates') + assert get_mock_record().is_record_in_json({'target': {'uuid': 'ansible'}}, 'POST', 'storage/aggregates/ansible/cloud-stores') + + +def test_apply_create_with_object_store_missing_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['empty_records']), # get + ('POST', 'storage/aggregates', SRR['empty_good']), # create (POST) + ]) + module_args = { + 'object_store_name': 'os12' + } + msg = 'Error: cannot attach cloud store with name os12: aggregate UUID is not set.' + assert create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + assert get_mock_record().is_record_in_json({'name': 'aggr_name'}, 'POST', 'storage/aggregates') + + +def test_apply_create_check_mode(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['empty_records']), # get + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, check_mode=True)['changed'] + + +def test_apply_add_disks(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['one_record']), # get + ('PATCH', 'storage/aggregates/ansible', SRR['empty_good']), # patch (add disks) + ]) + module_args = { + 'disk_count': 12, + 'nodes': 'node1' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert get_mock_record().is_record_in_json({'block_storage': {'primary': {'disk_count': 12}}}, 'PATCH', 'storage/aggregates/ansible') + + +def test_apply_add_object_store(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', 
SRR['one_record']), # get + ('GET', 'storage/aggregates/ansible/cloud-stores', SRR['empty_records']), # get aggr cloud store + ('GET', 'cloud/targets', SRR['one_record']), # get object store uuid + ('POST', 'storage/aggregates/ansible/cloud-stores', SRR['empty_good']), # attach + ]) + module_args = { + 'object_store_name': 'os12', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert get_mock_record().is_record_in_json({'target': {'uuid': 'ansible'}}, 'POST', 'storage/aggregates/ansible/cloud-stores') + + +def test_apply_rename(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['empty_records']), # get aggr + ('GET', 'storage/aggregates', SRR['one_record']), # get from_aggr + ('PATCH', 'storage/aggregates/ansible', SRR['empty_good']), # patch (rename) + ]) + module_args = { + 'from_name': 'old_aggr', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert get_mock_record().is_record_in_json({'name': 'aggr_name'}, 'PATCH', 'storage/aggregates/ansible') + + +def test_apply_delete(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['one_record']), # get + ('DELETE', 'storage/aggregates/ansible', SRR['empty_good']), # delete + ]) + module_args = { + 'state': 'absent', + 'disk_count': 4 + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_get_aggr_actions_error_service_state_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + error = 'Error: Minimum version of ONTAP for service_state is (9, 11, 1)' + assert error in create_module(my_module, DEFAULT_ARGS, {'service_state': 'online', 'use_rest': 'always'}, fail=True)['msg'] + + +def test_get_aggr_actions_error_snaplock(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['one_record']), # get + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) 
+ my_obj.parameters['snaplock_type'] = 'enterprise' + + error = expect_and_capture_ansible_exception(my_obj.get_aggr_actions, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error: snaplock_type is not modifiable. Cannot change to: enterprise.' == error + + +def test_main_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates', SRR['empty_records']), # get + ('POST', 'storage/aggregates', SRR['empty_good']), # create + ]) + set_module_args(DEFAULT_ARGS) + + assert expect_and_capture_ansible_exception(my_main, 'exit')['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport.py new file mode 100644 index 000000000..c971520f1 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport.py @@ -0,0 +1,264 @@ +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP autosupport Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_warning_was_raised, call_main, create_module, patch_ansible, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, 
zapi_error_message, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_autosupport \ + import NetAppONTAPasup as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'state': 'present', + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'node_name': 'node1', + 'retry_count': '16', + 'transport': 'http', + 'ondemand_enabled': 'true' + +} + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'one_asup_record': (200, { + "records": [{ + 'node': 'node1', + 'state': True, + 'from': 'Postmaster', + 'support': True, + 'transport': 'http', + 'url': 'support.netapp.com/asupprod/post/1.0/postAsup', + 'proxy_url': 'username1:********@host.com:8080', + 'hostname_subj': True, + 'nht': False, + 'perf': True, + 'retry_count': 16, + 'reminder': True, + 'max_http_size': 10485760, + 'max_smtp_size': 5242880, + 'remove_private_data': False, + 'local_collection': True, + 'ondemand_state': True, + 'ondemand_server_url': 'https://support.netapp.com/aods/asupmessage', + 'partner_address': ['test@example.com'] + }], + 'num_records': 1 + }, None) +}) + +autosupport_info = { + 'attributes': { + 'autosupport-config-info': { + 'is-enabled': 'true', + 'node-name': 'node1', + 'transport': 'http', + 'post-url': 'support.netapp.com/asupprod/post/1.0/postAsup', + 'from': 'Postmaster', + 'proxy-url': 'username1:********@host.com:8080', + 'retry-count': '16', + 'max-http-size': '10485760', + 'max-smtp-size': '5242880', + 'is-support-enabled': 'true', + 'is-node-in-subject': 'true', + 'is-nht-data-enabled': 'false', + 'is-perf-data-enabled': 'true', + 'is-reminder-enabled': 
'true', + 'is-private-data-removed': 'false', + 'is-local-collection-enabled': 'true', + 'is-ondemand-enabled': 'true', + 'validate-digital-certificate': 'true', + + } + } +} + +ZRR = zapi_responses({ + 'autosupport_info': build_zapi_response(autosupport_info) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + assert 'missing required arguments:' in call_main(my_main, {}, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_NETAPP_LIB', False) +def test_module_fail_when_netapp_lib_missing(): + ''' required lib missing ''' + module_args = { + 'use_rest': 'never', + } + assert 'Error: the python NetApp-Lib module is required. Import error: None' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_ensure_get_called(): + register_responses([ + ('ZAPI', 'autosupport-config-get', ZRR['autosupport_info']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_autosupport_config() is not None + + +def test_successful_modify(): + ''' modifying asup and testing idempotency ''' + register_responses([ + ('ZAPI', 'autosupport-config-get', ZRR['autosupport_info']), + ('ZAPI', 'autosupport-config-modify', ZRR['success']), + # idempotency + ('ZAPI', 'autosupport-config-get', ZRR['autosupport_info']), + ]) + module_args = { + 'use_rest': 'never', + 'ondemand_enabled': False, + 'partner_addresses': [], + 'post_url': 'some_url', + 'from_address': 'from_add', + 'to_addresses': 'to_add', + 'hostname_in_subject': False, + 'nht_data_enabled': True, + 'perf_data_enabled': False, + 'reminder_enabled': False, + 'private_data_removed': True, + 'local_collection_enabled': False, + 'retry_count': 3, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # idempotency + module_args = { + 'use_rest': 'never', + 'ondemand_enabled': True, + 'partner_addresses': [], + 
'post_url': 'support.netapp.com/asupprod/post/1.0/postAsup', + 'from_address': 'Postmaster', + 'hostname_in_subject': True, + 'nht_data_enabled': False, + 'perf_data_enabled': True, + 'reminder_enabled': True, + 'private_data_removed': False, + 'local_collection_enabled': True, + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('ZAPI', 'autosupport-config-get', ZRR['error']), + # idempotency + ('ZAPI', 'autosupport-config-get', ZRR['autosupport_info']), + ('ZAPI', 'autosupport-config-modify', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'ondemand_enabled': False, + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == zapi_error_message('Error fetching info') + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == zapi_error_message('Error modifying asup') + + +def test_rest_modify_no_action(): + ''' modify asup ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/system/node/autosupport', SRR['one_asup_record']), + ]) + module_args = { + 'use_rest': 'always', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_modify_prepopulate(): + ''' modify asup ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/system/node/autosupport', SRR['one_asup_record']), + ('PATCH', 'private/cli/system/node/autosupport', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + 'ondemand_enabled': False, + 'partner_addresses': [], + 'post_url': 'some_url', + 'from_address': 'from_add', + 'to_addresses': 'to_add', + 'hostname_in_subject': False, + 'nht_data_enabled': True, + 'perf_data_enabled': False, + 'reminder_enabled': False, + 'private_data_removed': True, + 'local_collection_enabled': False, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_modify_pasword(): + ''' 
modify asup ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/system/node/autosupport', SRR['one_asup_record']), + ('PATCH', 'private/cli/system/node/autosupport', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + # different password, but no action + 'proxy_url': 'username1:password2@host.com:8080' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + print_warnings() + assert_warning_was_raised('na_ontap_autosupport is not idempotent because the password value in proxy_url cannot be compared.') + + +def test_rest_get_error(): + ''' modify asup ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/system/node/autosupport', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always', + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == rest_error_message('Error fetching info', 'private/cli/system/node/autosupport') + + +def test_rest_modify_error(): + ''' modify asup ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/system/node/autosupport', SRR['one_asup_record']), + ('PATCH', 'private/cli/system/node/autosupport', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always', + 'ondemand_enabled': False, + 'partner_addresses': [] + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == rest_error_message('Error modifying asup', 'private/cli/system/node/autosupport') diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py new file mode 100644 index 000000000..872cffa1b --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_autosupport_invoke.py @@ -0,0 +1,103 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_autosupport_invoke ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_autosupport_invoke \ + import NetAppONTAPasupInvoke as invoke_module # module under test + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error") +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def invoke_successfully(self, xml, enable_tunneling): + raise netapp_utils.zapi.NaApiError('test', 'Expected error') + + +class TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_wwpn_alias ''' + + def setUp(self): + self.mock_invoke = { + 'name': 'test_node', + 'message': 'test_message', + 'type': 'all' + } + + def mock_args(self): + return { + 'message': self.mock_invoke['message'], + 'name': self.mock_invoke['name'], + 'type': self.mock_invoke['type'], + 'hostname': 'test_host', + 'username': 'test_user', + 'password': 'test_pass!' 
+ } + + def get_invoke_mock_object(self, use_rest=True): + invoke_obj = invoke_module() + if not use_rest: + invoke_obj.ems_log_event = Mock() + invoke_obj.server = MockONTAPConnection() + return invoke_obj + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_send(self, mock_request): + '''Test successful send message''' + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_invoke_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_send_error(self, mock_request): + '''Test rest send error''' + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_invoke_mock_object().apply() + msg = "Error on sending autosupport message to node %s: Expected error." % data['name'] + assert exc.value.args[0]['msg'] == msg + + def test_zapi_send_error(self): + '''Test rest send error''' + data = self.mock_args() + data['use_rest'] = 'Never' + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_invoke_mock_object(use_rest=False).apply() + msg = "Error on sending autosupport message to node %s: NetApp API failed. Reason - test:Expected error." 
% data['name'] + assert exc.value.args[0]['msg'] == msg diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_bgp_peer_group.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_bgp_peer_group.py new file mode 100644 index 000000000..ea13a47fe --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_bgp_peer_group.py @@ -0,0 +1,211 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible, \ + create_and_apply, create_module, expect_and_capture_ansible_exception, call_main +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, \ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_bgp_peer_group \ + import NetAppOntapBgpPeerGroup as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'bgpv4peer', + 'use_rest': 'always', + 'local': { + 'interface': { + 'name': 'lif1' + } + }, + 'peer': { + 'address': '10.10.10.7', + 'asn': 0 + } +} + + +SRR = rest_responses({ + 'bgp_peer_info': (200, {"records": [ + { + "ipspace": {"name": "exchange"}, + "local": { + "interface": {"ip": {"address": "10.10.10.7"}, "name": "lif1"}, + 
"port": {"name": "e1b", "node": {"name": "node1"}} + }, + "name": "bgpv4peer", + "peer": {"address": "10.10.10.7", "asn": 0}, + "state": "up", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }], "num_records": 1}, None), + 'bgp_modified': (200, {"records": [ + { + "ipspace": {"name": "exchange"}, + "local": { + "interface": {"ip": {"address": "10.10.10.7"}, "name": "lif1"}, + "port": {"name": "e1b", "node": {"name": "node1"}} + }, + "name": "bgpv4peer", + "peer": {"address": "10.10.10.8", "asn": 0}, + "state": "up", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }], "num_records": 1}, None), + 'bgp_name_modified': (200, {"records": [ + { + "ipspace": {"name": "exchange"}, + "local": { + "interface": {"ip": {"address": "10.10.10.7"}, "name": "lif1"}, + "port": {"name": "e1b", "node": {"name": "node1"}} + }, + "name": "newbgpv4peer", + "peer": {"address": "10.10.10.8", "asn": 0}, + "state": "up", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }], "num_records": 1}, None), + 'bgp_peer_info_ipv6': (200, {"records": [ + { + "ipspace": {"name": "exchange"}, + "name": "bgpv6peer", + "peer": {"address": "2402:940::45", "asn": 0}, + "state": "up", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }], "num_records": 1}, None), + 'bgp_modified_ipv6': (200, {"records": [ + { + "ipspace": {"name": "exchange"}, + "name": "bgpv6peer", + "peer": {"address": "2402:940::46", "asn": 0}, + "state": "up", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }], "num_records": 1}, None), +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "name"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_create_bgp_peer_group(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', 
SRR['empty_records']), + ('POST', 'network/ip/bgp/peer-groups', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['bgp_peer_info']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS)['changed'] + + +def test_modify_bgp_peer_group(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['bgp_peer_info']), + ('PATCH', 'network/ip/bgp/peer-groups/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['bgp_modified']), + # ipv6 modify + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['bgp_peer_info_ipv6']), + ('PATCH', 'network/ip/bgp/peer-groups/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['bgp_modified_ipv6']) + ]) + args = {'peer': {'address': '10.10.10.8'}} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + args = {'name': 'bgpv6peer', 'peer': {'address': '2402:0940:000:000:00:00:0000:0046'}} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_rename_modify_bgp_peer_group(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['bgp_peer_info']), + ('PATCH', 'network/ip/bgp/peer-groups/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['bgp_name_modified']) + ]) + args = {'from_name': 'bgpv4peer', 'name': 'newbgpv4peer', 'peer': {'address': '10.10.10.8'}} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + 
assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_delete_bgp_peer_group(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['bgp_peer_info']), + ('DELETE', 'network/ip/bgp/peer-groups/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['empty_records']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_all_methods_catch_exception(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + # GET/POST/PATCH/DELETE error. + ('GET', 'network/ip/bgp/peer-groups', SRR['generic_error']), + ('POST', 'network/ip/bgp/peer-groups', SRR['generic_error']), + ('PATCH', 'network/ip/bgp/peer-groups/1cd8a442', SRR['generic_error']), + ('DELETE', 'network/ip/bgp/peer-groups/1cd8a442', SRR['generic_error']) + ]) + bgp_obj = create_module(my_module, DEFAULT_ARGS) + bgp_obj.uuid = '1cd8a442' + assert 'Error fetching BGP peer' in expect_and_capture_ansible_exception(bgp_obj.get_bgp_peer_group, 'fail')['msg'] + assert 'Error creating BGP peer' in expect_and_capture_ansible_exception(bgp_obj.create_bgp_peer_group, 'fail')['msg'] + assert 'Error modifying BGP peer' in expect_and_capture_ansible_exception(bgp_obj.modify_bgp_peer_group, 'fail', {})['msg'] + assert 'Error deleting BGP peer' in expect_and_capture_ansible_exception(bgp_obj.delete_bgp_peer_group, 'fail')['msg'] + + +def test_modify_rename_create_error(): + register_responses([ + # Error if both name and from_name not exist. + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['empty_records']), + ('GET', 'network/ip/bgp/peer-groups', SRR['empty_records']), + # Error if try to modify asn. 
+ ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['bgp_peer_info']), + # Error if peer and local not present in args when creating peer groups. + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/bgp/peer-groups', SRR['empty_records']) + ]) + assert 'Error renaming BGP peer group' in create_and_apply(my_module, DEFAULT_ARGS, {'from_name': 'name'}, fail=True)['msg'] + args = {'peer': {'asn': 5}} + assert 'Error: cannot modify peer asn.' in create_and_apply(my_module, DEFAULT_ARGS, args, fail=True)['msg'] + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['peer'] + del DEFAULT_ARGS_COPY['local'] + assert 'Error creating BGP peer group' in create_and_apply(my_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + + +def test_error_ontap96(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']) + ]) + assert 'requires ONTAP 9.7.0 or later' in call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py new file mode 100644 index 000000000..5a38d3933 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_broadcast_domain.py @@ -0,0 +1,808 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from 
ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_no_warnings, set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain \ + import NetAppOntapBroadcastDomain as broadcast_domain_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.type = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'broadcast_domain': + xml = self.build_broadcast_domain_info(self.params) + self.xml_out = xml + return xml + + @staticmethod + def build_broadcast_domain_info(broadcast_domain_details): + ''' build xml data for broadcast_domain info ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'num-records': 1, + 'attributes-list': { + 'net-port-broadcast-domain-info': { + 'broadcast-domain': broadcast_domain_details['name'], + 'ipspace': broadcast_domain_details['ipspace'], + 'mtu': broadcast_domain_details['mtu'], + 'ports': { + 'port-info': { + 'port': 'test_port_1' + } + } + } + + } + } + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.mock_broadcast_domain = { + 'name': 'test_broadcast_domain', + 'mtu': 1000, + 'ipspace': 'Default', + 'ports': 'test_port_1' + } + + def mock_args(self): 
+ return { + 'name': self.mock_broadcast_domain['name'], + 'ipspace': self.mock_broadcast_domain['ipspace'], + 'mtu': self.mock_broadcast_domain['mtu'], + 'ports': self.mock_broadcast_domain['ports'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never', + 'feature_flags': {'no_cserver_ems': True} + } + + def get_broadcast_domain_mock_object(self, kind=None, data=None): + """ + Helper method to return an na_ontap_volume object + :param kind: passes this param to MockONTAPConnection() + :param data: passes this param to MockONTAPConnection() + :return: na_ontap_volume object + """ + broadcast_domain_obj = broadcast_domain_module() + broadcast_domain_obj.asup_log_for_cserver = Mock(return_value=None) + broadcast_domain_obj.cluster = Mock() + broadcast_domain_obj.cluster.invoke_successfully = Mock() + if kind is None: + broadcast_domain_obj.server = MockONTAPConnection() + else: + if data is None: + broadcast_domain_obj.server = MockONTAPConnection(kind='broadcast_domain', data=self.mock_broadcast_domain) + else: + broadcast_domain_obj.server = MockONTAPConnection(kind='broadcast_domain', data=data) + return broadcast_domain_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + broadcast_domain_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_get_nonexistent_net_route(self): + ''' Test if get_broadcast_domain returns None for non-existent broadcast_domain ''' + set_module_args(self.mock_args()) + result = self.get_broadcast_domain_mock_object().get_broadcast_domain() + assert result is None + + def test_create_error_missing_broadcast_domain(self): + ''' Test if create throws an error if broadcast_domain is not specified''' + data = self.mock_args() + del data['name'] + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + 
self.get_broadcast_domain_mock_object('broadcast_domain').create_broadcast_domain() + msg = 'missing required arguments: name' + assert exc.value.args[0]['msg'] == msg + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.create_broadcast_domain') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain') + def test_successful_create(self, get_broadcast_domain, create_broadcast_domain): + ''' Test successful create ''' + data = self.mock_args() + set_module_args(data) + get_broadcast_domain.side_effect = [None] + with pytest.raises(AnsibleExitJson) as exc: + self.get_broadcast_domain_mock_object().apply() + assert exc.value.args[0]['changed'] + create_broadcast_domain.assert_called_with(None) + + def test_create_idempotency(self): + ''' Test create idempotency ''' + set_module_args(self.mock_args()) + obj = self.get_broadcast_domain_mock_object('broadcast_domain') + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.create_broadcast_domain') + def test_create_idempotency_identical_ports(self, create_broadcast_domain): + ''' Test create idemptency identical ports ''' + data = self.mock_args() + data['ports'] = ['test_port_1', 'test_port_1'] + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_broadcast_domain_mock_object('broadcast_domain').apply() + assert not exc.value.args[0]['changed'] + + def test_modify_mtu(self): + ''' Test successful modify mtu ''' + data = self.mock_args() + data['mtu'] = 1200 + data['from_ipspace'] = 'test' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_broadcast_domain_mock_object('broadcast_domain').apply() + assert exc.value.args[0]['changed'] + + def 
test_modify_ipspace_idempotency(self):
+        ''' Test modify ipspace idempotency'''
+        data = self.mock_args()
+        data['ipspace'] = 'Default'
+        set_module_args(data)
+        with pytest.raises(AnsibleExitJson) as exc:
+            self.get_broadcast_domain_mock_object('broadcast_domain').apply()
+        assert not exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.add_broadcast_domain_ports')
+    def test_add_ports(self, add_broadcast_domain_ports):
+        ''' Test successful modify ports '''
+        data = self.mock_args()
+        data['ports'] = 'test_port_1,test_port_2'
+        set_module_args(data)
+        with pytest.raises(AnsibleExitJson) as exc:
+            self.get_broadcast_domain_mock_object('broadcast_domain').apply()
+        assert exc.value.args[0]['changed']
+        add_broadcast_domain_ports.assert_called_with(['test_port_2'])
+
+    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.delete_broadcast_domain_ports')
+    def test_delete_ports(self, delete_broadcast_domain_ports):
+        ''' Test successful modify ports '''
+        data = self.mock_args()
+        data['ports'] = ''
+        set_module_args(data)
+        with pytest.raises(AnsibleExitJson) as exc:
+            self.get_broadcast_domain_mock_object('broadcast_domain').apply()
+        assert exc.value.args[0]['changed']
+        delete_broadcast_domain_ports.assert_called_with(['test_port_1'])
+
+    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.modify_broadcast_domain')
+    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.split_broadcast_domain')
+    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain')
+    def test_split_broadcast_domain(self, get_broadcast_domain, split_broadcast_domain, modify_broadcast_domain):
+        ''' Test successful split broadcast domain '''
+        data =
self.mock_args() + data['from_name'] = 'test_broadcast_domain' + data['name'] = 'test_broadcast_domain_2' + data['ports'] = 'test_port_2' + set_module_args(data) + current = { + 'domain-name': 'test_broadcast_domain', + 'mtu': 1000, + 'ipspace': 'Default', + 'ports': ['test_port_1,test_port2'] + } + get_broadcast_domain.side_effect = [ + None, + current + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_broadcast_domain_mock_object().apply() + assert exc.value.args[0]['changed'] + modify_broadcast_domain.assert_not_called() + split_broadcast_domain.assert_called_with() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.delete_broadcast_domain') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.modify_broadcast_domain') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain') + def test_split_broadcast_domain_modify_delete(self, get_broadcast_domain, modify_broadcast_domain, delete_broadcast_domain): + ''' Test successful split broadcast domain ''' + data = self.mock_args() + data['from_name'] = 'test_broadcast_domain' + data['name'] = 'test_broadcast_domain_2' + data['ports'] = ['test_port_1', 'test_port_2'] + data['mtu'] = 1200 + set_module_args(data) + current = { + 'name': 'test_broadcast_domain', + 'mtu': 1000, + 'ipspace': 'Default', + 'ports': ['test_port_1', 'test_port2'] + } + get_broadcast_domain.side_effect = [ + None, + current + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_broadcast_domain_mock_object().apply() + assert exc.value.args[0]['changed'] + delete_broadcast_domain.assert_called_with('test_broadcast_domain') + modify_broadcast_domain.assert_called_with() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain') + def 
test_split_broadcast_domain_not_exist(self, get_broadcast_domain):
+        ''' Test split broadcast domain does not exist '''
+        data = self.mock_args()
+        data['from_name'] = 'test_broadcast_domain'
+        data['name'] = 'test_broadcast_domain_2'
+        data['ports'] = 'test_port_2'
+        set_module_args(data)
+
+        get_broadcast_domain.side_effect = [
+            None,
+            None,
+        ]
+        with pytest.raises(AnsibleFailJson) as exc:
+            self.get_broadcast_domain_mock_object().apply()
+        msg = 'A domain cannot be split if it does not exist.'
+        assert exc.value.args[0]['msg'] == msg
+
+    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.split_broadcast_domain')
+    def test_split_broadcast_domain_idempotency(self, split_broadcast_domain):
+        ''' Test successful split broadcast domain '''
+        data = self.mock_args()
+        data['from_name'] = 'test_broadcast_domain'
+        data['ports'] = 'test_port_1'
+        set_module_args(data)
+        with pytest.raises(AnsibleExitJson) as exc:
+            self.get_broadcast_domain_mock_object('broadcast_domain').apply()
+        assert not exc.value.args[0]['changed']
+        split_broadcast_domain.assert_not_called()
+
+    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.delete_broadcast_domain')
+    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain')
+    def test_delete_broadcast_domain(self, get_broadcast_domain, delete_broadcast_domain):
+        ''' test delete broadcast domain '''
+        data = self.mock_args()
+        data['state'] = 'absent'
+        set_module_args(data)
+        current = {
+            'name': 'test_broadcast_domain',
+            'mtu': 1000,
+            'ipspace': 'Default',
+            'ports': ['test_port_1', 'test_port2']
+        }
+        get_broadcast_domain.side_effect = [current]
+        with pytest.raises(AnsibleExitJson) as exc:
+            self.get_broadcast_domain_mock_object().apply()
+        assert exc.value.args[0]['changed']
+        delete_broadcast_domain.assert_called_with(current=current)
+
+
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.delete_broadcast_domain') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain') + def test_delete_broadcast_domain_idempotent(self, get_broadcast_domain, delete_broadcast_domain): + ''' test delete broadcast domain ''' + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + get_broadcast_domain.side_effect = [None] + with pytest.raises(AnsibleExitJson) as exc: + self.get_broadcast_domain_mock_object().apply() + assert not exc.value.args[0]['changed'] + delete_broadcast_domain.assert_not_called() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.delete_broadcast_domain') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_broadcast_domain.NetAppOntapBroadcastDomain.get_broadcast_domain') + def test_delete_broadcast_domain_if_all_ports_are_removed(self, get_broadcast_domain, delete_broadcast_domain): + ''' test delete broadcast domain if all the ports are deleted ''' + data = self.mock_args() + data['ports'] = [] + data['state'] = 'present' + set_module_args(data) + current = { + 'name': 'test_broadcast_domain', + 'mtu': 1000, + 'ipspace': 'Default', + 'ports': ['test_port_1', 'test_port2'] + } + get_broadcast_domain.side_effect = [current] + with pytest.raises(AnsibleExitJson) as exc: + self.get_broadcast_domain_mock_object().apply() + assert exc.value.args[0]['changed'] + delete_broadcast_domain.assert_called_with(current=current) + + +def default_args(): + args = { + 'state': 'present', + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, 
dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_6': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy')), None), + 'is_rest_9_7': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'port_detail_e0d': (200, { + "num_records": 1, + "records": [ + { + 'name': 'e0d', + 'node': {'name': 'mohan9cluster2-01'}, + 'uuid': 'ea670505-2ab3-11ec-aa30-005056b3dfc8' + }] + }, None), + 'port_detail_e0a': (200, { + "num_records": 1, + "records": [ + { + 'name': 'e0a', + 'node': {'name': 'mohan9cluster2-01'}, + 'uuid': 'ea63420b-2ab3-11ec-aa30-005056b3dfc8' + }] + }, None), + 'port_detail_e0b': (200, { + "num_records": 1, + "records": [ + { + 'name': 'e0b', + 'node': {'name': 'mohan9cluster2-01'}, + 'uuid': 'ea64c0f2-2ab3-11ec-aa30-005056b3dfc8' + }] + }, None), + 'broadcast_domain_record': (200, { + "num_records": 1, + "records": [ + { + "uuid": "4475a2c8-f8a0-11e8-8d33-005056bb986f", + "name": "domain1", + "ipspace": {"name": "ip1"}, + "ports": [ + { + "uuid": "ea63420b-2ab3-11ec-aa30-005056b3dfc8", + "name": "e0a", + "node": { + "name": "mohan9cluster2-01" + } + }, + { + "uuid": "ea64c0f2-2ab3-11ec-aa30-005056b3dfc8", + "name": "e0b", + "node": { + "name": "mohan9cluster2-01" + } + }, + { + "uuid": "ea670505-2ab3-11ec-aa30-005056b3dfc8", + "name": "e0d", + "node": { + "name": "mohan9cluster2-01" + } + } + ], + "mtu": 9000 + }] + }, None), + 'broadcast_domain_record_split': (200, { + "num_records": 1, + "records": [ + { + "uuid": "4475a2c8-f8a0-11e8-8d33-005056bb986f", 
+ "name": "domain2", + "ipspace": {"name": "ip1"}, + "ports": [ + { + "uuid": "ea63420b-2ab3-11ec-aa30-005056b3dfc8", + "name": "e0a", + "node": { + "name": "mohan9cluster2-01" + } + } + ], + "mtu": 9000 + }] + }, None) +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(dict(hostname='')) + broadcast_domain_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'missing required arguments:' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_create_broadcast_domain(mock_request, patch_ansible): + ''' test create broadcast domain ''' + args = dict(default_args()) + args['name'] = "domain1" + args['ipspace'] = "ip1" + args['mtu'] = "9000" + args['ports'] = ["mohan9cluster2-01:e0a", "mohan9cluster2-01:e0b", "mohan9cluster2-01:e0d"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0a'], + SRR['port_detail_e0b'], + SRR['port_detail_e0d'], + SRR['zero_record'], # get + SRR['empty_good'], # create + SRR['empty_good'], # add e0a + SRR['empty_good'], # add e0b + SRR['empty_good'], # add e0c + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_create_broadcast_domain_idempotency(mock_request, patch_ansible): + ''' test create broadcast domain ''' + args = dict(default_args()) + args['name'] = "domain1" + args['ipspace'] = "ip1" + args['mtu'] = 9000 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['broadcast_domain_record'], # get + 
SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_create_broadcast_domain_idempotency_identical_ports(mock_request, patch_ansible): + ''' test create broadcast domain ''' + args = dict(default_args()) + args['name'] = "domain2" + args['ipspace'] = "ip1" + args['mtu'] = 9000 + args['ports'] = ['mohan9cluster2-01:e0a', 'mohan9cluster2-01:e0a'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0a'], + SRR['broadcast_domain_record_split'], # get + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_modify_broadcast_domain(mock_request, patch_ansible): + ''' test modify broadcast domain mtu ''' + args = dict(default_args()) + args['name'] = "domain1" + args['ipspace'] = "ip1" + args['mtu'] = 1500 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['broadcast_domain_record'], # get + SRR['empty_good'], # modify + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rename_broadcast_domain(mock_request, patch_ansible): + ''' test modify broadcast domain mtu ''' + args = dict(default_args()) + 
args['from_name'] = "domain1" + args['name'] = "domain2" + args['ipspace'] = "ip1" + args['mtu'] = 1500 + args['ports'] = ["mohan9cluster2-01:e0a", "mohan9cluster2-01:e0b", "mohan9cluster2-01:e0d"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0a'], + SRR['port_detail_e0b'], + SRR['port_detail_e0d'], + SRR['zero_record'], # get + SRR['broadcast_domain_record'], # get + SRR['empty_good'], # rename broadcast domain + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_split_broadcast_domain_create_domain2_with_e0a(mock_request, patch_ansible): + ''' test modify broadcast domain mtu ''' + args = dict(default_args()) + args['from_name'] = "domain1" + args['name'] = "domain2" + args['ipspace'] = "ip1" + args['mtu'] = 1500 + args['ports'] = ["mohan9cluster2-01:e0a"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0a'], + SRR['zero_record'], # get + SRR['broadcast_domain_record'], # get + SRR['empty_good'], # create broadcast domain + SRR['empty_good'], # add e0a to domain2 + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_split_broadcast_domain_create_domain2_with_e0a_idempotent(mock_request, patch_ansible): + ''' test modify broadcast domain mtu ''' + args = dict(default_args()) + args['from_name'] = "domain1" + args['name'] = "domain2" + args['ipspace'] = 
"ip1" + args['mtu'] = 1500 + args['ports'] = ["mohan9cluster2-01:e0a"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0a'], + SRR['broadcast_domain_record_split'], # get domain2 details + SRR['zero_record'], # empty record for domain1 + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_create_new_broadcast_domain_with_partial_match(mock_request, patch_ansible): + ''' test modify broadcast domain mtu ''' + args = dict(default_args()) + args['from_name'] = "domain2" + args['name'] = "domain1" + args['ipspace'] = "ip1" + args['mtu'] = 1500 + args['ports'] = ["mohan9cluster2-01:e0b"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0b'], + SRR['zero_record'], # empty record for domain1 + SRR['broadcast_domain_record_split'], # get domain2 details + SRR['empty_good'], # create broadcast domain domain1 + SRR['empty_good'], # add e0b to domain1 + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_delete_broadcast_domain(mock_request, patch_ansible): + ''' test delete broadcast domain mtu ''' + args = dict(default_args()) + args['name'] = "domain1" + args['ipspace'] = "ip1" + args['mtu'] = 1500 + args['state'] = "absent" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['broadcast_domain_record'], # get + 
SRR['empty_good'], # remove all the ports in broadcast domain + SRR['empty_good'], # delete broadcast domain + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_try_to_bad_format_port(mock_request, patch_ansible): + ''' test delete broadcast domain mtu ''' + args = dict(default_args()) + args['name'] = "domain1" + args['ipspace'] = "ip1" + args['mtu'] = 1500 + args['state'] = "present" + args['ports'] = ["mohan9cluster2-01e0a"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = broadcast_domain_module() + print('Info: %s' % exc.value.args[0]) + msg = "Error: Invalid value specified for port: mohan9cluster2-01e0a, provide port name as node_name:port_name" + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_try_to_create_domain_without_ipspace(mock_request, patch_ansible): + ''' test delete broadcast domain mtu ''' + args = dict(default_args()) + args['name'] = "domain1" + args['mtu'] = 1500 + args['state'] = "present" + args['ports'] = ["mohan9cluster2-01:e0a"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = broadcast_domain_module() + print('Info: %s' % exc.value.args[0]) + msg = "Error: ipspace space is a required option with REST" + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_modify_ipspace(mock_request, patch_ansible): + ''' test modify ipspace 
''' + args = dict(default_args()) + args['name'] = "domain2" + args['from_ipspace'] = "ip1" + args['ipspace'] = "Default" + args['mtu'] = 1500 + args['ports'] = ["mohan9cluster2-01:e0b"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0b'], + SRR['zero_record'], # empty record for domain2 in ipspace Default + SRR['broadcast_domain_record_split'], # get domain2 details in ipspace ip1 + SRR['empty_good'], # modify ipspace + SRR['empty_good'], # add e0b to domain2 + SRR['empty_good'], # remove e0a + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_modify_name_and_ipspace(mock_request, patch_ansible): + ''' test modify ipspace ''' + args = dict(default_args()) + args['from_name'] = "domain2" + args['name'] = "domain1" + args['from_ipspace'] = "ip1" + args['ipspace'] = "Default" + args['mtu'] = 1500 + args['ports'] = ["mohan9cluster2-01:e0a"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0a'], + SRR['zero_record'], # empty record for domain2 in ipspace Default + SRR['broadcast_domain_record_split'], # get domain2 details in ipspace ip1 + SRR['empty_good'], # modify name, ipspace and mtu + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_split_name_ipspace_if_not_exact_match_of_ports(mock_request, patch_ansible): + ''' test create new domain as 
exact match not found ''' + args = dict(default_args()) + args['from_name'] = "domain2" + args['name'] = "domain1" + args['from_ipspace'] = "ip1" + args['ipspace'] = "Default" + args['mtu'] = 1500 + args['ports'] = ["mohan9cluster2-01:e0b"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0b'], + SRR['zero_record'], # empty record for domain1 in ipspace Default + SRR['broadcast_domain_record_split'], # get domain2 details in ipspace ip1 + SRR['empty_good'], # create new broadcast domain domain1 in ipspace Default + SRR['empty_good'], # Add e0b to domain1 + SRR['end_of_sequence'] + ] + my_obj = broadcast_domain_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py new file mode 100644 index 000000000..78c35ba73 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cg_snapshot.py @@ -0,0 +1,81 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_cg_snapshot''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cg_snapshot \ + import NetAppONTAPCGSnapshot as my_module # 
module under test
+
+if not netapp_utils.has_netapp_lib():
+    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
+
+
+class MockONTAPConnection(object):
+    ''' mock server connection to ONTAP host '''
+
+    def __init__(self, kind=None, parm1=None):
+        ''' save arguments '''
+        self.type = kind
+        self.parm1 = parm1
+        self.xml_in = None
+        self.xml_out = None
+
+    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
+        ''' mock invoke_successfully returning xml data '''
+        self.xml_in = xml
+        if self.type == 'vserver':
+            xml = self.build_vserver_info(self.parm1)
+        self.xml_out = xml
+        return xml
+
+    @staticmethod
+    def build_vserver_info(vserver):
+        ''' build xml data for vserver-info '''
+        xml = netapp_utils.zapi.NaElement('xml')
+        attributes = netapp_utils.zapi.NaElement('attributes-list')
+        attributes.add_node_with_children('vserver-info',
+                                          **{'vserver-name': vserver})
+        xml.add_child_elem(attributes)
+        # print(xml.to_string())
+        return xml
+
+
+class TestMyModule(unittest.TestCase):
+    ''' a group of related Unit Tests '''
+
+    def setUp(self):
+        self.server = MockONTAPConnection()
+
+    def test_module_fail_when_required_args_missing(self):
+        ''' required arguments are reported as errors '''
+        with pytest.raises(AnsibleFailJson) as exc:
+            set_module_args({})
+            my_module()
+        print('Info: %s' % exc.value.args[0]['msg'])
+
+    def test_ensure_command_called(self):
+        ''' a more interesting test '''
+        set_module_args({
+            'vserver': 'vserver',
+            'volumes': 'volumes',
+            'snapshot': 'snapshot',
+            'hostname': 'hostname',
+            'username': 'username',
+            'password': 'password',
+        })
+        my_obj = my_module()
+        my_obj.server = self.server
+        with pytest.raises(AnsibleFailJson) as exc:
+            my_obj.cgcreate()
+        msg = 'Error fetching CG ID for CG commit snapshot'
+        assert exc.value.args[0]['msg'] == msg
diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs.py
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs.py new file mode 100644 index 000000000..99aa0d140 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs.py @@ -0,0 +1,464 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_cifs ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + patch_ansible, call_main, create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs \ + import NetAppONTAPCifsShare as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'cifs_record': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "name": 'cifs_share_name', + "path": '/', + "comment": 'CIFS share comment', + "unix_symlink": 'widelink', + "target": { + "name": "20:05:00:50:56:b3:0c:fa" + }, + "access_based_enumeration": True, + 
"change_notify": True, + "encryption": False, + "home_directory": True, + "oplocks": False, + "continuously_available": True, + "show_snapshot": True, + "namespace_caching": True, + "allow_unencrypted_access": True, + "browsable": True, + "show_previous_versions": True + } + ], + "num_records": 1 + }, None + ), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + +cifs_record_info = { + 'num-records': 1, + 'attributes-list': { + 'cifs-share': { + 'share-name': 'cifs_share_name', + 'path': '/test', + 'vscan-fileop-profile': 'standard', + 'share-properties': [{'cifs-share-properties': 'browsable'}, {'cifs-share-properties': 'show_previous_versions'}], + 'symlink-properties': [{'cifs-share-symlink-properties': 'enable'}] + } + } +} + +ZRR = zapi_responses({ + 'cifs_record_info': build_zapi_response(cifs_record_info) +}) + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'admin', + 'password': 'netapp1!', + 'name': 'cifs_share_name', + 'path': '/test', + 'share_properties': ['browsable', 'show-previous-versions'], + 'symlink_properties': 'enable', + 'vscan_fileop_profile': 'standard', + 'vserver': 'abc', + 'use_rest': 'never' +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + error = 'missing required arguments:' + assert error in call_main(my_main, {}, fail=True)['msg'] + + +def test_get(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']) + ]) + cifs_obj = create_module(my_module, DEFAULT_ARGS) + result = cifs_obj.get_cifs_share() + assert result + + +def test_error_create(): + register_responses([ + ('cifs-share-get-iter', ZRR['empty']), + ('cifs-share-create', ZRR['error']), + ]) + module_args = { + 'state': 'present' + } + error = create_and_apply(my_module, DEFAULT_ARGS, fail=True)['msg'] + assert 'Error creating cifs-share' in error + + +def test_create(): + register_responses([ + ('cifs-share-get-iter', ZRR['empty']), + ('cifs-share-create', ZRR['success']), + ]) + 
module_args = { + 'state': 'present', + 'comment': 'some_comment' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']), + ('cifs-share-delete', ZRR['success']), + ]) + module_args = { + 'state': 'absent' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_delete(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']), + ('cifs-share-delete', ZRR['error']), + ]) + module_args = { + 'state': 'absent' + } + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'Error deleting cifs-share' in error + + +def test_modify_path(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']), + ('cifs-share-modify', ZRR['success']), + ]) + module_args = { + 'path': '//' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_comment(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']), + ('cifs-share-modify', ZRR['success']), + ]) + module_args = { + 'comment': 'cifs modify' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_share_properties(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']), + ('cifs-share-modify', ZRR['success']), + ]) + module_args = { + 'share_properties': 'oplocks' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_symlink_properties(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']), + ('cifs-share-modify', ZRR['success']), + ]) + module_args = { + 'symlink_properties': 'read_only' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_vscan_fileop_profile(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']), + ('cifs-share-modify', 
ZRR['success']), + ]) + module_args = { + 'vscan_fileop_profile': 'strict' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_modify(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']), + ('cifs-share-modify', ZRR['error']), + ]) + module_args = { + 'symlink_properties': 'read' + } + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'Error modifying cifs-share' in error + + +def test_create_idempotency(): + register_responses([ + ('cifs-share-get-iter', ZRR['cifs_record_info']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] is False + + +def test_delete_idempotency(): + register_responses([ + ('cifs-share-get-iter', ZRR['empty']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] is False + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('cifs-share-create', ZRR['error']), + ('cifs-share-modify', ZRR['error']), + ('cifs-share-delete', ZRR['error']) + ]) + module_args = {} + + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + error = expect_and_capture_ansible_exception(my_obj.create_cifs_share, 'fail')['msg'] + assert 'Error creating cifs-share cifs_share_name: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.modify_cifs_share, 'fail')['msg'] + assert 'Error modifying cifs-share cifs_share_name: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.delete_cifs_share, 'fail')['msg'] + assert 'Error deleting cifs-share cifs_share_name: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' in error + + +ARGS_REST = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always', + 'vserver': 'test_vserver', + 'name': 'cifs_share_name', + 'path': '/', + 'unix_symlink': 'widelink', +} + + +def test_options_support(): + ''' test option support ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']) + ]) + module_args = { + 'show_snapshot': True, + 'allow_unencrypted_access': True, + 'browsable': True + } + error = 'Error: Minimum version of ONTAP' + assert error in create_module(my_module, ARGS_REST, module_args, fail=True)['msg'] + + +def test_rest_successful_create(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['empty_records']), + ('POST', 'protocols/cifs/shares', SRR['empty_good']), + ]) + module_args = { + 'comment': 'CIFS share comment', + 'unix_symlink': 'disable' + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_delete_rest(): + ''' Test delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('DELETE', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = { + 'state': 'absent', + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_rest_error_get(): + '''Test error rest get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on fetching cifs shares: calling: protocols/cifs/shares: got Expected error.' 
in error + + +def test_rest_error_create(): + '''Test error rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['empty_records']), + ('POST', 'protocols/cifs/shares', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on creating cifs shares:' in error + + +def test_error_delete_rest(): + ''' Test error delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('DELETE', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']), + ]) + module_args = { + 'state': 'absent' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on deleting cifs shares:' in error + + +def test_modify_cifs_share_path(): + ''' test modify CIFS share path ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/cifs_share_name', SRR['empty_good']), + ]) + module_args = { + 'path': "\\vol1" + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_modify_cifs_share_comment(): + ''' test modify CIFS share comment ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/cifs_share_name', SRR['empty_good']), + ]) + module_args = { + 'comment': "cifs comment modify" + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_modify_cifs_share_properties(): + ''' test modify CIFS share properties ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/cifs_share_name', 
SRR['empty_good']), + ]) + module_args = { + 'unix_symlink': "disable" + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_modify_cifs_share_properties_2(): + ''' test modify CIFS share properties ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_13_1']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/cifs_share_name', SRR['empty_good']), + ]) + module_args = { + "access_based_enumeration": False, + "change_notify": False, + "encryption": True, + "oplocks": True, + "continuously_available": False, + "show_snapshot": False, + "namespace_caching": False, + "allow_unencrypted_access": False, + "browsable": False, + "show_previous_versions": False + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_error_modify_cifs_share_path(): + ''' test modify CIFS share path error''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/cifs_share_name', SRR['generic_error']), + ]) + module_args = { + 'path': "\\vol1" + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on modifying cifs shares:' in error + + +def test_error_modify_cifs_share_comment(): + ''' test modify CIFS share comment error''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/cifs_share_name', SRR['generic_error']), + ]) + module_args = { + 'comment': "cifs comment modify" + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on modifying cifs shares:' in error + + +def test_rest_successful_create_idempotency(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', 
SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']) + ]) + module_args = { + 'use_rest': 'always' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] is False + + +def test_rest_successful_delete_idempotency(): + '''Test successful rest delete''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['empty_records']) + ]) + module_args = {'use_rest': 'always', 'state': 'absent'} + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] is False + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_missing_netapp_lib(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + msg = 'Error: the python NetApp-Lib module is required. Import error: None' + assert msg == call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_acl.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_acl.py new file mode 100644 index 000000000..1d0d565cd --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_acl.py @@ -0,0 +1,412 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_cifs_acl """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception, AnsibleFailJson +from 
ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_acl \ + import NetAppONTAPCifsAcl as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +SHARE_NAME = 'share_name' + +acl_info = {'num-records': 1, + 'attributes-list': + {'cifs-share-access-control': + {'share': SHARE_NAME, + 'user-or-group': 'user123', + 'permission': 'full_control', + 'user-group-type': 'windows' + } + }, + } + +ZRR = zapi_responses({ + 'acl_info': build_zapi_response(acl_info), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'permission': 'full_control', + 'share_name': 'share_name', + 'user_or_group': 'user_or_group', + 'vserver': 'vserver', + 'use_rest': 'never', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + error_msg = create_module(my_module, fail=True)['msg'] + for fragment in 'missing required arguments:', 'hostname', 'share_name', 'user_or_group', 'vserver': + assert fragment in error_msg + assert 'permission' not in error_msg + + args = dict(DEFAULT_ARGS) + args.pop('permission') + msg = 'state is present but all of the following are missing: permission' + assert create_module(my_module, args, fail=True)['msg'] == msg + + +def test_create(): + register_responses([ + ('cifs-share-access-control-get-iter', ZRR['empty']), + ('cifs-share-access-control-create', ZRR['success']), + ]) + module_args = { + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + 
+ +def test_create_with_type(): + register_responses([ + ('cifs-share-access-control-get-iter', ZRR['empty']), + ('cifs-share-access-control-create', ZRR['success']), + ]) + module_args = { + 'type': 'unix_group' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete(): + register_responses([ + ('cifs-share-access-control-get-iter', ZRR['acl_info']), + ('cifs-share-access-control-delete', ZRR['success']), + ]) + module_args = { + 'state': 'absent' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_idempotent(): + register_responses([ + ('cifs-share-access-control-get-iter', ZRR['empty']), + ]) + module_args = { + 'state': 'absent' + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify(): + register_responses([ + ('cifs-share-access-control-get-iter', ZRR['acl_info']), + ('cifs-share-access-control-modify', ZRR['success']), + ]) + module_args = { + 'permission': 'no_access', + 'type': 'windows' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_modify_idempotent(): + register_responses([ + ('cifs-share-access-control-get-iter', ZRR['acl_info']), + ]) + module_args = { + 'permission': 'full_control', + 'type': 'windows' + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_modify_with_type(): + register_responses([ + ('cifs-share-access-control-get-iter', ZRR['acl_info']), + ]) + module_args = { + 'type': 'unix_group' + } + msg = 'Error: changing the type is not supported by ONTAP - current: windows, desired: unix_group' + assert create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_negative_modify_with_extra_stuff(): + register_responses([ + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + current = {'share_name': 'extra'} + msg = "Error: only permission can be changed - 
modify: {'share_name': 'share_name'}" + assert msg in expect_and_capture_ansible_exception(my_module_object.get_modify, 'fail', current)['msg'] + + current = {'share_name': 'extra', 'permission': 'permission'} + # don't check dict contents as order may differ + msg = "Error: only permission can be changed - modify:" + assert msg in expect_and_capture_ansible_exception(my_module_object.get_modify, 'fail', current)['msg'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('cifs-share-access-control-get-iter', ZRR['error']), + ('cifs-share-access-control-create', ZRR['error']), + ('cifs-share-access-control-modify', ZRR['error']), + ('cifs-share-access-control-delete', ZRR['error']), + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + + msg = 'Error getting cifs-share-access-control share_name: NetApp API failed. Reason - 12345:synthetic error for UT purpose' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_cifs_acl, 'fail')['msg'] + + msg = 'Error creating cifs-share-access-control share_name: NetApp API failed. Reason - 12345:synthetic error for UT purpose' + assert msg in expect_and_capture_ansible_exception(my_module_object.create_cifs_acl, 'fail')['msg'] + + msg = 'Error modifying cifs-share-access-control permission share_name: NetApp API failed. Reason - 12345:synthetic error for UT purpose' + assert msg in expect_and_capture_ansible_exception(my_module_object.modify_cifs_acl_permission, 'fail')['msg'] + + msg = 'Error deleting cifs-share-access-control share_name: NetApp API failed. Reason - 12345:synthetic error for UT purpose' + assert msg in expect_and_capture_ansible_exception(my_module_object.delete_cifs_acl, 'fail')['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_missing_netapp_lib(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + msg = 'Error: the python NetApp-Lib module is required. 
Import error: None' + assert msg in create_module(my_module, DEFAULT_ARGS, fail=True)['msg'] + + +def test_main(): + register_responses([ + ('cifs-share-access-control-get-iter', ZRR['empty']), + ('cifs-share-access-control-create', ZRR['success']), + ]) + set_module_args(DEFAULT_ARGS) + assert expect_and_capture_ansible_exception(my_main, 'exit')['changed'] + + +SRR = rest_responses({ + 'acl_record': (200, {"records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "share": "share_name", + "user_or_group": "Everyone", + "permission": "full_control", + "type": "windows" + } + ], "num_records": 1}, None), + 'cifs_record': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "name": 'share_name', + "path": '/', + "comment": 'CIFS share comment', + "unix_symlink": 'widelink', + "target": { + "name": "20:05:00:50:56:b3:0c:fa" + } + } + ], + "num_records": 1 + }, None + ) +}) + +ARGS_REST = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'permission': 'full_control', + 'share_name': 'share_name', + 'user_or_group': 'Everyone', + 'vserver': 'vserver', + 'type': 'windows', + 'use_rest': 'always', +} + + +def test_error_get_acl_rest(): + ''' Test get error with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on fetching cifs shares acl:' in error + + +def test_error_get_share_rest(): + ''' Test get share not exists with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on fetching 
cifs shares:' in error + + +def test_error_get_no_share_rest(): + ''' Test get share not exists with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['empty_records']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error: the cifs share does not exist:' in error + + +def test_create_rest(): + ''' Test create with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['empty_records']), + ('POST', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['empty_good']), + ]) + assert create_and_apply(my_module, ARGS_REST) + + +def test_delete_rest(): + ''' Test delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['acl_record']), + ('DELETE', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls/Everyone/windows', SRR['empty_good']), + ]) + module_args = { + 'state': 'absent' + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_create_error_rest(): + ''' Test create error with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['empty_records']), + ('POST', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on creating cifs share acl:' in error + + +def test_error_delete_rest(): + ''' Test delete error with rest API ''' + module_args = { + 'state': 'absent' + 
} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['acl_record']), + ('DELETE', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls/Everyone/windows', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on deleting cifs share acl:' in error + + +def test_modify_rest(): + ''' Test modify with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['acl_record']), + ('PATCH', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls/Everyone/windows', SRR['empty_good']), + ]) + module_args = { + 'permission': 'no_access' + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_error_modify_rest(): + ''' Test modify error with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['acl_record']), + ('PATCH', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls/Everyone/windows', SRR['generic_error']) + ]) + module_args = {'permission': 'no_access'} + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + msg = 'Error modifying cifs share ACL permission: '\ + 'calling: protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls/Everyone/windows: got Expected error.' 
+ assert msg == error + + +def test_error_get_modify_rest(): + ''' Test modify error with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['acl_record']), + ]) + module_args = { + 'type': 'unix_group' + } + msg = 'Error: changing the type is not supported by ONTAP - current: windows, desired: unix_group' + assert create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] == msg + + +def test_negative_modify_with_extra_stuff_rest(): + ''' Test modify error with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']) + ]) + my_module_object = create_module(my_module, ARGS_REST) + current = {'share_name': 'extra'} + msg = "Error: only permission can be changed - modify: {'share_name': 'share_name'}" + assert msg in expect_and_capture_ansible_exception(my_module_object.get_modify, 'fail', current)['msg'] + + current = {'share_name': 'extra', 'permission': 'permission'} + # don't check dict contents as order may differ + msg = "Error: only permission can be changed - modify:" + assert msg in expect_and_capture_ansible_exception(my_module_object.get_modify, 'fail', current)['msg'] + + +def test_delete_idempotent_rest(): + ''' Test delete idempotency with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['empty_records']), + ]) + module_args = { + 'state': 'absent' + } + assert not create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_create_modify_idempotent_rest(): + ''' Test create and modify idempotency with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/shares', SRR['cifs_record']), + ('GET', 
'protocols/cifs/shares/671aa46e-11ad-11ec-a267-005056b30cfa/share_name/acls', SRR['acl_record']), + ]) + module_args = { + 'permission': 'full_control', + 'type': 'windows' + } + assert not create_and_apply(my_module, ARGS_REST, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_group.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_group.py new file mode 100644 index 000000000..afe73d191 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_group.py @@ -0,0 +1,218 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_cifs_local_group ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, call_main, create_module, expect_and_capture_ansible_exception, AnsibleFailJson, create_and_apply +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_local_group \ + import NetAppOntapCifsLocalGroup as group_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = 
rest_responses({ + # module specific responses + 'group_record': (200, {"records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansible" + }, + 'name': 'BUILTIN\\Guests', + 'sid': 'S-1-5-21-256008430-3394229847-3930036330-1001', + } + ], "num_records": 1}, None), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + + +ARGS_REST = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'ansible', + 'name': 'BUILTIN\\GUESTS', +} + + +def test_get_existent_cifs_local_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ]) + cifs_obj = create_module(group_module, ARGS_REST) + result = cifs_obj.get_cifs_local_group_rest() + assert result + + +def test_error_get_existent_cifs_local_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['generic_error']), + ]) + module_args = { + 'vserver': 'ansible', + 'name': 'BUILTIN\\GUESTS', + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = 'Error on fetching cifs local-group:' + assert msg in error + + +def test_create_cifs_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['empty_records']), + ('POST', 'protocols/cifs/local-groups', SRR['empty_good']), + ]) + module_args = { + 'vserver': 'ansible', + 'name': 'BUILTIN\\GUESTS' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_error_create_cifs_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['empty_records']), + ('POST', 'protocols/cifs/local-groups', SRR['generic_error']), + ]) + module_args = { + 'vserver': 'ansible', + 'name': 'BUILTIN\\GUESTS', + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = 
"Error on creating cifs local-group:" + assert msg in error + + +def test_delete_cifs_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('DELETE', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001', SRR['empty_good']), + ]) + module_args = { + 'vserver': 'ansible', + 'name': 'BUILTIN\\GUESTS', + 'state': 'absent' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_error_delete_cifs_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('DELETE', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001', SRR['generic_error']), + ]) + module_args = { + 'vserver': 'ansible', + 'name': 'BUILTIN\\GUESTS', + 'state': 'absent' + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = "Error on deleting cifs local-group:" + assert msg in error + + +def test_modify_cifs_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('PATCH', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001', SRR['empty_good']), + ]) + module_args = { + 'vserver': 'ansible', + 'name': 'BUILTIN\\GUESTS', + 'description': 'This is local group' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_error_modify_cifs_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('PATCH', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001', SRR['generic_error']), + ]) + module_args = { + 'vserver': 'ansible', 
+ 'name': 'BUILTIN\\GUESTS', + 'description': 'This is local group' + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = "Error on modifying cifs local-group:" + assert msg in error + + +def test_rename_cifs_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('PATCH', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001', SRR['empty_good']), + ]) + module_args = { + 'vserver': 'ansible', + 'from_name': 'BUILTIN\\GUESTS', + 'name': 'ANSIBLE_CIFS\\test_users' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_error_rest_rename_cifs_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/cifs/local-groups', SRR['empty_records']), + ('GET', 'protocols/cifs/local-groups', SRR['empty_records']), + ]) + module_args = { + 'vserver': 'ansible', + 'from_name': 'BUILTIN\\GUESTS_user', + 'name': 'ANSIBLE_CIFS\\test_users' + } + error = create_and_apply(group_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error renaming cifs local group:' in error + + +def test_successfully_create_group_rest_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ]) + module_args = { + 'vserver': 'ansible', + 'name': 'BUILTIN\\GUESTS', + } + assert not call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_successfully_destroy_group_rest_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['empty_records']), + ]) + module_args = { + 'state': 'absent' + } + assert not call_main(my_main, ARGS_REST, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_group_member.py 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_group_member.py new file mode 100644 index 000000000..8afd0c56a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_group_member.py @@ -0,0 +1,338 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_cifs_local_group_member ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, call_main, create_module, expect_and_capture_ansible_exception, AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_local_group_member \ + import NetAppOntapCifsLocalGroupMember as group_member_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'group_member_record': (200, {"records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "vserver" + }, + 'group_name': 'BUILTIN\\Guests', + 'member': 'test', + 'sid': 'S-1-5-21-256008430-3394229847-3930036330-1001', + } + ], "num_records": 1}, None), + 
'group_record': (200, {"records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "vserver" + }, + 'group_name': 'BUILTIN\\Guests', + 'sid': 'S-1-5-21-256008430-3394229847-3930036330-1001', + } + ], "num_records": 1}, None), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + +group_member_info = {'num-records': 1, + 'attributes-list': + {'cifs-local-group-members': + {'group-name': 'BUILTIN\\GUESTS', + 'member': 'test', + 'vserver': 'ansible' + } + }, + } + +ZRR = zapi_responses({ + 'group_member_info': build_zapi_response(group_member_info) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + 'use_rest': 'never', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + group_member_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_get_nonexistent_cifs_group_member(): + register_responses([ + ('cifs-local-group-members-get-iter', ZRR['empty']) + ]) + cifs_obj = create_module(group_member_module, DEFAULT_ARGS) + result = cifs_obj.get_cifs_local_group_member() + assert result is None + + +def test_get_existent_cifs_group_member(): + register_responses([ + ('cifs-local-group-members-get-iter', ZRR['group_member_info']) + ]) + cifs_obj = create_module(group_member_module, DEFAULT_ARGS) + result = cifs_obj.get_cifs_local_group_member() + assert result + + +def test_successfully_add_members_zapi(): + register_responses([ + ('cifs-local-group-members-get-iter', ZRR['empty']), + ('cifs-local-group-members-add-members', ZRR['success']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_add_members_zapi(): + register_responses([ + 
('cifs-local-group-members-get-iter', ZRR['empty']), + ('cifs-local-group-members-add-members', ZRR['error']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error adding member" + assert msg in error + + +def test_successfully_remove_members_zapi(): + register_responses([ + ('cifs-local-group-members-get-iter', ZRR['group_member_info']), + ('cifs-local-group-members-remove-members', ZRR['success']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_remove_members_zapi(): + register_responses([ + ('cifs-local-group-members-get-iter', ZRR['group_member_info']), + ('cifs-local-group-members-remove-members', ZRR['error']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + 'state': 'absent' + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error removing member" + assert msg in error + + +def test_successfully_add_members_zapi_idempotency(): + register_responses([ + ('cifs-local-group-members-get-iter', ZRR['group_member_info']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_remove_members_zapi_idempotency(): + register_responses([ + ('cifs-local-group-members-get-iter', ZRR['empty']), + ]) + module_args = { + 'state': 'absent' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +ARGS_REST = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + 'use_rest': 'always', +} + + +def 
test_get_nonexistent_cifs_local_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['empty_records']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'nogroup', + 'member': 'test', + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = 'CIFS local group nogroup does not exist on vserver ansible' + assert msg in error + + +def test_get_existent_cifs_local_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('GET', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['group_member_record']), + ]) + cifs_obj = create_module(group_member_module, ARGS_REST) + result = cifs_obj.get_cifs_local_group_member() + assert result + + +def test_error_get_existent_cifs_local_group_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('GET', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['generic_error']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = 'Error getting CIFS local group members for group BUILTIN\\GUESTS on vserver ansible' + assert msg in error + + +def test_add_cifs_group_member_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('GET', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['empty_records']), + ('POST', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 
'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['empty_good']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_error_add_cifs_group_member_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('GET', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['empty_records']), + ('POST', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['generic_error']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = "Error adding member test to cifs local group BUILTIN\\GUESTS on vserver" + assert msg in error + + +def test_remove_cifs_group_member_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('GET', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['group_member_record']), + ('DELETE', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['empty_good']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + 'state': 'absent' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_error_remove_cifs_group_member_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('GET', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 
'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['group_member_record']), + ('DELETE', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['generic_error']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + 'state': 'absent' + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = "Error removing member test from cifs local group BUILTIN\\GUESTS on vserver ansible" + assert msg in error + + +def test_successfully_add_members_rest_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('GET', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['group_member_record']), + ]) + module_args = { + 'vserver': 'ansible', + 'group': 'BUILTIN\\GUESTS', + 'member': 'test', + } + assert not call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_successfully_remove_members_rest_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-groups', SRR['group_record']), + ('GET', 'protocols/cifs/local-groups/671aa46e-11ad-11ec-a267-005056b30cfa/' + 'S-1-5-21-256008430-3394229847-3930036330-1001/members', SRR['empty_records']), + ]) + module_args = { + 'state': 'absent' + } + assert not call_main(my_main, ARGS_REST, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user.py new file mode 100644 index 000000000..812512a06 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user.py @@ -0,0 +1,204 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, call_main, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_local_user \ + import NetAppOntapCifsLocalUser as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'local_user_sid': (200, { + "records": [{ + "sid": "S-1-5-21-256008430-3394229847-3930036330-1001", + "members": [{ + "name": "string" + }], + "name": "SMB_SERVER01\\username", + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + "description": "This is a local group", + "full_name": "User Name", + "account_disabled": False + }] + }, None), + 'svm_uuid': (200, {"records": [ + { + 'uuid': 'e3cb5c7f-cd20' + }], "num_records": 1}, None), +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'vserver', + 'name': "username" +} + + +def test_low_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + error = create_module(my_module, DEFAULT_ARGS, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error: na_ontap_cifs_local_user only supports REST, and requires ONTAP 9.10.1 
or later.' + assert msg in error + + +def test_get_svm_uuid_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['generic_error']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.get_svm_uuid, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error fetching vserver vserver: calling: svm/svms: got Expected error.' == error + + +def test_get_cifs_local_user_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/cifs/local-users', SRR['zero_records']), + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_cifs_local_user() is None + + +def test_get_cifs_local_user_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/cifs/local-users', SRR['generic_error']), + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching cifs/local-user username: calling: protocols/cifs/local-users: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_cifs_local_user, 'fail')['msg'] + + +def test_get_cifs_local_user(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/cifs/local-users', SRR['local_user_sid']), + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_cifs_local_user() is not None + + +def test_create_cifs_local_user(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/cifs/local-users', SRR['empty_records']), + ('POST', 'protocols/cifs/local-users', SRR['empty_good']) + ]) + module_args = {'name': 'username', + 'user_password': 'password', + 'account_disabled': 'False', + 'full_name': 'User Name', + 'description': 'Test user'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_cifs_local_user_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('POST', 'protocols/cifs/local-users', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['name'] = 'username' + my_obj.parameters['user_password'] = 'password' + my_obj.parameters['account_disabled'] = False + my_obj.parameters['full_name'] = 'User Name' + my_obj.parameters['description'] = 'This is a local group' + error = expect_and_capture_ansible_exception(my_obj.create_cifs_local_user, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error creating CIFS local users with name username: calling: protocols/cifs/local-users: got Expected error.' 
== error + + +def test_delete_cifs_local_user(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/cifs/local-users', SRR['local_user_sid']), + ('DELETE', 'protocols/cifs/local-users/e3cb5c7f-cd20/S-1-5-21-256008430-3394229847-3930036330-1001', SRR['empty_good']) + ]) + module_args = {'name': 'username', + 'state': 'absent', + 'user_password': 'password', + 'description': 'This is a local group'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_cifs_local_user_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('DELETE', 'protocols/cifs/local-users/e3cb5c7f-cd20/S-1-5-21-256008430-3394229847-3930036330-1001', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.svm_uuid = 'e3cb5c7f-cd20' + my_obj.sid = 'S-1-5-21-256008430-3394229847-3930036330-1001' + my_obj.parameters['name'] = 'username' + my_obj.parameters['state'] = 'absent' + my_obj.parameters['user_password'] = 'password' + my_obj.parameters['description'] = 'This is a local group' + error = expect_and_capture_ansible_exception(my_obj.delete_cifs_local_user, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error while deleting CIFS local user: calling: '\ + 'protocols/cifs/local-users/e3cb5c7f-cd20/S-1-5-21-256008430-3394229847-3930036330-1001: got Expected error.' 
== error + + +def test_modify_cifs_local_user(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/cifs/local-users', SRR['local_user_sid']), + ('PATCH', 'protocols/cifs/local-users/e3cb5c7f-cd20/S-1-5-21-256008430-3394229847-3930036330-1001', SRR['empty_good']) + ]) + module_args = {'name': 'username', + 'user_password': 'mypassword', + 'description': 'This is a local group2', + 'account_disabled': True, + 'full_name': 'Full Name'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_cifs_local_user_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('PATCH', 'protocols/cifs/local-users/e3cb5c7f-cd20/S-1-5-21-256008430-3394229847-3930036330-1001', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.svm_uuid = 'e3cb5c7f-cd20' + my_obj.sid = 'S-1-5-21-256008430-3394229847-3930036330-1001' + my_obj.parameters['name'] = 'username' + my_obj.parameters['user_password'] = 'mypassword' + my_obj.parameters['description'] = 'This is a local group2' + current = {'description': 'This is a local group'} + error = expect_and_capture_ansible_exception(my_obj.modify_cifs_local_user, 'fail', current)['msg'] + print('Info: %s' % error) + assert 'Error while modifying CIFS local user: calling: '\ + 'protocols/cifs/local-users/e3cb5c7f-cd20/S-1-5-21-256008430-3394229847-3930036330-1001: got Expected error.' 
== error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_modify.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_modify.py new file mode 100644 index 000000000..44e75a856 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_modify.py @@ -0,0 +1,223 @@ +''' unit tests ONTAP Ansible module: na_ontap_cifs_local_user_modify ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_local_user_modify \ + import NetAppOntapCifsLocalUserModify as cifs_user_module # module under test + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'cifs_user_record': (200, { + "records": [{ + 'vserver': 'ansible', + 'user_name': 'ANSIBLE\\Administrator', + 'is_account_disabled': False, + 'full_name': 'test user', + 'description': 'builtin admin' + }] + }, None) +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, 
kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'local_user': + xml = self.build_local_user_info() + elif self.type == 'local_user_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_local_user_info(): + ''' build xml data for cifs-local-user ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': { + 'cifs-local-user': { + 'user-name': 'ANSIBLE\\Administrator', + 'is-account-disabled': 'false', + 'vserver': 'ansible', + 'full-name': 'test user', + 'description': 'builtin admin' + } + } + } + xml.translate_struct(data) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.onbox = False + + def set_default_args(self, use_rest=None): + if self.onbox: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + vserver = 'ansible' + name = 'ANSIBLE\\Administrator' + is_account_disabled = False + + else: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + vserver = 'ansible' + name = 'ANSIBLE\\Administrator' + is_account_disabled = False + + args = dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'vserver': vserver, + 'name': name, + 'is_account_disabled': is_account_disabled + }) + + if use_rest is not None: + args['use_rest'] = use_rest + + return args + + @staticmethod + def get_local_user_mock_object(cx_type='zapi', kind=None): + local_user_obj = cifs_user_module() + if cx_type == 'zapi': + if kind is None: + local_user_obj.server = MockONTAPConnection() + else: + local_user_obj.server = MockONTAPConnection(kind=kind) 
+ return local_user_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + cifs_user_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_get_called(self): + ''' test get_cifs_local_user_modify for non-existent user''' + set_module_args(self.set_default_args(use_rest='Never')) + print('starting') + my_obj = cifs_user_module() + print('use_rest:', my_obj.use_rest) + my_obj.server = self.server + assert my_obj.get_cifs_local_user is not None + + def test_ensure_get_called_existing(self): + ''' test get_cifs_local_user_modify for existing user''' + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = cifs_user_module() + my_obj.server = MockONTAPConnection(kind='local_user') + assert my_obj.get_cifs_local_user() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_local_user_modify.NetAppOntapCifsLocalUserModify.modify_cifs_local_user') + def test_successful_modify(self, modify_cifs_local_user): + ''' enabling local cifs user and testing idempotency ''' + data = self.set_default_args(use_rest='Never') + data['is_account_disabled'] = True + set_module_args(data) + my_obj = cifs_user_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('local_user') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # to reset na_helper from remembering the previous 'changed' value + data = self.set_default_args(use_rest='Never') + set_module_args(data) + my_obj = cifs_user_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('local_user') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + data = self.set_default_args(use_rest='Never') + set_module_args(data) + my_obj = cifs_user_module() + if not self.onbox: + 
my_obj.server = MockONTAPConnection('local_user_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_cifs_local_user(modify={}) + assert 'Error modifying local CIFS user' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_local_user_mock_object(cx_type='rest').apply() + msg = 'calling: private/cli/vserver/cifs/users-and-groups/local-user: got %s.' % SRR['generic_error'][2] + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_modify_rest(self, mock_request): + data = self.set_default_args() + data['is_account_disabled'] = True + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cifs_user_record'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_local_user_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_modify_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cifs_user_record'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_local_user_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password.py 
new file mode 100644 index 000000000..62c8352b7 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password.py @@ -0,0 +1,66 @@ +# (c) 2021-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP disks Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import call_main, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_local_user_set_password import main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +ZRR = zapi_responses({ +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'never', + 'user_password': 'test', + 'user_name': 'user1', + 'vserver': 'svm1', +} + + +def test_successful_set_password(patch_ansible): + ''' successful set ''' + register_responses([ + ('ZAPI', 'cifs-local-user-set-password', ZRR['success']), + ]) + assert call_main(my_main, DEFAULT_ARGS)['changed'] + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + error = 'missing required arguments:' + assert error in call_main(my_main, {}, fail=True)['msg'] + + +def 
test_if_all_methods_catch_exception(patch_ansible): + register_responses([ + ('ZAPI', 'cifs-local-user-set-password', ZRR['error']), + ]) + assert 'Error setting password ' in call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + error = 'Error: the python NetApp-Lib module is required. Import error: None' + assert error in call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password_rest.py new file mode 100644 index 000000000..f32a1b2fa --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_local_user_set_password_rest.py @@ -0,0 +1,101 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + patch_ansible, call_main, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_local_user_set_password \ + import NetAppONTAPCifsSetPassword as my_module, main as my_main # module under test + +if not 
netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'svm_uuid': (200, {"records": [ + { + 'uuid': 'e3cb5c7f-cd20' + }], "num_records": 1}, None), + 'local_user_sid': (200, {"records": [{'sid': '1234-sd'}]}, None) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'user_name': 'carchi8py', + 'user_password': 'p@SSWord', + 'vserver': 'vserver' +} + + +def test_change_password(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/cifs/local-users', SRR['local_user_sid']), + ('PATCH', 'protocols/cifs/local-users/e3cb5c7f-cd20/1234-sd', SRR['empty_good']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {})['changed'] + + +def test_get_svm_uuid_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['generic_error']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.get_svm_uuid, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error fetching vserver vserver: calling: svm/svms: got Expected error.' == error + + +def test_get_cifs_local_users_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/local-users', SRR['generic_error']), + # 2nd call + ('GET', 'protocols/cifs/local-users', SRR['zero_records']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.get_user_sid, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error fetching cifs/local-user carchi8py: calling: protocols/cifs/local-users: got Expected error.' 
== error + # no user + error = 'Error no cifs/local-user with name carchi8py' + assert error in expect_and_capture_ansible_exception(my_obj.get_user_sid, 'fail')['msg'] + + +def test_patch_cifs_local_users_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/cifs/local-users', SRR['local_user_sid']), + ('PATCH', 'protocols/cifs/local-users/e3cb5c7f-cd20/1234-sd', SRR['generic_error']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.cifs_local_set_passwd_rest, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error change password for user carchi8py: calling: protocols/cifs/local-users/e3cb5c7f-cd20/1234-sd: got Expected error.' == error + + +def test_fail_old_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'use_rest': 'always' + } + error = 'Error: REST requires ONTAP 9.10.1 or later for protocols/cifs/local-users APIs.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_server.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_server.py new file mode 100644 index 000000000..820c33d17 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cifs_server.py @@ -0,0 +1,770 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_cifs_server ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception, AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cifs_server \ + import NetAppOntapcifsServer as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'cifs_record': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "enabled": True, + "security": { + 
"encrypt_dc_connection": False, + "smb_encryption": False, + "kdc_encryption": False, + "smb_signing": False, + "restrict_anonymous": "no_enumeration", + "aes_netlogon_enabled": False, + "ldap_referral_enabled": False, + "session_security": "none", + "try_ldap_channel_binding": True, + "use_ldaps": False, + "use_start_tls": False + }, + "target": { + "name": "20:05:00:50:56:b3:0c:fa" + }, + "name": "cifs_server_name" + } + ], + "num_records": 1 + }, None + ), + 'cifs_record_disabled': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "enabled": False, + "security": { + "encrypt_dc_connection": False, + "smb_encryption": False, + "kdc_encryption": False, + "smb_signing": False, + "restrict_anonymous": "no_enumeration", + "aes_netlogon_enabled": False, + "ldap_referral_enabled": False, + "session_security": "none", + "try_ldap_channel_binding": True, + "use_ldaps": False, + "use_start_tls": False + }, + "target": { + "nam,e": "20:05:00:50:56:b3:0c:fa" + }, + "name": "cifs_server_name" + } + ], + "num_records": 1 + }, None + ), + 'cifs_records_renamed': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "enabled": True, + "security": { + "encrypt_dc_connection": False, + "smb_encryption": False, + "kdc_encryption": False, + "smb_signing": False, + "restrict_anonymous": "no_enumeration", + "aes_netlogon_enabled": False, + "ldap_referral_enabled": False, + "session_security": "none", + "try_ldap_channel_binding": True, + "use_ldaps": False, + "use_start_tls": False + }, + "target": { + "name": "20:05:00:50:56:b3:0c:fa" + }, + "name": "cifs" + } + ], + "num_records": 1 + }, None + ), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + + +cifs_record_info = { + 'num-records': 1, + 'attributes-list': { + 'cifs-server-config': { + 'cifs-server': 'cifs_server', + 'administrative-status': 'up'} + } +} 
+cifs_record_disabled_info = { + 'num-records': 1, + 'attributes-list': { + 'cifs-server-config': { + 'cifs-server': 'cifs_server', + 'administrative-status': 'down'} + } +} + +ZRR = zapi_responses({ + 'cifs_record_info': build_zapi_response(cifs_record_info), + 'cifs_record_disabled_info': build_zapi_response(cifs_record_disabled_info) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'cifs_server_name': 'cifs_server', + 'vserver': 'vserver', + 'use_rest': 'never', + 'feature_flags': {'no_cserver_ems': True} +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_get(): + register_responses([ + ('cifs-server-get-iter', ZRR['cifs_record_info']) + ]) + cifs_obj = create_module(my_module, DEFAULT_ARGS) + result = cifs_obj.get_cifs_server() + assert result + + +def test_create_unsupport_zapi(): + """ check for zapi unsupported options """ + module_args = { + "use_rest": "never", + "encrypt_dc_connection": "false", + "smb_encryption": "false", + "kdc_encryption": "false", + "smb_signing": "false" + } + msg = 'Error: smb_signing ,encrypt_dc_connection ,kdc_encryption ,smb_encryption options supported only with REST.' 
+ assert msg == create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_create(): + register_responses([ + ('cifs-server-get-iter', ZRR['empty']), + ('cifs-server-create', ZRR['success']) + ]) + module_args = { + 'workgroup': 'test', + 'ou': 'ou', + 'domain': 'test', + 'admin_user_name': 'user1', + 'admin_password': 'password' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_with_service_state_started(): + register_responses([ + ('cifs-server-get-iter', ZRR['empty']), + ('cifs-server-create', ZRR['success']), + # idempotent check + ('cifs-server-get-iter', ZRR['cifs_record_info']) + ]) + module_args = { + 'workgroup': 'test', + 'ou': 'ou', + 'domain': 'test', + 'admin_user_name': 'user1', + 'admin_password': 'password', + 'service_state': 'started' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_with_service_state_stopped(): + register_responses([ + ('cifs-server-get-iter', ZRR['empty']), + ('cifs-server-create', ZRR['success']), + # idempotent check + ('cifs-server-get-iter', ZRR['cifs_record_disabled_info']) + ]) + module_args = { + 'workgroup': 'test', + 'ou': 'ou', + 'domain': 'test', + 'admin_user_name': 'user1', + 'admin_password': 'password', + 'service_state': 'stopped' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_with_force(): + register_responses([ + ('cifs-server-get-iter', ZRR['empty']), + ('cifs-server-create', ZRR['success']), + ]) + module_args = { + 'workgroup': 'test', + 'ou': 'ou', + 'domain': 'test', + 'admin_user_name': 'user1', + 'admin_password': 'password', + 'force': 'true' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_idempotent(): + 
register_responses([ + ('cifs-server-get-iter', ZRR['cifs_record_info']) + ]) + module_args = { + 'state': 'present' + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_idempotent(): + register_responses([ + ('cifs-server-get-iter', ZRR['empty']) + ]) + module_args = { + 'state': 'absent' + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete(): + register_responses([ + ('cifs-server-get-iter', ZRR['cifs_record_info']), + ('cifs-server-delete', ZRR['success']), + ]) + module_args = { + 'workgroup': 'test', + 'ou': 'ou', + 'domain': 'test', + 'admin_user_name': 'user1', + 'admin_password': 'password', + 'force': 'false', + 'state': 'absent' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_start_service_state(): + register_responses([ + ('cifs-server-get-iter', ZRR['cifs_record_info']), + ('cifs-server-stop', ZRR['success']), + ]) + module_args = { + 'service_state': 'stopped' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args) + + +def test_stop_service_state(): + register_responses([ + ('cifs-server-get-iter', ZRR['cifs_record_disabled_info']), + ('cifs-server-start', ZRR['success']), + ]) + module_args = { + 'service_state': 'started' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args) + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('cifs-server-create', ZRR['error']), + ('cifs-server-start', ZRR['error']), + ('cifs-server-stop', ZRR['error']), + ('cifs-server-delete', ZRR['error']) + ]) + module_args = {} + + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + error = expect_and_capture_ansible_exception(my_obj.create_cifs_server, 'fail')['msg'] + assert 'Error Creating cifs_server cifs_server: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.start_cifs_server, 'fail')['msg'] + assert 'Error modifying cifs_server cifs_server: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.stop_cifs_server, 'fail')['msg'] + assert 'Error modifying cifs_server cifs_server: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.delete_cifs_server, 'fail')['msg'] + assert 'Error deleting cifs_server cifs_server: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + +ARGS_REST = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always', + 'vserver': 'test_vserver', + 'name': 'cifs_server_name', +} + + +def test_rest_error_get(): + '''Test error rest get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on fetching cifs:' in error + + +def test_module_error_ontap_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + module_args = {'use_rest': 'always', 'force': True} + error = create_module(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Minimum version of ONTAP for force is (9, 11)' in error + + +def test_rest_successful_create(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('POST', 'protocols/cifs/services', SRR['empty_good']), + ]) + assert create_and_apply(my_module, ARGS_REST) + + +def test_rest_successful_create_with_force(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/cifs/services', 
SRR['empty_records']), + ('POST', 'protocols/cifs/services', SRR['empty_good']), + ]) + module_args = { + 'force': True + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_rest_successful_create_with_user(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('POST', 'protocols/cifs/services', SRR['empty_good']), + # idempotent check. + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']), + ]) + module_args = { + 'admin_user_name': 'test_user', + 'admin_password': 'pwd' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + assert not create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_create_with_service_state(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('POST', 'protocols/cifs/services', SRR['empty_good']), + # idempotent check. 
+ ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['cifs_record_disabled']), + ]) + module_args = { + 'admin_user_name': 'test_user', + 'admin_password': 'pwd', + 'service_state': 'stopped' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + assert not create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_create_with_ou(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('POST', 'protocols/cifs/services', SRR['empty_good']), + ]) + module_args = { + 'ou': 'ou' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_create_with_domain(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('POST', 'protocols/cifs/services', SRR['empty_good']), + ]) + module_args = { + 'domain': 'domain' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_create_with_security(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('POST', 'protocols/cifs/services', SRR['empty_good']), + ]) + module_args = { + 'smb_encryption': True, + 'smb_signing': True, + 'kdc_encryption': True, + 'encrypt_dc_connection': True, + 'restrict_anonymous': 'no_enumeration' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_version_error_with_security_encryption(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']) + ]) + module_args = { + 'use_rest': 'always', + 'encrypt_dc_connection': True, + } + error = create_module(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Minimum version of ONTAP for 
encrypt_dc_connection is (9, 8)' in error + + +def test_module_error_ontap_version_security(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']) + ]) + module_args = { + "aes_netlogon_enabled": False + } + error = create_module(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Minimum version of ONTAP for aes_netlogon_enabled is (9, 10, 1)' in error + + +def test_rest_error_create(): + '''Test error rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('POST', 'protocols/cifs/services', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on creating cifs:' in error + + +def test_delete_rest(): + ''' Test delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']), + ('DELETE', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = { + 'state': 'absent', + 'admin_user_name': 'test_user', + 'admin_password': 'pwd' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_delete_with_force_rest(): + ''' Test delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']), + ('DELETE', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = { + 'state': 'absent', + 'force': True, + 'admin_user_name': 'test_user', + 'admin_password': 'pwd' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_error_delete_rest(): + ''' Test error delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']), + ('DELETE', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']), + ]) 
+ module_args = { + 'state': 'absent' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on deleting cifs server:' in error + + +def test_rest_successful_disable(): + '''Test successful rest disable''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = { + 'service_state': 'stopped' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_enable(): + '''Test successful rest enable''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['cifs_record_disabled']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = { + 'service_state': 'started' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_security_modify(): + '''Test successful rest enable''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['cifs_record_disabled']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = { + 'smb_encryption': True, + 'smb_signing': True, + 'kdc_encryption': True, + 'restrict_anonymous': "no_enumeration" + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_security_modify_encrypt(): + '''Test successful rest enable''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'protocols/cifs/services', SRR['cifs_record_disabled']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = { + 'encrypt_dc_connection': True + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] 
+ + +def test_rest_negative_security_options_modify(): + '''Test error rest enable''' + register_responses([ + ]) + module_args = { + "aes_netlogon_enabled": True, + "ldap_referral_enabled": True, + "session_security": "seal", + "try_ldap_channel_binding": False, + "use_ldaps": True, + "use_start_tls": True + } + msg = 'parameters are mutually exclusive: use_ldaps|use_start_tls' + assert msg in create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_successful_security_options_modify(): + '''Test successful rest enable''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/cifs/services', SRR['cifs_record_disabled']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = { + "aes_netlogon_enabled": True, + "ldap_referral_enabled": True, + "session_security": "seal", + "try_ldap_channel_binding": False, + "use_ldaps": True + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_rename_cifs(): + '''Test successful rest rename''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('GET', 'protocols/cifs/services', SRR['cifs_record_disabled']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']) + ]) + module_args = { + 'from_name': 'cifs_server_name', + 'name': 'cifs', + 'force': True, + 'admin_user_name': 'test_user', + 'admin_password': 'pwd' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_rename_modify_cifs(): + '''Test successful rest rename''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', 
SRR['empty_good']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']) + ]) + module_args = { + 'from_name': 'cifs_server_name', + 'name': 'cifs', + 'force': True, + 'admin_user_name': 'test_user', + 'admin_password': 'pwd', + 'service_state': 'stopped' + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_error_rest_rename_cifs_without_force(): + '''Test error rest rename with force false''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']), + ]) + module_args = { + 'from_name': 'cifs_servers', + 'name': 'cifs1', + 'force': False, + 'admin_user_name': 'test_user', + 'admin_password': 'pwd' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error: cannot rename cifs server from cifs_servers to cifs1 without force.' in error + + +def test_error_rest_rename_error_state(): + '''Test error rest rename with service state as started''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']), + ]) + module_args = { + 'from_name': 'cifs_servers', + 'name': 'cifs1', + 'force': True, + 'admin_user_name': 'test_user', + 'admin_password': 'pwd', + 'service_state': 'started' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + msg = 'Error on modifying cifs server: calling: protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa:' + assert msg in error + + +def test_error_rest_rename_cifs(): + '''Test error rest rename''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/cifs/services', SRR['empty_records']), + ('GET', 
'protocols/cifs/services', SRR['empty_records']), + ]) + module_args = { + 'from_name': 'cifs_servers_test', + 'name': 'cifs1', + 'force': True, + 'admin_user_name': 'test_user', + 'admin_password': 'pwd' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error renaming cifs server: cifs1 - no cifs server with from_name: cifs_servers_test' in error + + +def test_rest_error_disable(): + '''Test error rest disable''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']), + ('PATCH', 'protocols/cifs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']), + ]) + module_args = { + 'service_state': 'stopped' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on modifying cifs server:' in error + + +def test_rest_successful_create_idempotency(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['cifs_record']) + ]) + module_args = {'use_rest': 'always'} + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] is False + + +def test_rest_successful_delete_idempotency(): + '''Test successful rest delete''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/cifs/services', SRR['empty_records']) + ]) + module_args = {'use_rest': 'always', 'state': 'absent'} + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] is False diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster.py new file mode 100644 index 000000000..89fe069a3 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster.py @@ -0,0 +1,688 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_cluster ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock, call +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster \ + import NetAppONTAPCluster as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'cluster': + xml = self.build_cluster_info() + if self.type == 'cluster_success': + xml = self.build_cluster_info_success() + elif self.type == 'cluster_add': + xml = self.build_add_node_info() + elif self.type == 'cluster_extra_input': + self.type = 'cluster' # success on second call + raise netapp_utils.zapi.NaApiError(code='TEST1', message="Extra input: single-node-cluster") + elif self.type == 'cluster_extra_input_loop': + raise netapp_utils.zapi.NaApiError(code='TEST2', message="Extra input: single-node-cluster") + elif self.type == 'cluster_extra_input_other': + raise netapp_utils.zapi.NaApiError(code='TEST3', message="Extra input: other-unexpected-element") + elif self.type == 
'cluster_fail': + raise netapp_utils.zapi.NaApiError(code='TEST4', message="This exception is from the unit test") + self.xml_out = xml + return xml + + def autosupport_log(self): + ''' mock autosupport log''' + return None + + @staticmethod + def build_cluster_info(): + ''' build xml data for cluster-create-join-progress-info ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'attributes': { + 'cluster-create-join-progress-info': { + 'is-complete': 'true', + 'status': 'whatever' + } + } + } + xml.translate_struct(attributes) + return xml + + @staticmethod + def build_cluster_info_success(): + ''' build xml data for cluster-create-join-progress-info ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'attributes': { + 'cluster-create-join-progress-info': { + 'is-complete': 'false', + 'status': 'success' + } + } + } + xml.translate_struct(attributes) + return xml + + @staticmethod + def build_add_node_info(): + ''' build xml data for cluster-create-add-node-status-info ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'attributes-list': { + 'cluster-create-add-node-status-info': { + 'failure-msg': '', + 'status': 'success' + } + } + } + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.use_vsim = False + + def set_default_args(self, use_rest='never'): + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + cluster_name = 'abc' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'cluster_name': cluster_name, + 'use_rest': use_rest + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('time.sleep') + 
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + def test_ensure_apply_for_cluster_called(self, get_cl_id, sleep_mock): + ''' creating cluster and checking idempotency ''' + get_cl_id.return_value = None + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_cluster_apply: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('time.sleep') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.create_cluster') + def test_cluster_create_called(self, cluster_create, get_cl_id, sleep_mock): + ''' creating cluster''' + get_cl_id.return_value = None + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_success') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_cluster_apply: %s' % repr(exc.value)) + cluster_create.assert_called_with() + + @patch('time.sleep') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + def test_cluster_create_old_api(self, get_cl_id, sleep_mock): + ''' creating cluster''' + get_cl_id.return_value = None + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_extra_input') + 
with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_cluster_apply: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + def test_cluster_create_old_api_loop(self, get_cl_id): + ''' creating cluster''' + get_cl_id.return_value = None + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_extra_input_loop') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = 'TEST2:Extra input: single-node-cluster' + print('Info: test_cluster_apply: %s' % repr(exc.value)) + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + def test_cluster_create_old_api_other_extra(self, get_cl_id): + ''' creating cluster''' + get_cl_id.return_value = None + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_extra_input_other') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = 'TEST3:Extra input: other-unexpected-element' + print('Info: test_cluster_apply: %s' % repr(exc.value)) + assert msg in exc.value.args[0]['msg'] + + @patch('time.sleep') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_ip_addresses') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.add_node') + def test_add_node_called(self, add_node, 
get_cl_id, get_cl_ips, sleep_mock): + ''' creating add_node''' + get_cl_ips.return_value = [] + get_cl_id.return_value = None + data = self.set_default_args() + del data['cluster_name'] + data['cluster_ip_address'] = '10.10.10.10' + set_module_args(data) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_add') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_cluster_apply: %s' % repr(exc.value)) + add_node.assert_called_with() + assert exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_cluster() + assert 'Error creating cluster' in exc.value.args[0]['msg'] + data = self.set_default_args() + data['cluster_ip_address'] = '10.10.10.10' + set_module_args(data) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.add_node() + assert 'Error adding node with ip' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_ip_addresses') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.add_node') + def test_add_node_idempotent(self, add_node, get_cl_id, get_cl_ips): + ''' creating add_node''' + get_cl_ips.return_value = ['10.10.10.10'] + get_cl_id.return_value = None + data = self.set_default_args() + del data['cluster_name'] + data['cluster_ip_address'] = '10.10.10.10' + set_module_args(data) + my_obj = my_module() + 
my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_add') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_cluster_apply: %s' % repr(exc.value)) + try: + add_node.assert_not_called() + except AttributeError: + # not supported with python <= 3.4 + pass + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_ip_addresses') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.remove_node') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.node_remove_wait') + def test_remove_node_ip(self, wait, remove_node, get_cl_id, get_cl_ips): + ''' creating add_node''' + get_cl_ips.return_value = ['10.10.10.10'] + get_cl_id.return_value = None + wait.return_value = None + data = self.set_default_args() + # del data['cluster_name'] + data['cluster_ip_address'] = '10.10.10.10' + data['state'] = 'absent' + set_module_args(data) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_add') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_cluster_apply: %s' % repr(exc.value)) + remove_node.assert_called_with() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_ip_addresses') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.remove_node') + def test_remove_node_ip_idempotent(self, remove_node, get_cl_id, get_cl_ips): 
+ ''' creating add_node''' + get_cl_ips.return_value = [] + get_cl_id.return_value = None + data = self.set_default_args() + # del data['cluster_name'] + data['cluster_ip_address'] = '10.10.10.10' + data['state'] = 'absent' + set_module_args(data) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_add') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_cluster_apply: %s' % repr(exc.value)) + try: + remove_node.assert_not_called() + except AttributeError: + # not supported with python <= 3.4 + pass + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_nodes') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.remove_node') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.node_remove_wait') + def test_remove_node_name(self, wait, remove_node, get_cl_id, get_cl_nodes): + ''' creating add_node''' + get_cl_nodes.return_value = ['node1', 'node2'] + get_cl_id.return_value = None + wait.return_value = None + data = self.set_default_args() + # del data['cluster_name'] + data['node_name'] = 'node2' + data['state'] = 'absent' + data['force'] = True + set_module_args(data) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_add') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_cluster_apply: %s' % repr(exc.value)) + remove_node.assert_called_with() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_nodes') + 
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.get_cluster_identity') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster.NetAppONTAPCluster.remove_node') + def test_remove_node_name_idempotent(self, remove_node, get_cl_id, get_cl_nodes): + ''' creating add_node''' + get_cl_nodes.return_value = ['node1', 'node2'] + get_cl_id.return_value = None + data = self.set_default_args() + # del data['cluster_name'] + data['node_name'] = 'node3' + data['state'] = 'absent' + set_module_args(data) + my_obj = my_module() + my_obj.autosupport_log = Mock(return_value=None) + if not self.use_vsim: + my_obj.server = MockONTAPConnection('cluster_add') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_cluster_apply: %s' % repr(exc.value)) + try: + remove_node.assert_not_called() + except AttributeError: + # not supported with python <= 3.4 + pass + assert not exc.value.args[0]['changed'] + + def test_remove_node_name_and_id(self): + ''' creating add_node''' + data = self.set_default_args() + # del data['cluster_name'] + data['cluster_ip_address'] = '10.10.10.10' + data['node_name'] = 'node3' + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + print('Info: test_remove_node_name_and_id: %s' % repr(exc.value)) + msg = 'when state is "absent", parameters are mutually exclusive: cluster_ip_address|node_name' + assert msg in exc.value.args[0]['msg'] + + +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy_9_7_0')), None), + 'is_rest_95': (200, dict(version=dict(generation=9, major=5, minor=0, full='dummy_9_5_0')), None), + 'is_rest_96': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy_9_6_0')), None), + 'is_rest_97': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy_9_7_0')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': 
(200, {}, None), + 'zero_record': (200, {'records': []}, None), + 'precluster': (500, None, {'message': 'are available in precluster.'}), + 'cluster_identity': (200, {'location': 'Oz', 'name': 'abc'}, None), + 'nodes': (200, {'records': [ + {'name': 'node2', 'uuid': 'uuid2', 'cluster_interfaces': [{'ip': {'address': '10.10.10.2'}}]} + ]}, None), + 'end_of_sequence': (None, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), +} + + +def set_default_args(use_rest='auto'): + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + cluster_name = 'abc' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'cluster_name': cluster_name, + 'use_rest': use_rest + }) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create(mock_request, patch_ansible): + ''' create cluster ''' + args = dict(set_default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['precluster'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create_timezone(mock_request, patch_ansible): + ''' create cluster ''' + args = dict(set_default_args()) + args['timezone'] = {'name': 'America/Los_Angeles'} + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['precluster'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + 
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create_single(mock_request, patch_ansible): + ''' create cluster ''' + args = dict(set_default_args()) + args['single_node_cluster'] = True + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['precluster'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + post_call = call('POST', 'cluster', {'return_timeout': 30, 'single_node_cluster': True}, json={'name': 'abc'}, headers=None, files=None) + assert post_call in mock_request.mock_calls + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_modify(mock_request, patch_ansible): + ''' modify cluster location ''' + args = dict(set_default_args()) + args['cluster_location'] = 'Mars' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cluster_identity'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] is True + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_modify_timezone(mock_request, patch_ansible): + ''' modify cluster location ''' + args = dict(set_default_args()) + args['timezone'] = {'name': 'America/Los_Angeles'} + args['cluster_location'] = 'Mars' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cluster_identity'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + 
my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] is True + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_modify_idempotent(mock_request, patch_ansible): + ''' modify cluster location ''' + args = dict(set_default_args()) + args['cluster_location'] = 'Oz' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cluster_identity'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] is False + assert len(mock_request.mock_calls) == 2 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_add_node(mock_request, patch_ansible): + ''' modify cluster location ''' + args = dict(set_default_args()) + args['node_name'] = 'node2' + args['cluster_ip_address'] = '10.10.10.2' + + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cluster_identity'], # get + SRR['zero_record'], # get nodes + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] is True + assert len(mock_request.mock_calls) == 4 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_remove_node_by_ip(mock_request, patch_ansible): + ''' modify cluster location ''' + args = dict(set_default_args()) + # args['node_name'] = 'node2' + args['cluster_ip_address'] = '10.10.10.2' + args['state'] = 'absent' + + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cluster_identity'], # get + SRR['nodes'], # get nodes + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = 
my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] is True + assert len(mock_request.mock_calls) == 4 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_remove_node_by_ip_idem(mock_request, patch_ansible): + ''' modify cluster location ''' + args = dict(set_default_args()) + # args['node_name'] = 'node2' + args['cluster_ip_address'] = '10.10.10.3' + args['state'] = 'absent' + + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cluster_identity'], # get + SRR['nodes'], # get nodes + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] is False + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_remove_node_by_name(mock_request, patch_ansible): + ''' modify cluster location ''' + args = dict(set_default_args()) + args['node_name'] = 'node2' + # args['cluster_ip_address'] = '10.10.10.2' + args['state'] = 'absent' + args['force'] = True + + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cluster_identity'], # get + SRR['nodes'], # get nodes + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] is True + assert len(mock_request.mock_calls) == 4 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_remove_node_by_name_idem(mock_request, patch_ansible): + ''' modify cluster location ''' + args = dict(set_default_args()) + args['node_name'] = 'node3' + # args['cluster_ip_address'] = '10.10.10.2' + 
args['state'] = 'absent' + + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['cluster_identity'], # get + SRR['nodes'], # get nodes + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] is False + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_remove_node_by_name_rest_96(mock_request, patch_ansible): + ''' revert to ZAPI for 9.6 ''' + args = dict(set_default_args()) + args['node_name'] = 'node3' + # args['cluster_ip_address'] = '10.10.10.2' + args['state'] = 'absent' + + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_96'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + # revert to ZAPI for 9.6 + assert not my_obj.use_rest diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_ha.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_ha.py new file mode 100644 index 000000000..a03f5c5aa --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_ha.py @@ -0,0 +1,140 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for ONTAP Ansible module: na_ontap_cluster_ha ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible,\ + create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import 
patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster_ha \ + import NetAppOntapClusterHA as cluster_ha # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + +DEFAULT_ARGS = { + 'hostname': '10.10.10.10', + 'username': 'user', + 'password': 'pass', + 'state': 'present', + 'use_rest': 'never' +} + +cluster_ha_enabled = { + 'attributes': { + 'cluster-ha-info': {'ha-configured': 'true'} + } +} + +cluster_ha_disabled = { + 'attributes': { + 'cluster-ha-info': {'ha-configured': 'false'} + } +} + + +ZRR = zapi_responses({ + 'cluster_ha_enabled': build_zapi_response(cluster_ha_enabled), + 'cluster_ha_disabled': build_zapi_response(cluster_ha_disabled) +}) + + +SRR = rest_responses({ + 'cluster_ha_enabled': (200, {"records": [{ + 'configured': True + }], "num_records": 1}, None), + 'cluster_ha_disabled': (200, {"records": [{ + 'configured': False + }], "num_records": 1}, None) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname"] + error = create_module(cluster_ha, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_enable_cluster_ha(): + ''' enable cluster ha ''' + register_responses([ + ('cluster-ha-get', ZRR['cluster_ha_disabled']), + ('cluster-ha-modify', ZRR['success']), + ('cluster-ha-get', ZRR['cluster_ha_enabled']) + ]) + assert 
create_and_apply(cluster_ha, DEFAULT_ARGS)['changed'] + assert not create_and_apply(cluster_ha, DEFAULT_ARGS)['changed'] + + +def test_disable_cluster_ha(): + ''' disable cluster ha ''' + register_responses([ + ('cluster-ha-get', ZRR['cluster_ha_enabled']), + ('cluster-ha-modify', ZRR['success']), + ('cluster-ha-get', ZRR['cluster_ha_disabled']), + ]) + assert create_and_apply(cluster_ha, DEFAULT_ARGS, {'state': 'absent'})['changed'] + assert not create_and_apply(cluster_ha, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('cluster-ha-get', ZRR['error']), + ('cluster-ha-modify', ZRR['error']), + ('cluster-ha-modify', ZRR['error']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'private/cli/cluster/ha', SRR['generic_error']), + ('PATCH', 'private/cli/cluster/ha', SRR['generic_error']), + ('PATCH', 'private/cli/cluster/ha', SRR['generic_error']) + ]) + ha_obj = create_module(cluster_ha, DEFAULT_ARGS) + assert 'Error fetching cluster HA' in expect_and_capture_ansible_exception(ha_obj.get_cluster_ha_enabled, 'fail')['msg'] + assert 'Error modifying cluster HA to true' in expect_and_capture_ansible_exception(ha_obj.modify_cluster_ha, 'fail', 'true')['msg'] + assert 'Error modifying cluster HA to false' in expect_and_capture_ansible_exception(ha_obj.modify_cluster_ha, 'fail', 'false')['msg'] + + ucm_obj = create_module(cluster_ha, DEFAULT_ARGS, {'use_rest': 'always'}) + assert 'Error fetching cluster HA' in expect_and_capture_ansible_exception(ucm_obj.get_cluster_ha_enabled, 'fail')['msg'] + assert 'Error modifying cluster HA to true' in expect_and_capture_ansible_exception(ucm_obj.modify_cluster_ha, 'fail', 'true')['msg'] + assert 'Error modifying cluster HA to false' in expect_and_capture_ansible_exception(ucm_obj.modify_cluster_ha, 'fail', 'false')['msg'] + + +def test_enable_cluster_ha_rest(): + ''' enable cluster ha in rest ''' + register_responses([ + ('GET', 'cluster', 
SRR['is_rest_9_9_0']), + ('GET', 'private/cli/cluster/ha', SRR['cluster_ha_disabled']), + ('PATCH', 'private/cli/cluster/ha', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'private/cli/cluster/ha', SRR['cluster_ha_enabled']) + ]) + assert create_and_apply(cluster_ha, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + assert not create_and_apply(cluster_ha, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + + +def test_disable_cluster_ha_rest(): + ''' disable cluster ha in rest ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'private/cli/cluster/ha', SRR['cluster_ha_enabled']), + ('PATCH', 'private/cli/cluster/ha', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'private/cli/cluster/ha', SRR['cluster_ha_disabled']), + ]) + args = {'use_rest': 'always', 'state': 'absent'} + assert create_and_apply(cluster_ha, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(cluster_ha, DEFAULT_ARGS, args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_peer.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_peer.py new file mode 100644 index 000000000..7551a619e --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_cluster_peer.py @@ -0,0 +1,305 @@ +''' unit tests ONTAP Ansible module: na_ontap_cluster_peer ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, patch_ansible, create_and_apply +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from 
ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cluster_peer \ + import NetAppONTAPClusterPeer as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def update_cluster_peer_info_zapi(cluster_name, peer_addresses): + return { + 'num-records': 1, + 'attributes-list': { + 'cluster-peer-info': { + 'cluster-name': cluster_name, + 'peer-addresses': peer_addresses + } + } + } + + +ZRR = zapi_responses({ + 'cluster_peer_info_source': build_zapi_response(update_cluster_peer_info_zapi('cluster1', '1.2.3.6,1.2.3.7')), + 'cluster_peer_info_remote': build_zapi_response(update_cluster_peer_info_zapi('cluster2', '1.2.3.4,1.2.3.5')) +}) + + +DEFAULT_ARGS_ZAPI = { + 'source_intercluster_lifs': '1.2.3.4,1.2.3.5', + 'dest_intercluster_lifs': '1.2.3.6,1.2.3.7', + 'passphrase': 'netapp123', + 'dest_hostname': '10.20.30.40', + 'dest_cluster_name': 'cluster2', + 'encryption_protocol_proposed': 'none', + 'ipspace': 'Default', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'never', + 'feature_flags': {'no_cserver_ems': True} +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_successful_create(): + ''' Test successful create ''' + register_responses([ + ('cluster-peer-get-iter', ZRR['empty']), + ('cluster-peer-get-iter', ZRR['empty']), + ('cluster-peer-create', ZRR['empty']), 
+ ('cluster-peer-create', ZRR['empty']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS_ZAPI) + + +def test_create_idempotency(): + ''' Test create idempotency ''' + register_responses([ + ('cluster-peer-get-iter', ZRR['cluster_peer_info_source']), + ('cluster-peer-get-iter', ZRR['cluster_peer_info_remote']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS_ZAPI) + + +def test_successful_delete(): + ''' Test delete existing cluster peer ''' + module_args = { + 'state': 'absent', + 'source_cluster_name': 'cluster1' + } + register_responses([ + ('cluster-peer-get-iter', ZRR['cluster_peer_info_source']), + ('cluster-peer-get-iter', ZRR['cluster_peer_info_remote']), + ('cluster-peer-delete', ZRR['empty']), + ('cluster-peer-delete', ZRR['empty']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS_ZAPI, module_args) + + +def test_delete_idempotency(): + ''' Test delete idempotency ''' + module_args = { + 'state': 'absent', + 'source_cluster_name': 'cluster1' + } + register_responses([ + ('cluster-peer-get-iter', ZRR['empty']), + ('cluster-peer-get-iter', ZRR['empty']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS_ZAPI, module_args) + + +def test_error_get_cluster_peer(): + ''' Test get error ''' + register_responses([ + ('cluster-peer-get-iter', ZRR['error']), + ]) + error = create_and_apply(my_module, DEFAULT_ARGS_ZAPI, fail=True)['msg'] + assert 'Error fetching cluster peer source: NetApp API failed. Reason - 12345:synthetic error for UT purpose' == error + + +def test_error_delete_cluster_peer(): + ''' Test delete error ''' + module_args = { + 'state': 'absent', + 'source_cluster_name': 'cluster1' + } + register_responses([ + ('cluster-peer-get-iter', ZRR['cluster_peer_info_source']), + ('cluster-peer-get-iter', ZRR['cluster_peer_info_remote']), + ('cluster-peer-delete', ZRR['error']) + ]) + error = create_and_apply(my_module, DEFAULT_ARGS_ZAPI, module_args, fail=True)['msg'] + assert 'Error deleting cluster peer cluster2: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' == error + + +def test_error_create_cluster_peer(): + ''' Test create error ''' + register_responses([ + ('cluster-peer-get-iter', ZRR['empty']), + ('cluster-peer-get-iter', ZRR['empty']), + ('cluster-peer-create', ZRR['error']) + ]) + error = create_and_apply(my_module, DEFAULT_ARGS_ZAPI, fail=True)['msg'] + assert 'Error creating cluster peer [\'1.2.3.6\', \'1.2.3.7\']: NetApp API failed. Reason - 12345:synthetic error for UT purpose' == error + + +SRR = rest_responses({ + 'cluster_peer_dst': (200, {"records": [ + { + "uuid": "1e698aba-2aa6-11ec-b7be-005056b366e1", + "name": "mohan9cluster2", + "remote": { + "name": "mohan9cluster2", + "serial_number": "1-80-000011", + "ip_addresses": ["10.193.179.180"] + } + } + ], "num_records": 1}, None), + 'cluster_peer_src': (200, {"records": [ + { + "uuid": "1fg98aba-2aa6-11ec-b7be-005fgvb366e1", + "name": "mohanontap98cluster", + "remote": { + "name": "mohanontap98cluster", + "serial_number": "1-80-000031", + "ip_addresses": ["10.193.179.57"] + } + } + ], "num_records": 1}, None), + 'passphrase_response': (200, {"records": [ + { + "uuid": "4b71a7fb-45ff-11ec-95ea-005056b3b297", + "name": "", + "authentication": { + "passphrase": "ajdHOvAFSs0LOO0S27GtJZfV", + "expiry_time": "2022-02-22T22:30:18-05:00" + } + } + ], "num_records": 1}, None) +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always', + 'source_cluster_name': 'mohan9cluster2', + 'source_intercluster_lifs': ['10.193.179.180'], + 'dest_hostname': '10.193.179.197', + 'dest_cluster_name': 'mohanontap98cluster', + 'dest_intercluster_lifs': ['10.193.179.57'], + 'passphrase': 'ontapcluster_peer', + 'encryption_protocol_proposed': 'none', + 'ipspace': 'Default' +} + + +def test_successful_create_rest(): + ''' Test successful create ''' + args = DEFAULT_ARGS + del args['encryption_protocol_proposed'] + register_responses([ + ('GET', 'cluster', 
SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster/peers', SRR['empty_records']), + ('GET', 'cluster/peers', SRR['empty_records']), + ('POST', 'cluster/peers', SRR['empty_good']), + ('POST', 'cluster/peers', SRR['empty_good']) + ]) + assert create_and_apply(my_module, args) + + +def test_create_idempotency_rest(): + ''' Test successful create idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster/peers', SRR['cluster_peer_src']), + ('GET', 'cluster/peers', SRR['cluster_peer_dst']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS) + + +def test_successful_create_without_passphrase_rest(): + ''' Test successful create ''' + args = DEFAULT_ARGS + del args['passphrase'] + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster/peers', SRR['empty_records']), + ('GET', 'cluster/peers', SRR['empty_records']), + ('POST', 'cluster/peers', SRR['passphrase_response']), + ('POST', 'cluster/peers', SRR['empty_good']) + ]) + assert create_and_apply(my_module, args) + + +def test_successful_delete_rest(): + ''' Test successful delete ''' + module_args = {'state': 'absent'} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster/peers', SRR['cluster_peer_src']), + ('GET', 'cluster/peers', SRR['cluster_peer_dst']), + ('DELETE', 'cluster/peers/1fg98aba-2aa6-11ec-b7be-005fgvb366e1', SRR['empty_good']), + ('DELETE', 'cluster/peers/1e698aba-2aa6-11ec-b7be-005056b366e1', SRR['empty_good']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, module_args) + + +def test_delete_idempotency_rest(): + ''' Test delete idempotency ''' + module_args = {'state': 'absent'} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster/peers', SRR['empty_records']), + ('GET', 'cluster/peers', SRR['empty_records']) 
+ ]) + assert create_and_apply(my_module, DEFAULT_ARGS, module_args) + + +def test_error_get_cluster_peer_rest(): + ''' Test get error ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster/peers', SRR['generic_error']), + ]) + error = create_and_apply(my_module, DEFAULT_ARGS, fail=True)['msg'] + assert 'calling: cluster/peers: got Expected error.' == error + + +def test_error_delete_cluster_peer_rest(): + ''' Test delete error ''' + module_args = {'state': 'absent'} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster/peers', SRR['cluster_peer_src']), + ('GET', 'cluster/peers', SRR['cluster_peer_dst']), + ('DELETE', 'cluster/peers/1fg98aba-2aa6-11ec-b7be-005fgvb366e1', SRR['generic_error']), + ]) + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'calling: cluster/peers/1fg98aba-2aa6-11ec-b7be-005fgvb366e1: got Expected error.' == error + + +def test_error_create_cluster_peer_rest(): + ''' Test create error ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster/peers', SRR['empty_records']), + ('GET', 'cluster/peers', SRR['empty_records']), + ('POST', 'cluster/peers', SRR['generic_error']), + ]) + error = create_and_apply(my_module, DEFAULT_ARGS, fail=True)['msg'] + assert 'calling: cluster/peers: got Expected error.' 
== error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_command.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_command.py new file mode 100644 index 000000000..38bc6ec96 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_command.py @@ -0,0 +1,246 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP Command Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + call_main, create_module, expect_and_capture_ansible_exception, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_command import NetAppONTAPCommand as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'use_rest', +} + + +def cli_output(priv, result, translate=True): + prefix = 'NetApp Release' + print('HERE', 'start') + if priv == 'advanced': + prefix = '\n' + prefix + if result == "u'77'": + result = u'77' + elif result == "b'77'": + print('HERE', b'77') + result = b'77' + elif result is None: + result = b'7' + 
return { + 'cli-output': prefix, + 'cli-result-value': result + } + + +def build_zapi_response_raw(contents): + """ when testing special encodings, we cannot use build_zapi_response as translate_struct converts to text + """ + if netapp_utils.has_netapp_lib(): + xml = netapp_utils.zapi.NaElement('results') + xml.add_attr('status', 'status_ok') + xml.add_new_child('cli-output', contents['cli-output']) + xml.add_new_child('cli-result-value', contents['cli-result-value']) + # print('XML ut:', xml.to_string()) + xml.add_attr('status', 'passed') + return (xml, 'valid') + return ('netapp-lib is required', 'invalid') + + +ZRR = zapi_responses({ + 'cli_version': build_zapi_response_raw(cli_output(None, None)), + 'cli_version_advanced': build_zapi_response_raw(cli_output('advanced', None)), + 'cli_version_77': build_zapi_response(cli_output(None, '77')), + 'cli_version_b77': build_zapi_response_raw(cli_output(None, "b'77'")), + 'cli_version_u77': build_zapi_response_raw(cli_output(None, "u'77'")), +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + } + error = 'missing required arguments: command' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_default_priv(): + ''' make sure privilege is not required ''' + register_responses([ + ('ZAPI', 'system-cli', ZRR['cli_version']), + ]) + module_args = { + 'command': 'version', + } + msg = call_main(my_main, DEFAULT_ARGS, module_args)['msg'] + needle = b'NetApp Release' + assert needle in msg + print('Version (raw): %s' % msg) + + +def test_admin_priv(): + ''' make sure admin is accepted ''' + register_responses([ + ('ZAPI', 'system-cli', ZRR['cli_version']), + ]) + module_args = { + 'command': 'version', + 'privilege': 'admin', + } + msg = call_main(my_main, DEFAULT_ARGS, module_args)['msg'] + needle = b'NetApp Release' + assert needle in msg + print('Version (raw): %s' % msg) + 
+ +def test_advanced_priv(): + ''' make sure advanced is not required ''' + register_responses([ + ('ZAPI', 'system-cli', ZRR['cli_version_advanced']), + ]) + module_args = { + 'command': 'version', + 'privilege': 'advanced', + } + msg = call_main(my_main, DEFAULT_ARGS, module_args)['msg'] + # Interestingly, the ZAPI returns a slightly different response + needle = b'\nNetApp Release' + assert needle in msg + print('Version (raw): %s' % msg) + + +def get_dict_output(extra_args=None): + ''' get result value after calling command module ''' + module_args = { + 'command': 'version', + 'return_dict': 'true', + } + if extra_args: + module_args.update(extra_args) + dict_output = call_main(my_main, DEFAULT_ARGS, module_args)['msg'] + print('dict_output: %s' % repr(dict_output)) + return dict_output + + +def test_dict_output_77(): + ''' make sure correct value is returned ''' + register_responses([ + ('ZAPI', 'system-cli', ZRR['cli_version_77']), + ]) + result = '77' + assert get_dict_output()['result_value'] == int(result) + + +def test_dict_output_b77(): + ''' make sure correct value is returned ''' + register_responses([ + ('ZAPI', 'system-cli', ZRR['cli_version_b77']), + ]) + result = b'77' + assert get_dict_output()['result_value'] == int(result) + + +def test_dict_output_u77(): + ''' make sure correct value is returned ''' + register_responses([ + ('ZAPI', 'system-cli', ZRR['cli_version_u77']), + ]) + result = "u'77'" + assert get_dict_output()['result_value'] == int(eval(result)) + + +def test_dict_output_exclude(): + ''' make sure correct value is returned ''' + register_responses([ + ('ZAPI', 'system-cli', ZRR['cli_version']), + ('ZAPI', 'system-cli', ZRR['cli_version']), + ]) + dict_output = get_dict_output({'exclude_lines': 'NetApp Release'}) + assert len(dict_output['stdout_lines']) == 1 + assert len(dict_output['stdout_lines_filter']) == 0 + dict_output = get_dict_output({'exclude_lines': 'whatever'}) + assert len(dict_output['stdout_lines']) == 1 + assert 
len(dict_output['stdout_lines_filter']) == 1 + + +def test_dict_output_include(): + ''' make sure correct value is returned ''' + register_responses([ + ('ZAPI', 'system-cli', ZRR['cli_version']), + ('ZAPI', 'system-cli', ZRR['cli_version']), + ]) + dict_output = get_dict_output({'include_lines': 'NetApp Release'}) + assert len(dict_output['stdout_lines']) == 1 + assert len(dict_output['stdout_lines_filter']) == 1 + dict_output = get_dict_output({'include_lines': 'whatever'}) + assert len(dict_output['stdout_lines']) == 1 + assert len(dict_output['stdout_lines_filter']) == 0 + + +def test_check_mode(): + ''' make sure nothing is done ''' + register_responses([ + ]) + module_args = { + 'command': 'version', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + my_obj.module.check_mode = True + msg = expect_and_capture_ansible_exception(my_obj.apply, 'exit')['msg'] + needle = "Would run command: '['version']'" + assert needle in msg + print('Version (raw): %s' % msg) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_missing_netapp_lib(mock_has_netapp_lib): + module_args = { + 'command': 'version', + } + mock_has_netapp_lib.return_value = False + msg = 'Error: the python NetApp-Lib module is required. 
Import error: None' + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_zapi_errors(): + ''' make sure nothing is done ''' + register_responses([ + ('ZAPI', 'system-cli', ZRR['error']), + ('ZAPI', 'system-cli', ZRR['cli_version']), + ('ZAPI', 'system-cli', ZRR['cli_version']), + ('ZAPI', 'system-cli', ZRR['cli_version']), + + ]) + module_args = { + 'command': 'version', + } + error = zapi_error_message("Error running command ['version']") + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + # EMS error is ignored + assert b'NetApp Release' in call_main(my_main, DEFAULT_ARGS, module_args, fail=False)['msg'] + # EMS cserver error is ignored + assert b'NetApp Release' in call_main(my_main, DEFAULT_ARGS, module_args, fail=False)['msg'] + # EMS vserver error is ignored + module_args = { + 'command': 'version', + 'vserver': 'svm' + } + assert b'NetApp Release' in call_main(my_main, DEFAULT_ARGS, module_args, fail=False)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_debug.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_debug.py new file mode 100644 index 000000000..55d6f2727 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_debug.py @@ -0,0 +1,344 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP debug Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_debug \ + import NetAppONTAPDebug as my_module # module under test +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from 
ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + assert_no_warnings, assert_no_warnings_except_zapi, call_main, create_and_apply, create_module, expect_and_capture_ansible_exception, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_error, zapi_responses + +# not available on 2.6 anymore +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +DEFAULT_ARGS = { + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'vserver': 'vserver', +} + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy_9_8_0')), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'one_vserver_record_with_intf': (200, { + "records": [{ + 'name': 'vserver1', + 'ip_interfaces': [ + dict(services=['management'])], + }], + 'num_records': 1 + }, None), + 'one_user_record': (200, { + "records": [{ + 'name': 'user1', + 'applications': [ + dict(application='http'), + dict(application='ontapi'), + ], + 'locked': False, + 'owner': {'name': 'vserver'} + }], + 'num_records': 1 + }, None), + 'one_user_record_admin': (200, { + "records": [{ + 'name': 'user1', + 'applications': [ + dict(application='http'), + dict(application='ontapi'), + ], + 'locked': False, + 'owner': {'name': 'vserver'}, + 'role': {'name': 'admin'} + }], + 'num_records': 1 + }, None), + 'ConnectTimeoutError': (400, None, "Connection timed out"), + 'Name or service not known': (400, None, "Name or service not known"), + 
'not_authorized': (400, None, "not authorized for that command"), +}, allow_override=False) + +ZRR = zapi_responses({ + 'ConnectTimeoutError': build_zapi_error('123', 'ConnectTimeoutError'), + 'Name or service not known': build_zapi_error('123', 'Name or service not known'), +}, allow_override=False) + + +if netapp_utils.has_netapp_lib(): + REST_ZAPI_FLOW = [ + ('system-get-version', ZRR['version']), # get version + ('GET', 'cluster', SRR['is_rest_9_8']), # get_version + ] +else: + REST_ZAPI_FLOW = [ + ('GET', 'cluster', SRR['is_rest_9_8']), # get_version + ] + + +def test_success_no_vserver(): + ''' test get''' + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record_admin']) # get user + ]) + args = dict(DEFAULT_ARGS) + args.pop('vserver') + results = create_and_apply(my_module, args) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' in results + assert 'msg' in results + assert "NOTE: application console not found for user: user1: ['http', 'ontapi']" in results['notes'] + assert 'ZAPI connected successfully.' 
in results['msg'] + + +def test_success_with_vserver(): + ''' test get''' + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record']), # get user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_svms + ('GET', 'security/accounts', SRR['one_user_record']) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS) + print('Info: %s' % results) + # assert results['changed'] is False + assert_no_warnings_except_zapi() + assert 'notes' not in results + + +def test_fail_with_vserver_locked(): + ''' test get''' + user = copy.deepcopy(SRR['one_user_record']) + user[1]['records'][0]['locked'] = True + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record']), # get user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_svms + ('GET', 'security/accounts', user) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' in results + assert 'user: user1 is locked on vserver: vserver' in results['notes'][0] + + +def test_fail_with_vserver_missing_app(): + ''' test get''' + user = copy.deepcopy(SRR['one_user_record']) + user[1]['records'][0]['applications'] = [dict(application='http')] + + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record']), # get user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_svms + ('GET', 'security/accounts', user) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' in results + assert 'application ontapi not found for user: user1' in results['notes'][0] + assert 'Error: no unlocked user for ontapi on vserver: vserver' in results['msg'] + + +def test_fail_with_vserver_list_user_not_found(): + ''' test get''' + register_responses(REST_ZAPI_FLOW + [ + ('GET', 
'security/accounts', SRR['one_user_record']), # get user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_svms + ('GET', 'security/accounts', SRR['empty_records']) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'Error getting accounts for: vserver: none found' in results['msg'] + + +def test_fail_with_vserver_list_user_error_on_get_users(): + ''' test get''' + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record']), # get user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_svms + ('GET', 'security/accounts', SRR['generic_error']) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'Error getting accounts for: vserver: calling: security/accounts: got Expected error.' in results['msg'] + + +def test_success_with_vserver_list_user_not_authorized(): + ''' test get''' + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record']), # get user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_svms + ('GET', 'security/accounts', SRR['not_authorized']) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'Not autorized to get accounts for: vserver: calling: security/accounts: got not authorized for that command.' 
in results['msg'] + + +def test_fail_with_vserver_no_interface(): + ''' test get''' + vserver = copy.deepcopy(SRR['one_vserver_record_with_intf']) + vserver[1]['records'][0].pop('ip_interfaces') + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record_admin']), # get user + ('GET', 'svm/svms', vserver), # get_svms + ('GET', 'security/accounts', SRR['one_user_record']) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' in results + assert "NOTE: application console not found for user: user1: ['http', 'ontapi']" in results['notes'] + assert 'Error vserver is not associated with a network interface: vserver' in results['msg'] + + +def test_fail_with_vserver_not_found(): + ''' test get''' + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record_admin']), # get user + ('GET', 'svm/svms', SRR['empty_records']), # get_svms + ('GET', 'security/accounts', SRR['one_user_record']) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' in results + assert "NOTE: application console not found for user: user1: ['http', 'ontapi']" in results['notes'] + assert 'Error getting vserver in list_interfaces: vserver: not found' in results['msg'] + + +def test_fail_with_vserver_error_on_get_svms(): + ''' test get''' + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record_admin']), # get user + ('GET', 'svm/svms', SRR['generic_error']), # get_svms + ('GET', 'security/accounts', SRR['one_user_record']) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' in results + assert "NOTE: application console not found for user: user1: ['http', 'ontapi']" in 
results['notes'] + assert 'Error getting vserver in list_interfaces: vserver: calling: svm/svms: got Expected error.' in results['msg'] + + +def test_note_with_vserver_no_management_service(): + ''' test get''' + vserver = copy.deepcopy(SRR['one_vserver_record_with_intf']) + vserver[1]['records'][0]['ip_interfaces'][0]['services'] = ['data_core'] + register_responses(REST_ZAPI_FLOW + [ + ('GET', 'security/accounts', SRR['one_user_record_admin']), # get user + ('GET', 'svm/svms', vserver), # get_svms + ('GET', 'security/accounts', SRR['one_user_record']) # get_users + ]) + + results = create_and_apply(my_module, DEFAULT_ARGS) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' in results + assert 'no management policy in services' in results['notes'][2] + + +def test_fail_zapi_error(): + ''' test get''' + register_responses([ + ('system-get-version', ZRR['error']), + ('GET', 'cluster', SRR['is_rest_9_8']), # get_version + ('GET', 'security/accounts', SRR['one_user_record']), # get_user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_vservers + ('GET', 'security/accounts', SRR['one_user_record']), # get_users + ('system-get-version', ZRR['ConnectTimeoutError']), + ('GET', 'cluster', SRR['is_rest_9_8']), # get_version + ('GET', 'security/accounts', SRR['one_user_record']), # get_user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_vservers + ('GET', 'security/accounts', SRR['one_user_record']), # get_users + ('system-get-version', ZRR['Name or service not known']), + ('GET', 'cluster', SRR['is_rest_9_8']), # get_version + ('GET', 'security/accounts', SRR['one_user_record']), # get_user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_vservers + ('GET', 'security/accounts', SRR['one_user_record']) # get_users + ]) + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' not in results + assert 
'Unclassified, see msg' in results['msg'] + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + assert 'Error in hostname - Address does not exist or is not reachable: NetApp API failed. Reason - 123:ConnectTimeoutError' in results['msg'] + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + assert 'Error in hostname - DNS name cannot be resolved: NetApp API failed. Reason - 123:Name or service not known' in results['msg'] + + +def test_fail_rest_error(): + ''' test get''' + register_responses([ + ('system-get-version', ZRR['version']), + ('GET', 'cluster', SRR['is_zapi']), # get_version + ('system-get-version', ZRR['version']), + ('GET', 'cluster', SRR['ConnectTimeoutError']), # get_version + ('system-get-version', ZRR['version']), + ('GET', 'cluster', SRR['Name or service not known']), # get_version + ]) + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' not in results + assert 'Other error for hostname: 10.10.10.10 using REST: Unreachable.' 
in results['msg'] + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + assert 'Error in hostname - Address does not exist or is not reachable: Connection timed out' in results['msg'] + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + assert 'Error in hostname - DNS name cannot be resolved: Name or service not known' in results['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + ''' test get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8']), # get_version + ('GET', 'security/accounts', SRR['one_user_record']), # get_user + ('GET', 'svm/svms', SRR['one_vserver_record_with_intf']), # get_vservers + ('GET', 'security/accounts', SRR['one_user_record']) # get_users + ]) + + mock_has_netapp_lib.return_value = False + + results = create_and_apply(my_module, DEFAULT_ARGS, fail=True) + print('Info: %s' % results) + assert_no_warnings_except_zapi() + assert 'notes' not in results + assert 'Install the python netapp-lib module or a missing dependency' in results['msg'][0] + + +def test_check_connection_internal_error(): + ''' expecting REST or ZAPI ''' + error = 'Internal error, unexpected connection type: rest' + assert error == expect_and_capture_ansible_exception(create_module(my_module, DEFAULT_ARGS).check_connection, 'fail', 'rest')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_disk_options.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_disk_options.py new file mode 100644 index 000000000..d729b4edd --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_disk_options.py @@ -0,0 +1,151 @@ +# (c) 2021-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP fpolicy ext engine Ansible module ''' + +from __future__ import 
(absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + call_main, create_and_apply, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_disk_options \ + import NetAppOntapDiskOptions as my_module, main as my_main # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'node': 'node1', + 'bkg_firmware_update': False, + 'autocopy': False, + 'autoassign': False, + 'autoassign_policy': 'default', + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always' +} + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'one_disk_options_record': (200, { + "records": [{ + 'node': 'node1', + 'bkg_firmware_update': False, + 'autocopy': False, + 'autoassign': False, + 'autoassign_policy': 'default' + }] + }, None), + 'one_disk_options_record_on_off': (200, { + "records": [{ + 'node': 'node1', + 'bkg_firmware_update': 'on', + 'autocopy': 'off', + 'autoassign': 'on', + 'autoassign_policy': 'default' + }] + }, None), + 'one_disk_options_record_bad_value': (200, { + "records": [{ + 'node': 'node1', + 'bkg_firmware_update': 'whatisthis', + 'autocopy': 'off', + 'autoassign': 'on', + 'autoassign_policy': 'default' + }] + }, None) + +}, False) + + +def test_rest_modify_no_action(): + ''' modify fpolicy ext engine ''' + register_responses([ + ('GET', 
'cluster', SRR['is_rest']), + ('GET', 'private/cli/storage/disk/option', SRR['one_disk_options_record']), + ]) + assert not create_and_apply(my_module, DEFAULT_ARGS)['changed'] + + +def test_rest_modify_prepopulate(): + ''' modify disk options ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/storage/disk/option', SRR['one_disk_options_record']), + ('PATCH', 'private/cli/storage/disk/option', SRR['empty_good']), + ]) + args = {'autoassign': True, 'autocopy': True, 'bkg_firmware_update': True} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_rest_modify_on_off(): + ''' modify disk options ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/storage/disk/option', SRR['one_disk_options_record_on_off']), + ('PATCH', 'private/cli/storage/disk/option', SRR['empty_good']), + ]) + args = {'autoassign': True, 'autocopy': True, 'bkg_firmware_update': True} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_error_rest_get_not_on_off(): + ''' modify disk options ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/storage/disk/option', SRR['one_disk_options_record_bad_value']), + ]) + args = {'autoassign': True, 'autocopy': True, 'bkg_firmware_update': True} + assert create_and_apply(my_module, DEFAULT_ARGS, args, fail=True)['msg'] == 'Unexpected value for field bkg_firmware_update: whatisthis' + + +def test_error_rest_no_zapi_support(): + ''' modify disk options ''' + register_responses([ + ('GET', 'cluster', SRR['is_zapi']), + ]) + args = {'use_rest': 'auto'} + assert "na_ontap_disk_options only supports REST, and requires ONTAP 9.6 or later." 
in call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_error_get(): + ''' get disk options ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/storage/disk/option', SRR['generic_error']), + ]) + args = {'use_rest': 'auto'} + assert "calling: private/cli/storage/disk/option: got Expected error." in call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_error_get_empty(): + ''' get disk options ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/storage/disk/option', SRR['empty_records']), + ]) + args = {'use_rest': 'auto'} + assert "Error on GET private/cli/storage/disk/option, no record." == call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_error_patch(): + ''' modify disk options ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/storage/disk/option', SRR['one_disk_options_record_on_off']), + ('PATCH', 'private/cli/storage/disk/option', SRR['generic_error']), + ]) + args = {'use_rest': 'auto'} + assert "calling: private/cli/storage/disk/option: got Expected error." 
in call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_disks.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_disks.py new file mode 100644 index 000000000..b59ae7c83 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_disks.py @@ -0,0 +1,822 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP disks Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_disks \ + import NetAppOntapDisks as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + + try: + container_type = self.xml_in['query']['storage-disk-info']['disk-raid-info']['container-type'] + except LookupError: + container_type = None + try: + get_owned_disks = self.xml_in['query']['storage-disk-info']['disk-ownership-info']['home-node-name'] + except LookupError: + get_owned_disks = None + + api_call = 
self.xml_in.get_name() + + if self.type == 'fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + elif api_call == 'storage-disk-get-iter': + if container_type == 'spare': + xml = self.home_spare_disks() + elif get_owned_disks: + xml = self.owned_disks() + else: + xml = self.partner_spare_disks() + elif api_call == 'cf-status': + xml = self.partner_node_name() + self.xml_out = xml + return xml + + @staticmethod + def owned_disks(): + ''' build xml data for disk-inventory-info owned disks ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': [ + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.8' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.7' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.10' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.25' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.18' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.0' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.6' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.11' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.12' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.13' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.23' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.4' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.9' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 
'disk-cluster-name': '1.0.21' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.16' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.19' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.2' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.14' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.20' + } + } + } + ], + 'num-records': '19' + } + xml.translate_struct(data) + return xml + + @staticmethod + def home_spare_disks(): + ''' build xml data for disk-inventory-info home spare disks ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': [ + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.9' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.20' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.9' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.22' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.13' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.23' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.16' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.18' + } + } + } + ], + 'num-records': '8' + } + xml.translate_struct(data) + return xml + + @staticmethod + def partner_spare_disks(): + ''' build xml data for disk-inventory-info partner spare disks ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': [ + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.7' + } + } + }, + { + 
'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.15' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.21' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.23' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.19' + } + } + }, + { + 'storage-disk-info': { + 'disk-inventory-info': { + 'disk-cluster-name': '1.0.11' + } + } + } + ], + 'num-records': '6' + } + xml.translate_struct(data) + return xml + + @staticmethod + def partner_node_name(): + ''' build xml data for partner node name''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'cf-status': { + 'partner-name': 'node2' + } + } + xml.translate_struct(data) + return xml + + @staticmethod + def unassigned_disk_count(): + ''' build xml data for the count of unassigned disks on a node ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'num-records': '0' + } + xml.translate_struct(data) + return xml + + +def default_args(): + args = { + 'disk_count': 15, + 'node': 'node1', + 'disk_type': 'SAS', + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'owned_disk_record': ( + 200, { + 'records': [ + { + "name": "1.0.8", + "type": "sas", + "container_type": 
"aggregate", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.7", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.10", + "type": "sas", + "container_type": "aggregate", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.18", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.0", + "type": "sas", + "container_type": "aggregate", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.6", + "type": "sas", + "container_type": "aggregate", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.11", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.12", + "type": "sas", + "container_type": "aggregate", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.13", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.23", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.22", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.4", + "type": "sas", + "container_type": "aggregate", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.9", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.21", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.16", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.19", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.2", + "type": "sas", + "container_type": "aggregate", + "home_node": { + "name": "node1" + } + }, + { + "name": "1.0.14", + "type": "sas", + "container_type": "aggregate", + "home_node": { + "name": 
"node1" + } + }, + { + "name": "1.0.20", + "type": "sas", + "container_type": "spare", + "home_node": { + "name": "node1" + } + } + ], + 'num_records': 19}, + None), + + # 'owned_disk_record': (200, {'num_records': 15}), + 'unassigned_disk_record': ( + 200, { + 'records': [], + 'num_records': 0}, + None), + 'home_spare_disk_info_record': ( + 200, {'records': [ + { + 'name': '1.0.20', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node1'}}, + { + 'name': '1.0.9', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node1'}}, + { + 'name': '1.0.22', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node1'}}, + { + 'name': '1.0.13', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node1'}}, + { + 'name': '1.0.17', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node1'}}, + { + 'name': '1.0.23', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node1'}}, + { + 'name': '1.0.16', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node1'}}, + { + 'name': '1.0.18', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node1'}} + ], + 'num_records': 8, + '_links': {'self': {'href': '/api/storage/disks?home_node.name=node1&container_type=spare&type=SAS&fields=name'}}}, + None), + + 'partner_node_name_record': ( + 200, {'records': [ + { + 'uuid': 'c345c182-a6a0-11eb-af7b-00a0984839de', + 'name': 'node2', + 'ha': { + 'partners': [ + {'name': 'node1'} + ] + } + } + ], + 'num_records': 1}, + None), + + 'partner_spare_disk_info_record': ( + 200, {'records': [ + { + 'name': '1.0.7', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node2'} + }, + { + 'name': '1.0.15', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node2'} + }, + { + 'name': '1.0.21', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node2'} + }, + { + 'name': '1.0.23', + 'type': 'sas', 
+ 'container_type': 'spare', + 'home_node': {'name': 'node2'} + }, + { + 'name': '1.0.19', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node2'} + }, + { + 'name': '1.0.11', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node2'} + } + ], + 'num_records': 6}, + None) +} + + +def test_successful_assign(patch_ansible): + ''' successful assign and test idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + args['disk_count'] = '20' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection() + my_obj.ems_log_event = Mock(return_value=None) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + # mock_create.assert_called_with() + args['use_rest'] = 'never' + args['disk_count'] = '19' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection() + my_obj.ems_log_event = Mock(return_value=None) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +def test_successful_unassign(patch_ansible): + ''' successful assign and test idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + args['disk_count'] = '17' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection() + my_obj.ems_log_event = Mock(return_value=None) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + # mock_create.assert_called_with() + args['use_rest'] = 'never' + args['disk_count'] = '19' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection() + my_obj.ems_log_event = Mock(return_value=None) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +def 
test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_ensure_get_called(patch_ansible): + ''' test get_disks ''' + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + print('starting') + my_obj = my_module() + print('use_rest:', my_obj.use_rest) + my_obj.server = MockONTAPConnection() + assert my_obj.get_disks is not None + + +def test_rest_missing_arguments(patch_ansible): # pylint: disable=redefined-outer-name,unused-argument ##WHAT DOES THIS METHOD DO + ''' create scope ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +def test_if_all_methods_catch_exception(patch_ansible): + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_disks(container_type='owned', node='node1') + assert 'Error getting disk ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_disks(container_type='unassigned') + assert 'Error getting disk ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_disks(container_type='spare', node='node1') + assert 'Error getting disk ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_partner_node_name() + assert 'Error getting partner name ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.disk_assign(needed_disks=2) + assert 'Error assigning disks ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.disk_unassign(['1.0.0', 
'1.0.1']) + assert 'Error unassigning disks ' in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_assign(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['owned_disk_record'], + SRR['unassigned_disk_record'], + SRR['home_spare_disk_info_record'], + SRR['partner_node_name_record'], + SRR['partner_spare_disk_info_record'], + SRR['empty_good'], # unassign + SRR['empty_good'], # assign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 8 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_unassign(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + args['disk_count'] = 17 + print(args) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['owned_disk_record'], + SRR['unassigned_disk_record'], + SRR['home_spare_disk_info_record'], + SRR['partner_node_name_record'], + SRR['partner_spare_disk_info_record'], + SRR['empty_good'], # unassign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 6 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_no_action(mock_request, patch_ansible): # pylint: 
disable=redefined-outer-name,unused-argument + ''' disk_count matches arguments, do nothing ''' + args = dict(default_args()) + args['disk_count'] = 19 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['owned_disk_record'], + SRR['unassigned_disk_record'], + SRR['home_spare_disk_info_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 4 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_dns.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_dns.py new file mode 100644 index 000000000..c592f5c88 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_dns.py @@ -0,0 +1,388 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_dns''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_error, build_zapi_response, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import call_main, create_module, expect_and_capture_ansible_exception,\ + patch_ansible, 
assert_warning_was_raised, print_warnings + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_dns import main as my_main, NetAppOntapDns as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'dns_record': (200, {"records": [{"domains": ['test.com'], + "servers": ['0.0.0.0'], + "svm": {"name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"}}]}, None), + 'cluster_data': (200, {"dns_domains": ['test.com'], + "name_servers": ['0.0.0.0'], + "name": "cserver", + "uuid": "C2c9e252-41be-11e9-81d5-00a0986138f7"}, None), + 'cluster_name': (200, {"name": "cserver", + "uuid": "C2c9e252-41be-11e9-81d5-00a0986138f7"}, None), +}) + +dns_info = { + 'attributes': { + 'net-dns-info': { + 'name-servers': [{'ip-address': '0.0.0.0'}], + 'domains': [{'string': 'test.com'}], + 'skip-config-validation': 'true' + } + } +} + + +ZRR = zapi_responses({ + 'dns_info': build_zapi_response(dns_info), + 'error_15661': build_zapi_error(15661, 'not_found'), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'nameservers': ['0.0.0.0'], + 'domains': ['test.com'], +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + } + error = 'Error: vserver is a required parameter with ZAPI.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_zapi_get_error(): + register_responses([ + ('ZAPI', 'net-dns-get', ZRR['error']), + ('ZAPI', 'net-dns-get', ZRR['error_15661']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'svm_abc', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + # get + error = zapi_error_message('Error getting DNS info') + assert error in expect_and_capture_ansible_exception(my_obj.get_dns, 'fail')['msg'] + assert my_obj.get_dns() is None + + +def test_idempotent_modify_dns(): + register_responses([ + ('ZAPI', 'net-dns-get', ZRR['dns_info']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'svm_abc', + } + + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_zapi_modify_dns(): + register_responses([ + ('ZAPI', 'net-dns-get', ZRR['dns_info']), + ('ZAPI', 'net-dns-modify', ZRR['success']), + # idempotency + ('ZAPI', 'net-dns-get', ZRR['dns_info']), + # error + ('ZAPI', 'net-dns-get', ZRR['dns_info']), + ('ZAPI', 'net-dns-modify', ZRR['error']), + ]) + module_args = { + 'domains': ['new_test.com'], + 'nameservers': ['1.2.3.4'], + 'skip_validation': True, + 'use_rest': 'never', + 'vserver': 'svm_abc', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args = { + 'domains': ['test.com'], + 'skip_validation': True, + 'use_rest': 'never', + 'vserver': 'svm_abc', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args = { + 'domains': ['new_test.com'], + 'nameservers': ['1.2.3.4'], + 'skip_validation': True, + 'use_rest': 'never', + 'vserver': 'svm_abc', + } + error = zapi_error_message('Error modifying dns') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_zapi_create_dns(): + register_responses([ + ('ZAPI', 'net-dns-get', ZRR['empty']), + ('ZAPI', 'net-dns-create', ZRR['success']), + # idempotency + ('ZAPI', 'net-dns-get', 
ZRR['dns_info']), + # error + ('ZAPI', 'net-dns-get', ZRR['empty']), + ('ZAPI', 'net-dns-create', ZRR['error']), + ]) + module_args = { + 'domains': ['test.com'], + 'skip_validation': True, + 'use_rest': 'never', + 'vserver': 'svm_abc', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + error = zapi_error_message('Error creating dns') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_zapi_delete_dns(): + register_responses([ + ('ZAPI', 'net-dns-get', ZRR['dns_info']), + ('ZAPI', 'net-dns-destroy', ZRR['success']), + # idempotency + ('ZAPI', 'net-dns-get', ZRR['empty']), + # error + ('ZAPI', 'net-dns-get', ZRR['dns_info']), + ('ZAPI', 'net-dns-destroy', ZRR['error']), + ]) + module_args = { + 'domains': ['new_test.com'], + 'state': 'absent', + 'use_rest': 'never', + 'vserver': 'svm_abc', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + error = zapi_error_message('Error destroying dns') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_error(): + module_args = { + 'use_rest': 'always', + } + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['generic_error']), + ('GET', 'cluster', SRR['is_rest_9_9_1']), + # create + ('PATCH', 'cluster', SRR['generic_error']), + ('PATCH', 'cluster', SRR['generic_error']), + ('POST', 'name-services/dns', SRR['generic_error']), + # delete + ('DELETE', 'name-services/dns/uuid', SRR['generic_error']), + # read + ('GET', 'name-services/dns', SRR['generic_error']), + # modify + ('PATCH', 'cluster', SRR['generic_error']), + ('PATCH', 'name-services/dns/uuid', SRR['generic_error']), + ]) + error = rest_error_message('Error getting cluster info', 'cluster') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, 
fail=True)['msg'] + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + # create + my_obj.is_cluster = True + error = rest_error_message('Error updating cluster DNS options', 'cluster') + assert error in expect_and_capture_ansible_exception(my_obj.create_dns_rest, 'fail')['msg'] + my_obj.is_cluster = False + # still cluster scope, as verserver is not set + assert error in expect_and_capture_ansible_exception(my_obj.create_dns_rest, 'fail')['msg'] + my_obj.parameters['vserver'] = 'vserver' + error = rest_error_message('Error creating DNS service', 'name-services/dns') + assert error in expect_and_capture_ansible_exception(my_obj.create_dns_rest, 'fail')['msg'] + # delete + my_obj.is_cluster = True + error = 'Error: cluster scope when deleting DNS with REST requires ONTAP 9.9.1 or later.' + assert error in expect_and_capture_ansible_exception(my_obj.destroy_dns_rest, 'fail', {})['msg'] + my_obj.is_cluster = False + error = rest_error_message('Error deleting DNS service', 'name-services/dns/uuid') + assert error in expect_and_capture_ansible_exception(my_obj.destroy_dns_rest, 'fail', {'uuid': 'uuid'})['msg'] + # read, cluster scope + del my_obj.parameters['vserver'] + error = rest_error_message('Error getting DNS service', 'name-services/dns') + assert error in expect_and_capture_ansible_exception(my_obj.get_dns_rest, 'fail')['msg'] + # modify + dns_attrs = { + 'domains': [], + 'nameservers': [], + 'uuid': 'uuid', + } + my_obj.is_cluster = True + error = rest_error_message('Error updating cluster DNS options', 'cluster') + assert error in expect_and_capture_ansible_exception(my_obj.modify_dns_rest, 'fail', dns_attrs)['msg'] + my_obj.is_cluster = False + error = rest_error_message('Error modifying DNS configuration', 'name-services/dns/uuid') + assert error in expect_and_capture_ansible_exception(my_obj.modify_dns_rest, 'fail', dns_attrs)['msg'] + + +def test_rest_successfully_create(): + module_args = { + 'use_rest': 'always', + 'vserver': 'svm_abc', + 
'skip_validation': True + } + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/dns', SRR['zero_records']), + ('POST', 'name-services/dns', SRR['success']), + ]) + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_create_is_cluster_vserver(): + module_args = { + 'use_rest': 'always', + 'vserver': 'cserver' + } + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'name-services/dns', SRR['zero_records']), + ('GET', 'cluster', SRR['cluster_name']), + ('PATCH', 'cluster', SRR['empty_good']), + ]) + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_idempotent_create_dns(): + module_args = { + 'use_rest': 'always', + 'vserver': 'svm_abc', + } + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'name-services/dns', SRR['dns_record']), + ]) + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_destroy(): + module_args = { + 'state': 'absent', + 'use_rest': 'always', + 'vserver': 'svm_abc', + } + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'name-services/dns', SRR['dns_record']), + ('DELETE', 'name-services/dns/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['success']), + ]) + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_idempotently_destroy(): + module_args = { + 'state': 'absent', + 'use_rest': 'always', + 'vserver': 'svm_abc', + } + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'name-services/dns', SRR['zero_records']), + ('GET', 'cluster', SRR['cluster_data']), + ]) + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_modify(): + module_args = { + 'domains': 'new_test.com', + 'state': 'present', + 'use_rest': 'always', + 'vserver': 'svm_abc' + } + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'name-services/dns', 
SRR['dns_record']), + ('PATCH', 'name-services/dns/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['success']), + ]) + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_modify_is_cluster_vserver(): + module_args = { + 'domains': 'new_test.com', + 'state': 'present', + 'use_rest': 'always', + 'vserver': 'cserver' + } + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'name-services/dns', SRR['zero_records']), + ('GET', 'cluster', SRR['cluster_data']), + ('PATCH', 'cluster', SRR['empty_good']), + ]) + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_idempotently_modify(): + module_args = { + 'state': 'present', + 'use_rest': 'always', + 'vserver': 'svm_abc', + } + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'name-services/dns', SRR['dns_record']), + ]) + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_modify_is_cluster_skip_validation(): + module_args = { + 'domains': 'new_test.com', + 'state': 'present', + 'use_rest': 'always', + 'skip_validation': True + } + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/dns', SRR['zero_records']), + ('PATCH', 'cluster', SRR['empty_good']), + # error if used skip_validation on earlier versions. 
+ ('GET', 'cluster', SRR['is_rest']), + ]) + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_warning_was_raised("skip_validation is ignored for cluster DNS operations in REST.") + assert 'Error: Minimum version of ONTAP for skip_validation is (9, 9, 1)' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_has_netapp_lib(has_netapp_lib): + module_args = { + 'state': 'present', + 'use_rest': 'never', + 'vserver': 'svm_abc', + } + has_netapp_lib.return_value = False + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == 'Error: the python NetApp-Lib module is required. Import error: None' diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_domain_tunnel.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_domain_tunnel.py new file mode 100644 index 000000000..eb08bf205 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_domain_tunnel.py @@ -0,0 +1,145 @@ +''' unit tests ONTAP Ansible module: na_ontap_domain_tunnel ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_domain_tunnel \ + import NetAppOntapDomainTunnel as domain_tunnel_module # module under test + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required 
netapp_lib') + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"), + 'generic_error': (400, None, {'message': "expected error", 'code': '5'}), + # module specific responses + 'domain_tunnel_record': (200, { + 'svm': { + 'name': 'ansible' + } + }, None) +} + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_domain_tunnel = { + "hostname": '10.10.10.10', + "username": 'username', + "password": 'password', + "vserver": 'ansible' + } + + def set_default_args(self): + return { + 'state': 'present', + 'hostname': self.mock_domain_tunnel['hostname'], + 'username': self.mock_domain_tunnel['username'], + 'password': self.mock_domain_tunnel['password'], + 'vserver': self.mock_domain_tunnel['vserver'] + } + + def get_domain_tunnel_mock_object(self): + domain_tunnel_obj = domain_tunnel_module() + return domain_tunnel_obj + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_domain_tunnel_mock_object().apply() + assert exc.value.args[0]['msg'] == SRR['generic_error'][2] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_create_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_good'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_domain_tunnel_mock_object().apply() + 
assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_create_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['domain_tunnel_record'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_domain_tunnel_mock_object().apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_modify_rest(self, mock_request): + data = self.set_default_args() + data['vserver'] = ['ansible1'] + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['domain_tunnel_record'], # get + SRR['domain_tunnel_record'], # modify + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_domain_tunnel_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_delete_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['domain_tunnel_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_domain_tunnel_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_delete_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_good'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + 
self.get_domain_tunnel_mock_object().apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py new file mode 100644 index 000000000..05270aef1 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_efficiency_policy.py @@ -0,0 +1,422 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_vscan_scanner_pool ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_efficiency_policy \ + import NetAppOntapEfficiencyPolicy as efficiency_module # module under test +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'state': 'present', + 'vserver': 'svm3', + 'policy_name': 
'test_policy', + 'comment': 'This policy is for x and y', + 'enabled': True, + 'qos_policy': 'background', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never' +} + + +threshold_info = { + 'num-records': 1, + 'attributes-list': { + 'sis-policy-info': { + 'changelog-threshold-percent': 10, + 'comment': 'This policy is for x and y', + 'enabled': 'true', + 'policy-name': 'test_policy', + 'policy-type': 'threshold', + 'qos-policy': 'background', + 'vserver': 'svm3' + } + } +} + +schedule_info = { + 'num-records': 1, + 'attributes-list': { + 'sis-policy-info': { + 'comment': 'This policy is for x and y', + 'duration': 10, + 'enabled': 'true', + 'policy-name': 'test_policy', + 'policy-type': 'scheduled', + 'qos-policy': 'background', + 'vserver': 'svm3' + } + } +} + +ZRR = zapi_responses({ + 'threshold_info': build_zapi_response(threshold_info), + 'schedule_info': build_zapi_response(schedule_info) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + efficiency_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_get_nonexistent_efficiency_policy(): + register_responses([ + ('sis-policy-get-iter', ZRR['empty']) + ]) + efficiency_obj = create_module(efficiency_module, DEFAULT_ARGS) + result = efficiency_obj.get_efficiency_policy() + assert not result + + +def test_get_existing_efficiency_policy(): + register_responses([ + ('sis-policy-get-iter', ZRR['threshold_info']) + ]) + efficiency_obj = create_module(efficiency_module, DEFAULT_ARGS) + result = efficiency_obj.get_efficiency_policy() + assert result + + +def test_successfully_create(): + register_responses([ + ('sis-policy-get-iter', ZRR['empty']), + ('sis-policy-create', ZRR['success']) + ]) + args = {'policy_type': 'threshold'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] + + +def 
test_create_idempotency(): + register_responses([ + ('sis-policy-get-iter', ZRR['threshold_info']) + ]) + args = {'policy_type': 'threshold'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_threshold_duration_failure(): + register_responses([ + ('sis-policy-get-iter', ZRR['threshold_info']) + ]) + args = {'duration': 1} + msg = create_and_apply(efficiency_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert "duration cannot be set if policy_type is threshold" == msg + + +def test_threshold_schedule_failure(): + register_responses([ + ('sis-policy-get-iter', ZRR['threshold_info']) + ]) + args = {'schedule': 'test_job_schedule'} + msg = create_and_apply(efficiency_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert "schedule cannot be set if policy_type is threshold" == msg + + +def test_scheduled_threshold_percent_failure(): + register_responses([ + ('sis-policy-get-iter', ZRR['schedule_info']) + ]) + args = {'changelog_threshold_percent': 30} + msg = create_and_apply(efficiency_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert "changelog_threshold_percent cannot be set if policy_type is scheduled" == msg + + +def test_successfully_delete(): + register_responses([ + ('sis-policy-get-iter', ZRR['threshold_info']), + ('sis-policy-delete', ZRR['success']) + ]) + args = {'state': 'absent'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] + + +def test_delete_idempotency(): + register_responses([ + ('sis-policy-get-iter', ZRR['empty']) + ]) + args = {'state': 'absent'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_successful_modify(): + register_responses([ + ('sis-policy-get-iter', ZRR['schedule_info']), + ('sis-policy-modify', ZRR['success']) + ]) + args = {'policy_type': 'threshold'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + 
('sis-policy-get-iter', ZRR['error']), + ('sis-policy-create', ZRR['error']), + ('sis-policy-modify', ZRR['error']), + ('sis-policy-delete', ZRR['error']) + ]) + module_args = { + 'schedule': 'test_job_schedule' + } + + my_obj = create_module(efficiency_module, DEFAULT_ARGS, module_args) + + error = expect_and_capture_ansible_exception(my_obj.get_efficiency_policy, 'fail')['msg'] + assert 'Error searching for efficiency policy test_policy: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.create_efficiency_policy, 'fail')['msg'] + assert 'Error creating efficiency policy test_policy: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.modify_efficiency_policy, 'fail', modify={'schedule': 'test_job_schedule'})['msg'] + assert 'Error modifying efficiency policy test_policy: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.delete_efficiency_policy, 'fail')['msg'] + assert 'Error deleting efficiency policy test_policy: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' in error + + +def test_switch_to_zapi(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('sis-policy-get-iter', ZRR['schedule_info']) + ]) + args = {'use_rest': 'auto'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] is False + + +SRR = rest_responses({ + 'threshold_policy_info': (200, {"records": [ + { + "uuid": "d0845ae1-a8a8-11ec-aa26-005056b323e5", + "svm": {"name": "svm3"}, + "name": "test_policy", + "type": "threshold", + "start_threshold_percent": 30, + "qos_policy": "background", + "enabled": True, + "comment": "This policy is for x and y" + } + ], "num_records": 1}, None), + 'scheduled_policy_info': (200, {"records": [ + { + "uuid": "0d1f0860-a8a9-11ec-aa26-005056b323e5", + "svm": {"name": "svm3"}, + "name": "test_policy", + "type": "scheduled", + "duration": 5, + "schedule": {"name": "daily"}, + "qos_policy": "background", + "enabled": True, + "comment": "This policy is for x and y" + } + ], "num_records": 1}, None), +}) + + +def test_successful_create_rest(): + ''' Test successful create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['empty_records']), + ('POST', 'storage/volume-efficiency-policies', SRR['success']) + ]) + args = {'policy_type': 'threshold', 'use_rest': 'always'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args) + + +def test_create_idempotency_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['threshold_policy_info']) + ]) + args = {'policy_type': 'threshold', 'use_rest': 'always'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_threshold_duration_failure_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['threshold_policy_info']) + ]) + args = 
{'duration': 1, 'use_rest': 'always'} + msg = create_and_apply(efficiency_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert "duration cannot be set if policy_type is threshold" == msg + + +def test_threshold_schedule_failure_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['threshold_policy_info']) + ]) + args = {'schedule': 'test_job_schedule', 'use_rest': 'always'} + msg = create_and_apply(efficiency_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert "schedule cannot be set if policy_type is threshold" == msg + + +def test_scheduled_threshold_percent_failure_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['scheduled_policy_info']) + ]) + args = {'changelog_threshold_percent': 30, 'use_rest': 'always'} + msg = create_and_apply(efficiency_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert "changelog_threshold_percent cannot be set if policy_type is scheduled" == msg + + +def test_successfully_delete_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['scheduled_policy_info']), + ('DELETE', 'storage/volume-efficiency-policies/0d1f0860-a8a9-11ec-aa26-005056b323e5', SRR['success']) + ]) + args = {'state': 'absent', 'use_rest': 'always'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] + + +def test_delete_idempotency_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['empty_records']) + ]) + args = {'state': 'absent', 'use_rest': 'always'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_successful_modify_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['scheduled_policy_info']), + ('PATCH', 
'storage/volume-efficiency-policies/0d1f0860-a8a9-11ec-aa26-005056b323e5', SRR['success']) + ]) + args = {'policy_type': 'threshold', 'use_rest': 'always'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_modify_duration_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['scheduled_policy_info']), + ('PATCH', 'storage/volume-efficiency-policies/0d1f0860-a8a9-11ec-aa26-005056b323e5', SRR['success']) + ]) + args = {'duration': 10, 'use_rest': 'always'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_modify_duration_set_hyphen_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['scheduled_policy_info']), + ('PATCH', 'storage/volume-efficiency-policies/0d1f0860-a8a9-11ec-aa26-005056b323e5', SRR['success']) + ]) + args = {'duration': "-", 'use_rest': 'always'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_modify_changelog_threshold_percent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['threshold_policy_info']), + ('PATCH', 'storage/volume-efficiency-policies/d0845ae1-a8a8-11ec-aa26-005056b323e5', SRR['success']) + ]) + args = {'changelog_threshold_percent': 40, 'use_rest': 'always'} + assert create_and_apply(efficiency_module, DEFAULT_ARGS, args)['changed'] + + +def test_if_all_methods_catch_exception_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volume-efficiency-policies', SRR['generic_error']), + ('POST', 'storage/volume-efficiency-policies', SRR['generic_error']), + ('PATCH', 'storage/volume-efficiency-policies', SRR['generic_error']), + ('DELETE', 'storage/volume-efficiency-policies', SRR['generic_error']) + ]) + module_args = { + 
'schedule': 'test_job_schedule', + 'use_rest': 'always' + } + + my_obj = create_module(efficiency_module, DEFAULT_ARGS, module_args) + + error = expect_and_capture_ansible_exception(my_obj.get_efficiency_policy, 'fail')['msg'] + assert 'calling: storage/volume-efficiency-policies: got Expected error.' in error + + error = expect_and_capture_ansible_exception(my_obj.create_efficiency_policy, 'fail')['msg'] + assert 'calling: storage/volume-efficiency-policies: got Expected error.' in error + + error = expect_and_capture_ansible_exception(my_obj.modify_efficiency_policy, 'fail', modify={'schedule': 'test_job_schedule'})['msg'] + assert 'calling: storage/volume-efficiency-policies: got Expected error.' in error + + error = expect_and_capture_ansible_exception(my_obj.delete_efficiency_policy, 'fail')['msg'] + assert 'calling: storage/volume-efficiency-policies: got Expected error.' in error + + +def test_module_error_ontap_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']) + ]) + module_args = {'use_rest': 'always'} + msg = create_module(efficiency_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'Error: REST requires ONTAP 9.8 or later for efficiency_policy APIs.' 
== msg + + +def test_module_error_duration_in_threshold(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + module_args = { + 'use_rest': 'always', + 'policy_type': 'threshold', + 'duration': 1 + } + msg = create_module(efficiency_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'duration cannot be set if policy_type is threshold' == msg + + +def test_module_error_schedule_in_threshold(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + module_args = { + 'use_rest': 'always', + 'policy_type': 'threshold', + 'schedule': 'daily' + } + msg = create_module(efficiency_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'schedule cannot be set if policy_type is threshold' == msg + + +def test_module_error_changelog_threshold_percent_in_schedule(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + module_args = { + 'use_rest': 'always', + 'policy_type': 'scheduled', + 'changelog_threshold_percent': 20 + } + msg = create_module(efficiency_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'changelog_threshold_percent cannot be set if policy_type is scheduled' == msg diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ems_destination.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ems_destination.py new file mode 100644 index 000000000..ca951ba58 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ems_destination.py @@ -0,0 +1,226 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_ems_destination module ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import 
+from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception, call_main +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ems_destination \ + import NetAppOntapEmsDestination as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +SRR = rest_responses({ + 'ems_destination': (200, { + "records": [ + { + "name": "test", + "type": "rest-api", + "destination": "https://test.destination", + "filters": [ + { + "name": "test-filter" + } + ] + }], + "num_records": 1 + }, None), + 'missing_key': (200, { + "records": [ + { + "name": "test", + "type": "rest_api", + "destination": "https://test.destination" + }], + "num_records": 1 + }, None) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + +} + + +def test_get_ems_destination_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['empty_records']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter']} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_ems_destination('test') is None + + +def test_get_ems_destination_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['generic_error']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 
'filters': ['test-filter']} + my_module_object = create_module(my_module, DEFAULT_ARGS, module_args) + msg = 'Error fetching EMS destination for test: calling: support/ems/destinations: got Expected error.' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_ems_destination, 'fail', 'test')['msg'] + + +def test_create_ems_destination(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['empty_records']), + ('POST', 'support/ems/destinations', SRR['empty_good']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_ems_destination_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('POST', 'support/ems/destinations', SRR['generic_error']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter']} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = expect_and_capture_ansible_exception(my_obj.create_ems_destination, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error creating EMS destinations for test: calling: support/ems/destinations: got Expected error.' 
== error + + +def test_delete_ems_destination(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['ems_destination']), + ('DELETE', 'support/ems/destinations/test', SRR['empty_good']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter'], 'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_ems_destination_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('DELETE', 'support/ems/destinations/test', SRR['generic_error']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter'], 'state': 'absent'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = expect_and_capture_ansible_exception(my_obj.delete_ems_destination, 'fail', 'test')['msg'] + print('Info: %s' % error) + assert 'Error deleting EMS destination for test: calling: support/ems/destinations/test: got Expected error.' 
== error + + +def test_modify_ems_destination_filter(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['missing_key']), + ('PATCH', 'support/ems/destinations/test', SRR['empty_good']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['other-filter']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ems_destination_rest_api_idempotent(): + """ verify that rest-api is equivalent to rest_api """ + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['ems_destination']), + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter']} + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ems_destination_target(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['ems_destination']), + ('PATCH', 'support/ems/destinations/test', SRR['empty_good']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://different.destination', 'filters': ['test-filter']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ems_destination_type(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['ems_destination']), + ('DELETE', 'support/ems/destinations/test', SRR['empty_good']), + ('POST', 'support/ems/destinations', SRR['empty_good']) + ]) + module_args = {'name': 'test', 'type': 'email', 'destination': 'test@hq.com', 'filters': ['test-filter']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ems_destination_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('PATCH', 
'support/ems/destinations/test', SRR['generic_error']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['other-filter']} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + modify = {'filters': ['other-filter']} + error = expect_and_capture_ansible_exception(my_obj.modify_ems_destination, 'fail', 'test', modify)['msg'] + print('Info: %s' % error) + assert 'Error modifying EMS destination for test: calling: support/ems/destinations/test: got Expected error.' == error + + +def test_module_fail_without_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_zapi']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter']} + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + assert 'na_ontap_ems_destination is only supported with REST API' == error + + +def test_apply_returns_errors_from_get_destination(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['generic_error']) + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter']} + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + assert 'Error fetching EMS destination for test: calling: support/ems/destinations: got Expected error.' 
== error + + +def test_check_mode_creates_no_destination(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['empty_records']), + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args, check_mode=True)['changed'] + + +def test_changed_set_to_ok_for_expected_values(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/destinations', SRR['ems_destination']), + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter']} + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args, check_mode=True)['changed'] + + +def test_empty_modify_skips_patch(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + module_args = {'name': 'test', 'type': 'rest_api', 'destination': 'https://test.destination', 'filters': ['test-filter']} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + my_obj.modify_ems_destination('test', {}) diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ems_filter.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ems_filter.py new file mode 100644 index 000000000..f7f0a1feb --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ems_filter.py @@ -0,0 +1,308 @@ +# (c) 2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_ems_filter module ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from 
ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception, call_main +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ems_filter \ + import NetAppOntapEMSFilters as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'ems_filter': (200, { + "name": "snmp-traphost", + "rules": [{ + "index": "1", + "type": "include", + "message_criteria": { + "severities": "error,informational", + "name_pattern": "callhome.*", + } + }, { + "index": "2", + "type": "exclude", + "message_criteria": { + "severities": "*", + "name_pattern": "*", + "snmp_trap_types": "*", + } + }] + }, None), + 'ems_filter_2_riles': (200, { + "name": "snmp-traphost", + "rules": [{ + "index": "1", + "type": "include", + "message_criteria": { + "severities": "error,informational", + "name_pattern": "callhome.*", + } + }, { + "index": "2", + "type": "include", + "message_criteria": { + "severities": "alert", + "name_pattern": "callhome.*", + } + }, { + "index": "3", + "type": "exclude", + "message_criteria": { + "severities": "*", + "name_pattern": "*", + "snmp_trap_types": "*", + } + }] + }, None), + 'ems_filter_no_rules': (200, { + "name": "snmp-traphost", + }, None) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': "snmp-traphost" +} + +DEFAULT_RULE = [{ + "index": "1", + "type": "include", + "message_criteria": { + "severities": "error,informational", + 
"name_pattern": "callhome.*", + } +}] + + +DEFAULT_RULE_2_RULES = [{ + "index": "1", + "type": "include", + "message_criteria": { + "severities": "error,informational", + "name_pattern": "callhome.*", + }}, { + "index": "2", + "type": "include", + "message_criteria": { + "severities": "alert", + "name_pattern": "callhome.*", + }}] + +DEFAULT_RULE_MODIFY_TYPE_2_RULES = [{ + "index": "1", + "type": "include", + "message_criteria": { + "severities": "error,informational", + "name_pattern": "callhome.*", + } +}, { + "index": "2", + "type": "exclude", + "message_criteria": { + "severities": "alert", + "name_pattern": "callhome.*", + } +}] + +DEFAULT_RULE_MODIFY_SEVERITIES_2_RULES = [{ + "index": "1", + "type": "include", + "message_criteria": { + "severities": "informational", + "name_pattern": "callhome.*", + } +}, { + "index": "2", + "type": "include", + "message_criteria": { + "severities": "alert", + "name_pattern": "callhome.*", + } +}] + +DEFAULT_RULE_MODIFY_NAME_PATTERN_2_RULES = [{ + "index": "1", + "type": "include", + "message_criteria": { + "severities": "error,informational", + "name_pattern": "*", + } +}, { + "index": "2", + "type": "include", + "message_criteria": { + "severities": "alert", + "name_pattern": "callhome.*", + } +}] + +DEFAULT_RULE_STARS = [{ + "index": "1", + "type": "include", + "message_criteria": { + "severities": "*", + "name_pattern": "*", + } +}] + + +def test_get_ems_filter_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['empty_records']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_ems_filter() is None + + +def test_get_ems_filter_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching ems filter snmp-traphost: calling: support/ems/filters: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_ems_filter, 'fail')['msg'] + + +def test_get_ems_filter_get(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['ems_filter']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_ems_filter() is not None + + +def test_create_ems_filter(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['empty_records']), + ('POST', 'support/ems/filters', SRR['empty_good']) + ]) + module_args = {'rules': DEFAULT_RULE} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_ems_filter_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('POST', 'support/ems/filters', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['rules'] = DEFAULT_RULE + error = expect_and_capture_ansible_exception(my_obj.create_ems_filter, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error creating EMS filter snmp-traphost: calling: support/ems/filters: got Expected error.' 
== error + + +def test_delete_ems_filter(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['ems_filter']), + ('DELETE', 'support/ems/filters/snmp-traphost', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_ems_filter_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('DELETE', 'support/ems/filters/snmp-traphost', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['rules'] = DEFAULT_RULE + error = expect_and_capture_ansible_exception(my_obj.delete_ems_filter, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error deleting EMS filter snmp-traphost: calling: support/ems/filters/snmp-traphost: got Expected error.' == error + + +def test_modify_ems_filter_add_rule(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['ems_filter']), + ('PATCH', 'support/ems/filters/snmp-traphost', SRR['empty_good']) + ]) + module_args = {'rules': DEFAULT_RULE_2_RULES} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ems_filter_change_type(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['ems_filter_2_riles']), + ('PATCH', 'support/ems/filters/snmp-traphost', SRR['empty_good']) + ]) + module_args = {'rules': DEFAULT_RULE_MODIFY_TYPE_2_RULES} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ems_filter_change_severities(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['ems_filter_2_riles']), + ('PATCH', 'support/ems/filters/snmp-traphost', SRR['empty_good']) + ]) + module_args = {'rules': DEFAULT_RULE_MODIFY_SEVERITIES_2_RULES} + assert create_and_apply(my_module, DEFAULT_ARGS, 
module_args)['changed'] + + +def test_modify_ems_filter_change_name_pattern(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['ems_filter_2_riles']), + ('PATCH', 'support/ems/filters/snmp-traphost', SRR['empty_good']) + ]) + module_args = {'rules': DEFAULT_RULE_MODIFY_NAME_PATTERN_2_RULES} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ems_filter_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('PATCH', 'support/ems/filters/snmp-traphost', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['rules'] = DEFAULT_RULE_2_RULES + error = expect_and_capture_ansible_exception(my_obj.modify_ems_filter, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error modifying EMS filter snmp-traphost: calling: support/ems/filters/snmp-traphost: got Expected error.' == error + + +def test_modify_ems_filter_no_rules(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['ems_filter_no_rules']), + ]) + assert not create_and_apply(my_module, DEFAULT_ARGS, {})['changed'] + + +def test_modify_star_test(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'support/ems/filters', SRR['ems_filter']), + ('PATCH', 'support/ems/filters/snmp-traphost', SRR['empty_good']) + ]) + module_args = {'rules': DEFAULT_RULE_STARS} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy.py new file mode 100644 index 000000000..6d62fc497 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy.py @@ -0,0 +1,277 @@ +# (c) 2019-2023, NetApp, Inc +# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_volume_export_policy ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy \ + import NetAppONTAPExportPolicy as policy_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'get_uuid_policy_id_export_policy': ( + 200, + { + "records": [{ + "svm": { + "uuid": "uuid", + "name": "svm"}, + "id": 123, + "name": "ansible" + }], + "num_records": 1}, None), + "no_record": ( + 200, + {"num_records": 0}, + None) +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.kind == 'export_policy': + xml = self.build_export_policy_info(self.params) + 
self.xml_out = xml + return xml + + @staticmethod + def build_export_policy_info(export_policy_details): + xml = netapp_utils.zapi.NaElement('xml') + data = {'num-records': 1, + 'attributes-list': {'export-policy-info': {'name': export_policy_details['name'] + }}} + xml.translate_struct(data) + return xml + + +class TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_export_policy ''' + + def setUp(self): + self.mock_export_policy = { + 'name': 'test_policy', + 'vserver': 'test_vserver' + } + + def mock_args(self, rest=False): + if rest: + return { + 'vserver': self.mock_export_policy['vserver'], + 'name': self.mock_export_policy['name'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!' + } + else: + return { + 'vserver': self.mock_export_policy['vserver'], + 'name': self.mock_export_policy['name'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never' + } + + def get_export_policy_mock_object(self, cx_type='zapi', kind=None): + policy_obj = policy_module() + if cx_type == 'zapi': + if kind is None: + policy_obj.server = MockONTAPConnection() + elif kind == 'export_policy': + policy_obj.server = MockONTAPConnection(kind='export_policy', data=self.mock_export_policy) + return policy_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + policy_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy.NetAppONTAPExportPolicy.create_export_policy') + def test_successful_create(self, create_export_policy): + ''' Test successful create ''' + data = self.mock_args() + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_export_policy_mock_object().apply() + assert exc.value.args[0]['changed'] + create_export_policy.assert_called_with() + + 
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy.NetAppONTAPExportPolicy.get_export_policy') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy.NetAppONTAPExportPolicy.rename_export_policy') + def test_successful_rename(self, rename_export_policy, get_export_policy): + ''' Test successful rename ''' + data = self.mock_args() + data['from_name'] = 'old_policy' + set_module_args(data) + get_export_policy.side_effect = [ + None, + {'policy-name': 'old_policy'} + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_export_policy_mock_object().apply() + assert exc.value.args[0]['changed'] + rename_export_policy.assert_called_with() + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_create(self, mock_request): + '''Test successful rest create''' + data = self.mock_args(rest=True) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['no_record'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_export_policy_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_delete(self, mock_request): + '''Test successful rest delete''' + data = self.mock_args(rest=True) + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid_policy_id_export_policy'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_export_policy_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_fail_get_export_policy(self, mock_request): + '''Test successful rest delete''' + data = 
self.mock_args(rest=True) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_export_policy_mock_object(cx_type='rest').apply() + assert 'Error on fetching export policy: calling: protocols/nfs/export-policies/: got Expected error' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_ignore_from_name_when_state_absent(self, mock_request): + '''Test from_name is skipped for state absent''' + data = self.mock_args(rest=True) + data['from_name'] = 'ansible' + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid_policy_id_export_policy'], # this is record for name, from_name is skipped. + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_export_policy_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_rename(self, mock_request): + '''Test successful rest rename''' + data = self.mock_args(rest=True) + data['from_name'] = 'ansible' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['no_record'], + SRR['get_uuid_policy_id_export_policy'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_export_policy_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error_create(self, mock_request): + '''Test error rest create''' + data = self.mock_args(rest=True) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['no_record'], + SRR['generic_error'], + SRR['empty_good'], + 
SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_export_policy_mock_object(cx_type='rest').apply() + assert 'Error on creating export policy: calling: protocols/nfs/export-policies: got Expected error.' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error_delete(self, mock_request): + '''Test error rest delete''' + data = self.mock_args(rest=True) + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid_policy_id_export_policy'], + SRR['generic_error'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_export_policy_mock_object(cx_type='rest').apply() + print(exc.value.args[0]['msg']) + assert 'Error on deleting export policy: calling: protocols/nfs/export-policies/123: got Expected error.' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error_rename(self, mock_request): + '''Test error rest rename''' + data = self.mock_args(rest=True) + data['from_name'] = 'ansible' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['no_record'], + SRR['get_uuid_policy_id_export_policy'], + SRR['generic_error'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_export_policy_mock_object(cx_type='rest').apply() + print(exc.value.args[0]['msg']) + assert 'Error on renaming export policy: calling: protocols/nfs/export-policies/123: got Expected error.' 
in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py new file mode 100644 index 000000000..66709fc0b --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule.py @@ -0,0 +1,404 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + call_main, create_module, expect_and_capture_ansible_exception, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy_rule import NetAppontapExportRule as my_module, main as my_main + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +policy = { + 'attributes-list': { + 'export-policy-info': { + 'policy-name': 'name', + 'policy-id': '345' + }}} + +policy_rule = { + 'attributes-list': { + 'export-rule-info': { + 'policy-name': 'policy_name', + 'client-match': 'client_match', + 'ro-rule': [{ + 'security-flavor': 'any' + }], + 'rw-rule': [{ + 'security-flavor': 'any' + }], + 'protocol': [{ + 'access-protocol': 
'protocol' + }], + 'super-user-security': { + 'security-flavor': 'any' + }, + 'is-allow-set-uid-enabled': 'false', + 'rule-index': 123, + 'anonymous-user-id': 'anonymous_user_id', + 'is-allow-dev-is-enabled': 'false', + 'export-chown-mode': 'restricted' + }}} + +policy_rule_two_records = { + 'attributes-list': [ + {'export-rule-info': { + 'policy-name': 'policy_name', + 'client-match': 'client_match1,client_match2', + 'ro-rule': [{ + 'security-flavor': 'any' + }], + 'rw-rule': [{ + 'security-flavor': 'any' + }], + 'protocol': [{ + 'access-protocol': 'protocol' + }], + 'super-user-security': { + 'security-flavor': 'any' + }, + 'is-allow-set-uid-enabled': 'false', + 'rule-index': 123, + 'anonymous-user-id': 'anonymous_user_id', + 'is-allow-dev-is-enabled': 'false', + 'export-chown-mode': 'restricted' + }}, + {'export-rule-info': { + 'policy-name': 'policy_name', + 'client-match': 'client_match2,client_match1', + 'ro-rule': [{ + 'security-flavor': 'any' + }], + 'rw-rule': [{ + 'security-flavor': 'any' + }], + 'protocol': [{ + 'access-protocol': 'protocol' + }], + 'super-user-security': { + 'security-flavor': 'any' + }, + 'is-allow-set-uid-enabled': 'false', + 'rule-index': 123, + 'anonymous-user-id': 'anonymous_user_id', + 'is-allow-dev-is-enabled': 'false', + 'export-chown-mode': 'restricted' + }}] +} + + +ZRR = zapi_responses({ + 'one_policy_record': build_zapi_response(policy, 1), + 'one_bad_policy_record': build_zapi_response({'error': 'no_policy_id'}, 1), + 'one_rule_record': build_zapi_response(policy_rule, 1), + 'two_rule_records': build_zapi_response(policy_rule_two_records, 2), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'never', + 'policy_name': 'policy_name', + 'vserver': 'vserver', + +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + args = dict(DEFAULT_ARGS) + args.pop('vserver') + error = 
'missing required arguments:' + assert error in call_main(my_main, args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + error = 'Error: the python NetApp-Lib module is required. Import error: None' + assert error in call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] + + +def test_get_nonexistent_rule(): + ''' Test if get_export_policy_rule returns None for non-existent policy ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['no_records']), + ]) + module_args = { + 'rule_index': 3 + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_export_policy_rule(3) is None + + +def test_get_nonexistent_policy(): + ''' Test if get_export_policy returns None for non-existent policy ''' + register_responses([ + ('ZAPI', 'export-policy-get-iter', ZRR['no_records']), + ]) + module_args = { + 'rule_index': 3 + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.set_export_policy_id() is None + + +def test_get_existing_rule(): + ''' Test if get_export_policy_rule returns rule details for existing policy ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['one_rule_record']), + ]) + module_args = { + 'rule_index': 3 + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + result = my_obj.get_export_policy_rule(3) + assert result + assert result['name'] == 'policy_name' + assert result['client_match'] == ['client_match'] + assert result['ro_rule'] == ['any'] + + +def test_get_existing_policy(): + ''' Test if get_export_policy returns policy details for existing policy ''' + register_responses([ + ('ZAPI', 'export-policy-get-iter', ZRR['one_policy_record']), + ]) + module_args = { + 'rule_index': 3 + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + my_obj.set_export_policy_id() + assert my_obj.policy_id == 
'345' + + +def test_create_missing_param_error(): + ''' Test validation error from create ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['no_records']), + ('ZAPI', 'export-rule-get-iter', ZRR['no_records']), + ('ZAPI', 'export-policy-get-iter', ZRR['one_policy_record']), + ]) + module_args = { + 'client_match': 'client_match', + 'rw_rule': 'any', + 'rule_index': 3 + } + msg = 'Error: Missing required option for creating export policy rule: ro_rule' + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_successful_create_with_index(): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['no_records']), + ('ZAPI', 'export-rule-get-iter', ZRR['no_records']), + ('ZAPI', 'export-policy-get-iter', ZRR['no_records']), + ('ZAPI', 'export-policy-create', ZRR['success']), + ('ZAPI', 'export-policy-get-iter', ZRR['one_policy_record']), + ('ZAPI', 'export-rule-create', ZRR['success']), + ]) + module_args = { + 'client_match': 'client_match', + 'rw_rule': 'any', + 'ro_rule': 'any', + 'rule_index': 123 + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_create_no_index(): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['no_records']), + ('ZAPI', 'export-policy-get-iter', ZRR['one_policy_record']), + ('ZAPI', 'export-rule-create', ZRR['success']), + ]) + module_args = { + 'client_match': 'client_match', + 'rw_rule': 'any', + 'ro_rule': 'any' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_idempotency(): + ''' Test create idempotency ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['one_rule_record']), + ]) + module_args = { + 'client_match': 'client_match', + 'rw_rule': 'any', + 'ro_rule': 'any' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete(): + ''' Test delete ''' + 
register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['one_rule_record']), + ('ZAPI', 'export-policy-get-iter', ZRR['one_policy_record']), + ('ZAPI', 'export-rule-destroy', ZRR['success']), + ]) + module_args = { + 'state': 'absent', + 'rule_index': 3 + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_idempotency(): + ''' Test delete idempotency ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['no_records']), + ]) + module_args = { + 'state': 'absent', + 'rule_index': 3 + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify(): + ''' Test successful modify protocol ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['one_rule_record']), + ('ZAPI', 'export-policy-get-iter', ZRR['one_policy_record']), + ('ZAPI', 'export-rule-modify', ZRR['success']), + ]) + module_args = { + 'protocol': ['cifs'], + 'allow_suid': True, + 'rule_index': 3, + 'allow_device_creation': True, + 'chown_mode': 'unrestricted' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_on_ambiguous_delete(): + ''' Test error if multiple entries match for a delete ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['two_rule_records']), + ]) + module_args = { + 'state': 'absent', + 'client_match': 'client_match1,client_match2', + 'rw_rule': 'any', + 'ro_rule': 'any' + } + error = "Error multiple records exist for query:" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_helper_query_parameters(): + ''' Test helper method set_query_parameters() ''' + register_responses([ + ]) + module_args = { + 'client_match': 'client_match1,client_match2', + 'rw_rule': 'any', + 'ro_rule': 'any' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + result = my_obj.set_query_parameters(10) + print(result) + assert 'query' in result + assert 'export-rule-info' in result['query'] + 
assert result['query']['export-rule-info']['rule-index'] == 10 + result = my_obj.set_query_parameters(None) + print(result) + assert 'client-match' not in result['query']['export-rule-info'] + assert result['query']['export-rule-info']['rw-rule'] == [{'security-flavor': 'any'}] + + +def test_error_calling_zapis(): + ''' Test error handing ''' + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['error']), + ('ZAPI', 'export-policy-get-iter', ZRR['error']), + ('ZAPI', 'export-policy-get-iter', ZRR['one_bad_policy_record']), + ('ZAPI', 'export-rule-create', ZRR['error']), + ('ZAPI', 'export-policy-create', ZRR['error']), + ('ZAPI', 'export-rule-destroy', ZRR['error']), + ('ZAPI', 'export-rule-modify', ZRR['error']), + ('ZAPI', 'export-rule-set-index', ZRR['error']), + ]) + module_args = { + 'client_match': 'client_match1,client_match2', + 'rw_rule': 'any', + 'ro_rule': 'any', + 'from_rule_index': 123, + 'rule_index': 124, + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = zapi_error_message('Error getting export policy rule policy_name') + assert error in expect_and_capture_ansible_exception(my_obj.get_export_policy_rule, 'fail', None)['msg'] + error = zapi_error_message('Error getting export policy policy_name') + assert error in expect_and_capture_ansible_exception(my_obj.set_export_policy_id, 'fail')['msg'] + error = 'Error getting export policy id for policy_name: got' + assert error in expect_and_capture_ansible_exception(my_obj.set_export_policy_id, 'fail')['msg'] + error = zapi_error_message('Error creating export policy rule policy_name') + assert error in expect_and_capture_ansible_exception(my_obj.create_export_policy_rule, 'fail')['msg'] + error = zapi_error_message('Error creating export policy policy_name') + assert error in expect_and_capture_ansible_exception(my_obj.create_export_policy, 'fail')['msg'] + error = zapi_error_message('Error deleting export policy rule policy_name') + assert error in 
expect_and_capture_ansible_exception(my_obj.delete_export_policy_rule, 'fail', 123)['msg'] + error = zapi_error_message('Error modifying export policy rule index 123') + assert error in expect_and_capture_ansible_exception(my_obj.modify_export_policy_rule, 'fail', {'rw_rule': ['any']}, 123)['msg'] + error = zapi_error_message('Error reindexing export policy rule index 123') + assert error in expect_and_capture_ansible_exception(my_obj.modify_export_policy_rule, 'fail', {'rule_index': 123}, 123, True)['msg'] + + +def test_index_existing_entry(): + """ validate entry can be found without index, and add index """ + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['no_records']), + ('ZAPI', 'export-rule-get-iter', ZRR['one_rule_record']), + ('ZAPI', 'export-policy-get-iter', ZRR['one_policy_record']), + ('ZAPI', 'export-rule-set-index', ZRR['success']), + ]) + module_args = { + 'client_match': 'client_match', + 'rw_rule': 'any', + 'ro_rule': 'any', + 'rule_index': 124, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_no_index(): + """ validate entry can be found without index, and deleted """ + register_responses([ + ('ZAPI', 'export-rule-get-iter', ZRR['two_rule_records']), + ('ZAPI', 'export-policy-get-iter', ZRR['one_policy_record']), + ('ZAPI', 'export-rule-destroy', ZRR['success']), + ]) + module_args = { + 'client_match': 'client_match2,client_match1', + 'rw_rule': 'any', + 'ro_rule': 'any', + 'state': 'absent', + 'force_delete_on_first_match': True, + 'allow_suid': False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule_rest.py new file mode 100644 index 000000000..b1fb870e5 --- /dev/null +++ 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_export_policy_rule_rest.py @@ -0,0 +1,387 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import copy +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + call_main, patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy_rule \ + import NetAppontapExportRule as policy_rule, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +# REST API canned responses when mocking send_request. +# The rest_factory provides default responses shared across testcases. 
+SRR = rest_responses({ + 'get_uuid_policy_id_export_policy': (200, {"records": [ + { + "svm": {"uuid": "uuid", "name": "svm"}, + "id": 123, + "name": "ansible" + }], "num_records": 1}, None), + 'get_export_policy_rules': (200, {"records": [ + { + "rw_rule": ["any"], + "_links": {"self": {"href": "/api/resourcelink"}}, + "ro_rule": ["any"], + "allow_suid": True, + "chown_mode": "restricted", + "index": 10, + "superuser": ["any"], + "protocols": ["any"], + "anonymous_user": "1234", + "clients": [{"match": "10.10.0.0/16"}, {"match": "10.0.0.0/16"}, {"match": "10.20.0.0/16"}], + "ntfs_unix_security": "fail", + "allow_device_creation": True + }], "num_records": 1}, None), + 'get_export_policy_two_rules': (200, {"records": [ + { + "rw_rule": ["any"], + "_links": {"self": {"href": "/api/resourcelink"}}, + "ro_rule": ["any"], + "allow_suid": True, + "chown_mode": "restricted", + "index": 10, + "superuser": ["any"], + "protocols": ["any"], + "anonymous_user": "1234", + "clients": [{"match": "0.0.0.0/0"}], + "ntfs_unix_security": "fail", + "allow_device_creation": True + }, + { + "rw_rule": ["any"], + "ro_rule": ["any"], + "allow_suid": True, + "chown_mode": "restricted", + "index": 11, + "superuser": ["any"], + "protocols": ["any"], + "anonymous_user": "1234", + "clients": [{"match": "0.0.0.0/0"}], + "ntfs_unix_security": "fail", + "allow_device_creation": True + }], "num_records": 2}, None), + 'create_export_policy_rules': (200, {"records": [ + { + "rw_rule": ["any"], + "_links": {"self": {"href": "/api/resourcelink"}}, + "ro_rule": ["any"], + "allow_suid": True, + "chown_mode": "restricted", + "index": 1, + "superuser": ["any"], + "protocols": ["any"], + "anonymous_user": "1234", + "clients": [{"match": "0.0.0.0/0"}], + "ntfs_unix_security": "fail", + "allow_device_creation": True + }], "num_records": 1}, None), + 'error_does_not_exist': (400, None, {'message': "entry doesn't exist"}) +}) + + +DEFAULT_ARGS = { + 'name': 'test', + 'client_match': ['1.1.1.0', '0.0.0.0/0'], 
+ 'vserver': 'test', + 'protocol': 'nfs', + 'anonymous_user_id': '65534', + 'super_user_security': ['any'], + 'ntfs_unix_security': 'fail', + 'ro_rule': 'any', + 'rw_rule': 'any', + 'allow_device_creation': True, + 'allow_suid': True, + 'chown_mode': 'restricted', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always', +} + + +def test_rest_successful_create_rule(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/10', SRR['empty_records']), + ('GET', 'protocols/nfs/export-policies/123/rules', SRR['empty_records']), + ('POST', 'protocols/nfs/export-policies/123/rules?return_records=true', SRR['create_export_policy_rules']), + ('PATCH', 'protocols/nfs/export-policies/123/rules/1', SRR['empty_records']) + ]) + assert create_and_apply(policy_rule, DEFAULT_ARGS, {'rule_index': 10})['changed'] + + +def test_rest_error_get_policy(): + '''Test error rest get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['generic_error']) + ]) + my_module_object = create_module(policy_rule, DEFAULT_ARGS) + msg = 'Error on fetching export policy: calling: protocols/nfs/export-policies: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_export_policy_rule_rest, 'fail', 1)['msg'] + + +def test_rest_error_get_rule(): + '''Test error rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/10', SRR['generic_error']), + # 2nd try - this time without index + ('GET', 'protocols/nfs/export-policies/123/rules', SRR['generic_error']), + # 3rd try + ('GET', 'protocols/nfs/export-policies/123/rules', SRR['error_does_not_exist']), + # 4thtry + ('GET', 'protocols/nfs/export-policies/123/rules', SRR['get_export_policy_two_rules']), + ]) + module_args = { + 'anonymous_user_id': '1234', + 'protocol': 'any', + 'super_user_security': 'any', + 'client_match': ['0.0.0.0/0'], + 'ntfs_unix_security': 'fail', + 'ro_rule': ['any'], + 'rw_rule': ['any'], + 'rule_index': 10 + } + my_module_object = create_module(policy_rule, DEFAULT_ARGS, module_args) + msg = rest_error_message('Error on fetching export policy rule', 'protocols/nfs/export-policies/123/rules/10') + assert msg in expect_and_capture_ansible_exception(my_module_object.get_export_policy_rule, 'fail', 10)['msg'] + # error with no index + msg = rest_error_message('Error on fetching export policy rules', 'protocols/nfs/export-policies/123/rules') + assert msg in expect_and_capture_ansible_exception(my_module_object.get_export_policy_rule, 'fail', None)['msg'] + # does not exist error is ignored + assert my_module_object.get_export_policy_rule(None) is None + # multiple entries error + msg = 'Error multiple records exist for query:' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_export_policy_rule, 'fail', None)['msg'] + + +def test_rest_error_create_rule(): + '''Test error rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', 
SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/10', SRR['empty_records']), + ('GET', 'protocols/nfs/export-policies/123/rules', SRR['empty_records']), + ('POST', 'protocols/nfs/export-policies/123/rules?return_records=true', SRR['generic_error']), + # 2nd call + ('GET', 'protocols/nfs/export-policies/123/rules/10', SRR['empty_records']), + ('GET', 'protocols/nfs/export-policies/123/rules', SRR['empty_records']), + ('POST', 'protocols/nfs/export-policies/123/rules?return_records=true', SRR['empty_records']) + ]) + my_module_object = create_module(policy_rule, DEFAULT_ARGS, {'rule_index': 10}) + msg = rest_error_message('Error on creating export policy rule', 'protocols/nfs/export-policies/123/rules?return_records=true') + assert msg in expect_and_capture_ansible_exception(my_module_object.apply, 'fail')['msg'] + msg = 'Error on creating export policy rule, returned response is invalid:' + assert msg in expect_and_capture_ansible_exception(my_module_object.apply, 'fail')['msg'] + + +def test_rest_successful_delete_rule(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/10', copy.deepcopy(SRR['get_export_policy_rules'])), + ('DELETE', 'protocols/nfs/export-policies/123/rules/10', SRR['empty_good']) + ]) + assert create_and_apply(policy_rule, DEFAULT_ARGS, {'rule_index': 10, 'state': 'absent'})['changed'] + + +def test_rest_error_delete(): + '''Test error rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/10', copy.deepcopy(SRR['get_export_policy_rules'])), + ('DELETE', 'protocols/nfs/export-policies/123/rules/10', SRR['generic_error']) + ]) + my_module_object = 
create_module(policy_rule, DEFAULT_ARGS, {'rule_index': 10, 'state': 'absent'}) + msg = 'Error on deleting export policy Rule: calling: protocols/nfs/export-policies/123/rules/10: got Expected error.' + assert msg in expect_and_capture_ansible_exception(my_module_object.apply, 'fail')['msg'] + + +def test_rest_successful_create_policy_and_rule(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['empty_records']), + ('GET', 'protocols/nfs/export-policies', SRR['empty_records']), + ('GET', 'protocols/nfs/export-policies', SRR['empty_records']), + ('POST', 'protocols/nfs/export-policies', SRR['empty_good']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('POST', 'protocols/nfs/export-policies/123/rules?return_records=true', SRR['create_export_policy_rules']), + ('PATCH', 'protocols/nfs/export-policies/123/rules/1', SRR['empty_records']) + ]) + assert create_and_apply(policy_rule, DEFAULT_ARGS, {'rule_index': 10})['changed'] + + +def test_rest_error_creating_policy(): + '''Test error rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['empty_records']), + ('GET', 'protocols/nfs/export-policies', SRR['empty_records']), + ('POST', 'protocols/nfs/export-policies', SRR['generic_error']), + ]) + my_module_object = create_module(policy_rule, DEFAULT_ARGS) + msg = 'Error on creating export policy: calling: protocols/nfs/export-policies: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.apply, 'fail')['msg'] + + +def test_rest_successful_modify(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/10', copy.deepcopy(SRR['get_export_policy_rules'])), + ('PATCH', 'protocols/nfs/export-policies/123/rules/10', SRR['empty_good']) + ]) + module_args = { + 'anonymous_user_id': '1234', + 'protocol': 'nfs4', + 'super_user_security': 'krb5i', + 'client_match': ['1.1.1.3', '1.1.0.3'], + 'ntfs_unix_security': 'ignore', + 'ro_rule': ['never'], + 'rw_rule': ['never'], + 'rule_index': 10, + 'allow_device_creation': False, + 'allow_suid': False, + 'chown_mode': 'unrestricted' + } + assert create_and_apply(policy_rule, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_error_modify(): + '''Test error rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/10', copy.deepcopy(SRR['get_export_policy_rules'])), + ('PATCH', 'protocols/nfs/export-policies/123/rules/10', SRR['generic_error']) + ]) + module_args = { + 'anonymous_user_id': '1234', + 'protocol': 'nfs4', + 'super_user_security': 'krb5i', + 'rule_index': 10 + } + + my_module_object = create_module(policy_rule, DEFAULT_ARGS, module_args) + msg = 'Error on modifying export policy Rule: calling: protocols/nfs/export-policies/123/rules/10: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.apply, 'fail')['msg'] + + +def test_rest_successful_rename(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/2', SRR['empty_records']), + ('GET', 'protocols/nfs/export-policies/123/rules/10', copy.deepcopy(SRR['get_export_policy_rules'])), + ('PATCH', 'protocols/nfs/export-policies/123/rules/10', SRR['empty_records']) + ]) + module_args = { + 'anonymous_user_id': '1234', + 'protocol': 'nfs4', + 'super_user_security': 'krb5i', + 'client_match': ['1.1.1.3', '1.1.0.3'], + 'ntfs_unix_security': 'ignore', + 'ro_rule': ['never'], + 'rw_rule': ['never'], + 'rule_index': 2, + 'from_rule_index': 10, + 'allow_device_creation': False, + 'allow_suid': False, + 'chown_mode': 'unrestricted' + } + assert create_and_apply(policy_rule, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successful_rename_no_from_index(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/2', SRR['error_does_not_exist']), + ('GET', 'protocols/nfs/export-policies/123/rules', copy.deepcopy(SRR['get_export_policy_rules'])), + ('PATCH', 'protocols/nfs/export-policies/123/rules/10', SRR['empty_records']) + ]) + module_args = { + 'anonymous_user_id': '1234', + 'protocol': 'any', + 'super_user_security': 'any', + 'client_match': ["10.10.0.0/16", "10.20.0.0/16", "10.0.0.0/16"], + 'ntfs_unix_security': 'fail', + 'ro_rule': ['any'], + 'rw_rule': ['any'], + 'rule_index': 2 + } + assert create_and_apply(policy_rule, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_error_rename_with_from_index_not_found(): + """ rename is requested but from rule is not found """ + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), 
+ ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/3', SRR['error_does_not_exist']), + ('GET', 'protocols/nfs/export-policies/123/rules/2', SRR['error_does_not_exist']), + ]) + module_args = { + 'anonymous_user_id': '1234', + 'protocol': 'nfs4', + 'super_user_security': 'krb5i', + 'client_match': ['1.1.1.3', '1.1.0.3'], + 'ntfs_unix_security': 'ignore', + 'ro_rule': ['never'], + 'rw_rule': ['never'], + 'rule_index': 3, + 'from_rule_index': 2, + } + msg = 'Error reindexing: export policy rule 2 does not exist.' + assert msg in create_and_apply(policy_rule, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_delete_no_index_multiple(): + """ delete is requested but 2 rules are found """ + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules', SRR['get_export_policy_two_rules']), + # 2nd run + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules', SRR['get_export_policy_two_rules']), + ('DELETE', 'protocols/nfs/export-policies/123/rules/10', SRR['success']) + ]) + module_args = { + 'anonymous_user_id': '1234', + 'protocol': 'any', + 'super_user_security': 'any', + 'client_match': ['0.0.0.0/0'], + 'ntfs_unix_security': 'fail', + 'ro_rule': ['any'], + 'rw_rule': ['any'], + 'state': 'absent' + } + msg = 'Error multiple records exist for query:' + assert msg in create_and_apply(policy_rule, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['force_delete_on_first_match'] = True + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fcp_rest.py 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fcp_rest.py new file mode 100644 index 000000000..4bd7c35a8 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fcp_rest.py @@ -0,0 +1,231 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import copy +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fcp \ + import NetAppOntapFCP as fcp # module under test + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'fcp_record': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "enabled": True, + "target": { + "name": "20:05:00:50:56:b3:0c:fa" + } + } + ], + "num_records": 1 + }, None + ), + 'fcp_record_disabled': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "enabled": False, + "target": { + "name": "20:05:00:50:56:b3:0c:fa" + } + } + ], + "num_records": 1 + }, None + ), + "no_record": ( + 200, + {"num_records": 0}, + None) +} + + +class MockONTAPConnection(object): + ''' mock server connection 
to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.data = data + self.xml_in = None + self.xml_out = None + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.mock_rule = {} + + def mock_args(self, rest=False): + if rest: + return { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always', + 'vserver': 'test_vserver', + } + + def get_mock_object(self, kind=None): + """ + Helper method to return an na_ontap_firewall_policy object + :param kind: passes this param to MockONTAPConnection() + :return: na_ontap_firewall_policy object + """ + obj = fcp() + return obj + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error_get(self, mock_request): + '''Test error rest create''' + data = self.mock_args(rest=True) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_mock_object().apply() + assert 'Error on fetching fcp: calling: protocols/san/fcp/services: got Expected error.' 
in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_create(self, mock_request): + '''Test successful rest create''' + data = self.mock_args(rest=True) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['no_record'], + SRR['empty_good'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error_create(self, mock_request): + '''Test error rest create''' + data = self.mock_args(rest=True) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['no_record'], + SRR['generic_error'], + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_mock_object().apply() + assert 'Error on creating fcp: calling: protocols/san/fcp/services: got Expected error.' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_delete(self, mock_request): + '''Test successful rest delete''' + data = self.mock_args(rest=True) + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + # the module under test modifies record directly, and may cause other tests to fail + copy.deepcopy(SRR['fcp_record']), + SRR['empty_good'], + SRR['empty_good'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error_delete(self, mock_request): + '''Test error rest delete''' + data = self.mock_args(rest=True) + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + copy.deepcopy(SRR['fcp_record']), + 
SRR['empty_good'], + SRR['generic_error'], + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_mock_object().apply() + assert 'Error on deleting fcp policy: calling: ' + \ + 'protocols/san/fcp/services/671aa46e-11ad-11ec-a267-005056b30cfa: got Expected error.' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_disable(self, mock_request): + '''Test successful rest disable''' + data = self.mock_args(rest=True) + data['status'] = 'down' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + copy.deepcopy(SRR['fcp_record']), + SRR['empty_good'], + + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_enable(self, mock_request): + '''Test successful rest enable''' + data = self.mock_args(rest=True) + data['status'] = 'up' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + copy.deepcopy(SRR['fcp_record_disabled']), + SRR['empty_good'], + + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error_enabled_change(self, mock_request): + '''Test error rest change''' + data = self.mock_args(rest=True) + data['status'] = 'down' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + copy.deepcopy(SRR['fcp_record']), + SRR['generic_error'], + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_mock_object().apply() + assert 'Error on modifying fcp: calling: ' + \ + 'protocols/san/fcp/services/671aa46e-11ad-11ec-a267-005056b30cfa: ' + \ + 'got Expected error.' 
in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdsd.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdsd.py new file mode 100644 index 000000000..5076af5f9 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdsd.py @@ -0,0 +1,136 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP na_ontap_fdsd Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fdsd \ + import NetAppOntapFDSD as my_module # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def default_args(): + args = { + 'name': 'test', + 'vserver': 'vserver1', + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], 
num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'ntfs_record': ( + 200, { + 'records': [{ + 'vserver': 'vserver1', + 'ntfs_sd': 'sd1'}], + 'num_records': 1}, + None), + +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_rest_missing_arguments(patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' test missing arguements ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_remove(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' remove Security Descriptor ''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['ntfs_record'], + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Idempotent test ''' + args = dict(default_args()) + args['name'] = 'sd1' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['ntfs_record'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with 
pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Create security descriptor''' + args = dict(default_args()) + args['name'] = 'new_sd' + print(args) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], + SRR['empty_good'], # create + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdsp.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdsp.py new file mode 100644 index 000000000..d523e7062 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdsp.py @@ -0,0 +1,134 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP na_ontap_fdsp Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fdsp \ + import NetAppOntapFDSP as my_module # module under test + + +if not 
netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def default_args(): + args = { + 'name': 'test', + 'vserver': 'vserver1', + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'security_policy_record': ( + 200, { + 'records': [{ + 'vserver': 'vserver1', + 'policy_name': 'test'}], + 'num_records': 1}, + None), + +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_rest_missing_arguments(patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' test missing arguements ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Create security policies''' + args = dict(default_args()) + 
args['name'] = 'new_security_policy' + print(args) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], + SRR['empty_good'], # create + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_remove(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' remove Security policies ''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['security_policy_record'], + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Idempotent test ''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['security_policy_record'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdss.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdss.py new file mode 100644 index 000000000..22e06fc1f --- /dev/null +++ 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fdss.py @@ -0,0 +1,102 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP na_ontap_fdsg Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fdss \ + import NetAppOntapFDSS as my_module # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def default_args(): + args = { + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'vserver': 'vserver1', + 'name': 'policy1' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'job_id_record': ( + 200, { + 'job': { + 'uuid': '94b6e6a7-d426-11eb-ac81-00505690980f', + '_links': {'self': {'href': 
'/api/cluster/jobs/94b6e6a7-d426-11eb-ac81-00505690980f'}}}, + 'cli_output': ' Use the "job show -id 2379" command to view the status of this operation.'}, None), + 'job_response_record': ( + 200, { + "uuid": "f03ccbb6-d8bb-11eb-ac81-00505690980f", + "description": "File Directory Security Apply Job", + "state": "success", + "message": "Complete: Operation completed successfully. File ACLs modified using policy \"policy1\" on Vserver \"GBSMNAS80LD\". File count: 0. [0]", + "code": 0, + "start_time": "2021-06-29T05:25:26-04:00", + "end_time": "2021-06-29T05:25:26-04:00" + }, None + ) +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_rest_missing_arguments(patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' test missing arguements ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_success(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Create job to apply policy to directory ''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['job_id_record'], + SRR['job_response_record'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py new file mode 100644 index 000000000..94af48ed8 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_directory_policy.py @@ -0,0 +1,136 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_file_directory_policy ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_file_directory_policy \ + import NetAppOntapFilePolicy as policy_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + request = xml.to_string().decode('utf-8') + if self.kind == 'error': + raise netapp_utils.zapi.NaApiError('test', 'expect error') + elif request.startswith(""): + xml = None # or something that may the logger happy, and you don't need @patch anymore + # or + # xml = build_ems_log_response() + elif request.startswith(""): + if self.kind == 'create': + xml = 
self.build_sd_info() + else: + xml = self.build_sd_info(self.params) + elif request.startswith(""): + xml = self.build_sd_info(self.params) + self.xml_out = xml + return xml + + @staticmethod + def build_sd_info(data=None): + xml = netapp_utils.zapi.NaElement('xml') + attributes = {} + if data is not None: + attributes = {'num-records': 1, + 'attributes-list': {'file-directory-security-policy': {'policy-name': data['policy_name']}}} + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_file_directory_policy ''' + + def mock_args(self): + return { + 'vserver': 'vserver', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!' + } + + def get_policy_mock_object(self, type='zapi', kind=None, status=None): + policy_obj = policy_module() + if type == 'zapi': + if kind is None: + policy_obj.server = MockONTAPConnection() + else: + policy_obj.server = MockONTAPConnection(kind=kind, data=status) + return policy_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + policy_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_successfully_create_policy(self): + data = self.mock_args() + data['policy_name'] = 'test_policy' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_policy_mock_object('zapi', 'create', data).apply() + assert exc.value.args[0]['changed'] + + def test_error(self): + data = self.mock_args() + data['policy_name'] = 'test_policy' + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_mock_object('zapi', 'error', data).get_policy_iter() + assert exc.value.args[0]['msg'] == 'Error fetching file-directory policy test_policy: NetApp API failed. 
Reason - test:expect error' + + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_mock_object('zapi', 'error', data).create_policy() + assert exc.value.args[0]['msg'] == 'Error creating file-directory policy test_policy: NetApp API failed. Reason - test:expect error' + + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_mock_object('zapi', 'error', data).remove_policy() + assert exc.value.args[0]['msg'] == 'Error removing file-directory policy test_policy: NetApp API failed. Reason - test:expect error' + + data['path'] = '/vol' + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_mock_object('zapi', 'error', data).get_task_iter() + assert exc.value.args[0]['msg'] == 'Error fetching task from file-directory policy test_policy: NetApp API failed. Reason - test:expect error' + + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_mock_object('zapi', 'error', data).add_task_to_policy() + assert exc.value.args[0]['msg'] == 'Error adding task to file-directory policy test_policy: NetApp API failed. Reason - test:expect error' + + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_mock_object('zapi', 'error', data).remove_task_from_policy() + assert exc.value.args[0]['msg'] == 'Error removing task from file-directory policy test_policy: NetApp API failed. Reason - test:expect error' + + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_mock_object('zapi', 'error', data).modify_task(dict()) + assert exc.value.args[0]['msg'] == 'Error modifying task in file-directory policy test_policy: NetApp API failed. Reason - test:expect error' + + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_mock_object('zapi', 'error', data).set_sd() + assert exc.value.args[0]['msg'] == 'Error applying file-directory policy test_policy: NetApp API failed. 
Reason - test:expect error' diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_security_permissions.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_security_permissions.py new file mode 100644 index 000000000..b25dca7ab --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_security_permissions.py @@ -0,0 +1,647 @@ +# (c) 2022-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_warning_was_raised, print_warnings, \ + patch_ansible, call_main, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_file_security_permissions \ + import NetAppOntapFileSecurityPermissions as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def build_acl(user, access='access_allow', access_control='file_directory', apply_to=None, inherited=None, advanced_rights='all', rights=None): + if apply_to is None: + apply_to = {'this_folder': True} + if advanced_rights == 'all': + advanced_rights = { + 'append_data': True, + 'delete': True, + 'delete_child': True, + 'execute_file': True, + 
'full_control': True, + 'read_attr': True, + 'read_data': True, + 'read_ea': True, + 'read_perm': True, + 'synchronize': True, + 'write_attr': True, + 'write_data': True, + 'write_ea': True, + 'write_owner': True, + 'write_perm': True + } + + acl = { + 'access': access, + 'access_control': access_control, + 'advanced_rights': advanced_rights, + 'apply_to': apply_to, + 'user': user + } + if inherited is not None: + acl['inherited'] = inherited + if rights is not None: + acl['rights'] = rights + return acl + + +SRR = rest_responses({ + 'non_acl': (200, { + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_only_inherited_acl': (200, { + 'acls': [ + build_acl('Everyone', inherited=True) + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_multiple_user': (200, { + 'acls': [ + build_acl('NETAPPAD\\mohan9'), + build_acl('SERVER_CIFS_TE\\mohan11'), + build_acl('Everyone', inherited=True) + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_single_user_deny': (200, { + 'acls': [ + build_acl('NETAPPAD\\mohan9', access='access_deny') + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_single_user_deny_empty_advrights': (200, { + 'acls': [ + build_acl('NETAPPAD\\mohan9', access='access_deny', advanced_rights={}) + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 
'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_single_user_deny_empty_advrights_mohan11': (200, { + 'acls': [ + build_acl('NETAPPAD\\mohan9', access='access_deny', advanced_rights={}) + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'SERVER_CIFS_TE\\mohan11', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_single_user_rights': (200, { + 'acls': [ + build_acl('NETAPPAD\\mohan9', access='access_deny', advanced_rights={}, rights='full_control') + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'slag_acl_same_user': (200, { + 'acls': [ + build_acl('SERVER_CIFS_TE\\mohan11', access_control='slag', apply_to={'files': True}, advanced_rights={"append_data": True}, access='access_deny'), + build_acl('SERVER_CIFS_TE\\mohan11', access_control='slag', apply_to={'files': True}, advanced_rights={"append_data": True}) + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'svm_id': (200, { + 'uuid': '55bcb009' + }, None), + 'error_655865': (400, None, {'code': 655865, 'message': 'Expected error'}), +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'vserver', + 'path': '/vol200/aNewFile.txt', + 'acls': [ + { + "access": "access_allow", + "user": "SERVER_CIFS_TE\\mohan11", + "advanced_rights": {"append_data": True}, + "apply_to": {"this_folder": True, "files": False, "sub_folders": False} + }, + { + "access": "access_allow", + "user": "NETAPPAD\\mohan9", + "advanced_rights": {"append_data": True}, + "apply_to": {"this_folder": True, "files": False, 
"sub_folders": False} + }, + + ] +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "vserver", "path"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_create_file_directory_acl(): + ''' create file_directory acl and idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['zero_records']), + ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_multiple_user']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_multiple_user']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['non_acl']), + ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl', SRR['success']), + ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_multiple_user']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['zero_records']), + ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['non_acl']), + 
('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['non_acl']), + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS)['changed'] + # Add ACLs to an SD only record + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + # create SD only + args = dict(DEFAULT_ARGS) + args.pop('acls') + assert create_and_apply(my_module, args)['changed'] + assert not create_and_apply(my_module, args)['changed'] + + +def test_add_file_directory_acl(): + ''' add file_directory acl and idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_multiple_user']), + ('DELETE', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl/NETAPPAD%5Cmohan9', SRR['success']), + ('DELETE', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl/SERVER_CIFS_TE%5Cmohan11', SRR['success']), + ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny']) + ]) + args = { + 'acls': [{ + "access": "access_deny", + "user": "NETAPPAD\\mohan9", + "advanced_rights": {"append_data": True}, + "apply_to": {"this_folder": True, "files": False, "sub_folders": False}, + }] + } + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_delete_file_directory_acl(): + ''' add file_directory acl 
and idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_multiple_user']), + ('DELETE', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl/NETAPPAD%5Cmohan9', SRR['success']), + ('DELETE', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl/SERVER_CIFS_TE%5Cmohan11', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_only_inherited_acl']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_only_inherited_acl']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['error_655865']), + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['generic_error']), + ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['generic_error']), + ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl', SRR['generic_error']), + ('PATCH', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['generic_error']), + ('PATCH', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl/user1', SRR['generic_error']), + ('DELETE', 
'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl/user1', SRR['generic_error']) + ]) + + acl_obj = create_module(my_module, DEFAULT_ARGS) + acl_obj.svm_uuid = "55bcb009" + assert 'Error fetching file security' in expect_and_capture_ansible_exception(acl_obj.get_file_security_permissions, 'fail')['msg'] + assert 'Error creating file security' in expect_and_capture_ansible_exception(acl_obj.create_file_security_permissions, 'fail')['msg'] + assert 'Error adding file security' in expect_and_capture_ansible_exception(acl_obj.add_file_security_permissions_acl, 'fail', {})['msg'] + assert 'Error modifying file security' in expect_and_capture_ansible_exception(acl_obj.modify_file_security_permissions, 'fail', {})['msg'] + acl = {'user': 'user1'} + assert 'Error modifying file security' in expect_and_capture_ansible_exception(acl_obj.modify_file_security_permissions_acl, 'fail', acl)['msg'] + assert 'Error deleting file security permissions' in expect_and_capture_ansible_exception(acl_obj.delete_file_security_permissions_acl, 'fail', acl)['msg'] + # no network calls + assert 'Error: mismatch on path values: desired:' in expect_and_capture_ansible_exception( + acl_obj.get_modify_actions, 'fail', {'path': 'dummy'})['msg'] + + +def test_create_file_directory_slag(): + ''' create slag acl and idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['zero_records']), + ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['slag_acl_same_user']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['slag_acl_same_user']) + ]) + args = { + 'access_control': 
'slag', + 'acls': [ + { + 'access': 'access_deny', + 'access_control': 'slag', + 'advanced_rights': {'append_data': True}, + 'apply_to': {'files': True, "this_folder": False, "sub_folders": False}, + 'user': 'SERVER_CIFS_TE\\mohan11' + }, + { + 'access': 'access_allow', + 'access_control': 'slag', + 'advanced_rights': {'append_data': True}, + 'apply_to': {'files': True, "this_folder": False, "sub_folders": False}, + 'user': 'SERVER_CIFS_TE\\mohan11' + } + ] + } + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_modify_file_directory_owner(): + ''' modify file owner ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny_empty_advrights']), + ('PATCH', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny_empty_advrights_mohan11']), + ]) + args = { + 'acls': [{ + "access": "access_deny", + "user": "NETAPPAD\\mohan9", + "advanced_rights": {"append_data": False}, + "apply_to": {"this_folder": True, "files": False, "sub_folders": False}, + }], + 'owner': 'SERVER_CIFS_TE\\mohan11' + } + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + # idempotency already tested in create and add + + +def test_modify_file_directory_acl_advrights(): + ''' add file_directory acl and idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny']), + ('PATCH', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl/NETAPPAD%5Cmohan9', SRR['success']), + ('GET', 
'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny_empty_advrights']), + ]) + args = { + 'acls': [{ + "access": "access_deny", + "user": "NETAPPAD\\mohan9", + "advanced_rights": {"append_data": False}, + "apply_to": {"this_folder": True, "files": False, "sub_folders": False}, + }] + } + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + # idempotency already tested in create and add + + +def test_modify_file_directory_acl_rights(): + ''' add file_directory acl using rights + it always fails the validation check, as REST does not return rights + it is not idempotent for the same reason + ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny']), + ('PATCH', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl/NETAPPAD%5Cmohan9', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny_empty_advrights']), + # 2nd run + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny']), + ('PATCH', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl/NETAPPAD%5Cmohan9', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny_empty_advrights']), + ]) + args = { + 'acls': [{ + "access": "access_deny", + "user": "NETAPPAD\\mohan9", + "rights": 'modify', + "apply_to": {"this_folder": True, "files": False, "sub_folders": False}, + }], + 'validate_changes': 'error' + } + error = "Error - patch-acls still required for [{" + assert error in call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] + args['validate_changes'] = 'warn' + 
assert call_main(my_main, DEFAULT_ARGS, args)['changed'] + print_warnings() + assert_warning_was_raised('Error - patch-acls still required for [', partial_match=True) + + +def test_negative_acl_rights_and_advrights(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + args = { + 'access_control': 'file_directory', + 'acls': [{ + "access": "access_deny", + "user": "NETAPPAD\\mohan9", + "advanced_rights": {"append_data": False}, + "rights": 'modify', + "apply_to": {"this_folder": True, "files": False, "sub_folders": False}, + }], + 'validate_changes': 'error' + + } + error = "Error: suboptions 'rights' and 'advanced_rights' are mutually exclusive." + assert error in call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] + del args['acls'][0]['rights'] + args['acls'][0]['access_control'] = "slag" + error = "Error: mismatch between top level value and ACL value for" + assert error in call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] + args['acls'][0]['apply_to'] = {"this_folder": False, "files": False, "sub_folders": False} + error = "Error: at least one suboption must be true for apply_to. 
Got: " + assert error in call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_get_acl_actions_on_create(): + """ given a set of ACLs in self.parameters, split them in four groups, or fewer """ + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + + apply_to = {'this_folder': True, 'files': False, 'sub_folders': False} + + fd_prop_acls = [ + # All these ACLs fall into a single category, as file_directory and propagate are the defaults + {"access": "access_deny", "user": "user01", "apply_to": apply_to}, + {"access": "access_deny", "user": "user02", "apply_to": apply_to, 'access_control': 'file_directory'}, + {"access": "access_deny", "user": "user03", "apply_to": apply_to, 'access_control': 'file_directory', 'propagation_mode': 'propagate'}, + {"access": "access_deny", "user": "user04", "apply_to": apply_to, 'propagation_mode': 'propagate'} + ] + + fd_replace_acls = [ + {"access": "access_deny", "user": "user11", "apply_to": apply_to, 'access_control': 'file_directory', 'propagation_mode': 'replace'}, + {"access": "access_deny", "user": "user12", "apply_to": apply_to, 'propagation_mode': 'replace'} + ] + + slag_prop_acls = [ + {"access": "access_deny", "user": "user21", "apply_to": apply_to, 'access_control': 'slag'}, + {"access": "access_deny", "user": "user22", "apply_to": apply_to, 'access_control': 'slag', 'propagation_mode': 'propagate'}, + ] + + slag_replace_acls = [ + {"access": "access_deny", "user": "user31", "apply_to": apply_to, 'access_control': 'slag', 'propagation_mode': 'replace'}, + ] + + args = { + 'acls': fd_prop_acls, + 'validate_changes': 'error' + } + my_obj = create_module(my_module, DEFAULT_ARGS, args) + acls = my_obj.get_acl_actions_on_create() + assert not any(acls[x] for x in acls) + assert my_obj.parameters['acls'] == fd_prop_acls + + args = { + 'acls': fd_prop_acls + fd_replace_acls + slag_prop_acls + 
slag_replace_acls, + 'validate_changes': 'error' + } + my_obj = create_module(my_module, DEFAULT_ARGS, args) + acls = my_obj.get_acl_actions_on_create() + print('P_ACLS', acls) + print('C_ACLS', my_obj.parameters['acls']) + assert len(acls['post-acls']) == 5 + assert my_obj.parameters['acls'] == fd_prop_acls + + args = { + 'acls': slag_replace_acls, + 'validate_changes': 'error' + } + my_obj = create_module(my_module, DEFAULT_ARGS, args) + acls = my_obj.get_acl_actions_on_create() + assert not any(acls[x] for x in acls) + assert my_obj.parameters['acls'] == slag_replace_acls + + +def test_get_acl_actions_on_create_special(): + """ given a set of ACLs in self.parameters, split them in four groups, or fewer """ + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + + apply_to = {'this_folder': True, 'files': False, 'sub_folders': False} + + fd_prop_acls = [ + # All these ACLs fall into a single category, as file_directory and propagate are the defaults + {"access": "access_deny", "user": "user01", "apply_to": apply_to}, + {"access": "access_deny", "user": "user02", "apply_to": apply_to, 'access_control': 'file_directory'}, + {"access": "access_deny", "user": "user03", "apply_to": apply_to, 'access_control': 'file_directory', 'propagation_mode': 'propagate'}, + {"access": "access_deny", "user": "user04", "apply_to": apply_to, 'propagation_mode': 'propagate'} + ] + + fd_replace_acls = [ + {"access": "access_deny", "user": "user11", "apply_to": apply_to, 'access_control': 'file_directory', 'propagation_mode': 'replace'}, + {"access": "access_deny", "user": "user12", "apply_to": apply_to, 'propagation_mode': 'replace'} + ] + + slag_prop_acls = [ + {"access": "access_allowed_callback", "user": "user21", "apply_to": apply_to, 'access_control': 'slag'}, + {"access": "access_denied_callback", "user": "user22", "apply_to": apply_to, 'access_control': 'slag', 'propagation_mode': 'propagate'}, + ] + + 
slag_replace_acls = [ + {"access": "access_deny", "user": "user31", "apply_to": apply_to, 'access_control': 'slag', 'propagation_mode': 'replace'}, + ] + + fd_replace_acls_conflict = [ + {"access": "access_denied_callback", "user": "user11", "apply_to": apply_to, 'access_control': 'file_directory', 'propagation_mode': 'replace'}, + {"access": "access_allowed_callback", "user": "user12", "apply_to": apply_to, 'propagation_mode': 'replace'} + ] + + args = { + 'acls': fd_prop_acls + fd_replace_acls + slag_prop_acls + slag_replace_acls, + 'validate_changes': 'error' + } + my_obj = create_module(my_module, DEFAULT_ARGS, args) + acls = my_obj.get_acl_actions_on_create() + print('P_ACLS', acls) + print('C_ACLS', my_obj.parameters['acls']) + assert len(acls['post-acls']) == 7 + assert my_obj.parameters['acls'] == slag_prop_acls + + args = { + 'acls': fd_prop_acls + fd_replace_acls_conflict + slag_prop_acls + slag_replace_acls, + 'validate_changes': 'error' + } + my_obj = create_module(my_module, DEFAULT_ARGS, args) + error = 'with access access_allowed_callback conflicts with other ACLs using accesses' + assert error in expect_and_capture_ansible_exception(my_obj.get_acl_actions_on_create, 'fail')['msg'] + + +def test_negative_unsupported_version(): + ''' create slag acl and idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_9_1']), + # ('GET', 'svm/svms', SRR['svm_id']), + # ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['non_acl']), + # ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['success']), + # ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['slag_acl_same_user']), + # ('GET', 'cluster', SRR['is_rest_9_10_1']), + # ('GET', 'svm/svms', SRR['svm_id']), + # ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['slag_acl_same_user']) + ]) + args = { + 
'access_control': 'slag', + 'acls': [ + { + 'access': 'access_deny', + 'access_control': 'slag', + 'advanced_rights': {'append_data': True}, + 'apply_to': {'files': True, "this_folder": False, "sub_folders": False}, + 'user': 'SERVER_CIFS_TE\\mohan11' + }, + { + 'access': 'access_allow', + 'access_control': 'slag', + 'advanced_rights': {'append_data': True}, + 'apply_to': {'files': True, "this_folder": False, "sub_folders": False}, + 'user': 'SERVER_CIFS_TE\\mohan11' + } + ] + } + error = 'Error: na_ontap_file_security_permissions only supports REST, and requires ONTAP 9.9.1 or later. Found: 9.8.0.' + assert error in call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] + error = 'Minimum version of ONTAP for access_control is (9, 10, 1)' + msg = call_main(my_main, DEFAULT_ARGS, args, fail=True)['msg'] + assert error in msg + error = 'Minimum version of ONTAP for acls.access_control is (9, 10, 1)' + assert error in msg + + +def test_match_acl_with_acls(): + """ given a set of ACLs in self.parameters, split them in four groups, or fewer """ + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + + apply_to = {'this_folder': True, 'files': False, 'sub_folders': False} + + fd_prop_acls = [ + # All these ACLs fall into a single category, as file_directory and propagate are the defaults + {"access": "access_deny", "user": "user01", "apply_to": apply_to}, + {"access": "access_deny", "user": "user02", "apply_to": apply_to, 'access_control': 'file_directory'}, + {"access": "access_deny", "user": "user03", "apply_to": apply_to, 'access_control': 'file_directory', 'propagation_mode': 'propagate'}, + {"access": "access_deny", "user": "user04", "apply_to": apply_to, 'propagation_mode': 'propagate'} + ] + + fd_replace_acls = [ + {"access": "access_deny", "user": "user11", "apply_to": apply_to, 'access_control': 'file_directory', 'propagation_mode': 'replace'}, + {"access": "access_deny", "user": "user12", "apply_to": apply_to, 'propagation_mode': 
'replace'} + ] + + acl = fd_prop_acls[3] + my_obj = create_module(my_module, DEFAULT_ARGS) + assert acl == my_obj.match_acl_with_acls(acl, fd_prop_acls) + assert my_obj.match_acl_with_acls(acl, fd_replace_acls) is None + error = 'Error: found more than one desired ACLs with same user, access, access_control and apply_to' + assert error in expect_and_capture_ansible_exception(my_obj.match_acl_with_acls, 'fail', acl, fd_prop_acls + fd_prop_acls)['msg'] + + +def test_validate_changes(): + """ verify nothing needs to be changed """ + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/file-security/permissions/None/%2Fvol200%2FaNewFile.txt', SRR['zero_records']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/file-security/permissions/None/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_single_user_deny']), + ]) + args = { + 'validate_changes': 'ignore' + } + my_obj = create_module(my_module, DEFAULT_ARGS, args) + assert my_obj.validate_changes('create', {}) is None + args = { + 'validate_changes': 'error' + } + my_obj = create_module(my_module, DEFAULT_ARGS, args) + error = 'Error - create still required after create' + assert error in expect_and_capture_ansible_exception(my_obj.validate_changes, 'fail', 'create', {})['msg'] + args = { + 'validate_changes': 'warn', + 'owner': 'new_owner' + } + my_obj = create_module(my_module, DEFAULT_ARGS, args) + warning = "Error - modify: {'owner': 'new_owner'} still required after {'a': 'b'}" + assert my_obj.validate_changes('create', {'a': 'b'}) is None + assert_warning_was_raised(warning, partial_match=True) + assert_warning_was_raised('post-acls still required for', partial_match=True) + assert_warning_was_raised('delete-acls still required for', partial_match=True) diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_security_permissions_acl.py 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_security_permissions_acl.py new file mode 100644 index 000000000..510f04a9e --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_file_security_permissions_acl.py @@ -0,0 +1,331 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + patch_ansible, assert_warning_was_raised, call_main, print_warnings, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + get_mock_record, patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_file_security_permissions_acl\ + import NetAppOntapFileSecurityPermissionsACL as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def build_acl(user, access='access_allow', access_control='file_directory', apply_to=None, inherited=None, advanced_rights='all', rights=None): + if apply_to is None: + apply_to = {'this_folder': True} + if advanced_rights == 'all': + advanced_rights = { + 'append_data': True, + 'delete': True, + 'delete_child': True, + 'execute_file': True, + 'full_control': True, + 'read_attr': True, + 'read_data': True, + 'read_ea': True, + 'read_perm': True, + 'synchronize': True, + 'write_attr': True, + 
'write_data': True, + 'write_ea': True, + 'write_owner': True, + 'write_perm': True + } + + acl = { + 'access': access, + 'access_control': access_control, + 'advanced_rights': advanced_rights, + 'apply_to': apply_to, + 'user': user + } + if inherited is not None: + acl['inherited'] = inherited + if rights is not None: + acl['rights'] = rights + return acl + + +SRR = rest_responses({ + 'non_acl': (200, { + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_only_inherited_acl': (200, { + 'acls': [ + build_acl('Everyone', inherited=True) + ], + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_multiple_user': (200, { + 'acls': [ + build_acl('NETAPPAD\\mohan9'), + build_acl('SERVER_CIFS_TE\\mohan11'), + build_acl('Everyone', inherited=True) + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_multiple_user_adv_rights': (200, { + 'acls': [ + build_acl('NETAPPAD\\mohan9'), + build_acl('SERVER_CIFS_TE\\mohan11', advanced_rights={"append_data": True}), + build_acl('Everyone', inherited=True) + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'fd_acl_single_user_deny': (200, { + 'acls': [ + build_acl('SERVER_CIFS_TE\\mohan11', access='access_deny') + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'slag_acl_same_user': (200, { + 'acls': [ + build_acl('SERVER_CIFS_TE\\mohan11', access_control='slag', apply_to={'files': True}, 
advanced_rights={"append_data": True}, access='access_deny'), + build_acl('SERVER_CIFS_TE\\mohan11', access_control='slag', apply_to={'files': True}, advanced_rights={"append_data": True}) + ], + 'control_flags': '0x8014', + 'group': 'BUILTIN\\Administrators', + 'owner': 'BUILTIN\\Administrators', + 'path': '/vol200/aNewFile.txt', + 'svm': {'name': 'ansible_ipspace_datasvm', 'uuid': '55bcb009'} + }, None), + 'svm_id': (200, { + 'uuid': '55bcb009' + }, None), + 'error_655865': (400, None, {'code': 655865, 'message': 'Expected error'}), +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'vserver', + 'path': '/vol200/aNewFile.txt', + 'access_control': 'file_directory', + "access": "access_allow", + "acl_user": "SERVER_CIFS_TE\\mohan11", +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "vserver", "path"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_create_file_directory_acl(): + ''' create file_directory acl and idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['non_acl']), + ('POST', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt/acl', SRR['success']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_multiple_user']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_id']), + ('GET', 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt', SRR['fd_acl_multiple_user']) + ]) + module_args = { + "advanced_rights": {"append_data": True}, + "apply_to": {"this_folder": True, "files": False, 
def test_modify_file_directory_acl():
    ''' modify file_directory acl and idempotent '''
    # the same endpoints are queried repeatedly; build the URLs once
    permissions = 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt'
    acl_url = permissions + '/acl/SERVER_CIFS_TE%5Cmohan11'
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'svm/svms', SRR['svm_id']),
        ('GET', permissions, SRR['fd_acl_multiple_user']),
        ('PATCH', acl_url, SRR['success']),
        ('GET', permissions, SRR['fd_acl_multiple_user_adv_rights']),
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'svm/svms', SRR['svm_id']),
        ('GET', permissions, SRR['fd_acl_multiple_user_adv_rights']),
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'svm/svms', SRR['svm_id']),
        ('GET', permissions, SRR['fd_acl_multiple_user_adv_rights']),
        ('PATCH', acl_url, SRR['success']),
        ('GET', permissions, SRR['fd_acl_multiple_user_adv_rights']),
    ])
    module_args = {
        'advanced_rights': {'append_data': True, 'delete': False},
        'apply_to': {'this_folder': True, 'files': False, 'sub_folders': False},
    }
    # first run applies the modification, second run is idempotent
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    # asking for 'rights' that the mocked responses never reflect: the module
    # reports the still-pending modification as an error
    module_args = {
        'apply_to': {'this_folder': True, 'files': False, 'sub_folders': False},
        'rights': 'full_control',
    }
    expected_error = "Error - modify: {'rights': 'full_control'} still required after {'rights': 'full_control'}"
    assert expected_error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
def test_if_all_methods_catch_exception():
    # every REST call answers with a generic error; each helper must surface it
    base_url = 'protocols/file-security/permissions/55bcb009/%2Fvol200%2FaNewFile.txt'
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', base_url, SRR['generic_error']),
        ('POST', base_url + '/acl', SRR['generic_error']),
        ('PATCH', base_url + '/acl/SERVER_CIFS_TE%5Cmohan11', SRR['generic_error']),
        ('DELETE', base_url + '/acl/SERVER_CIFS_TE%5Cmohan11', SRR['generic_error'])
    ])
    module_args = {
        "advanced_rights": {"append_data": True},
        "apply_to": {"this_folder": True, "files": False, "sub_folders": False}
    }
    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
    my_obj.svm_uuid = "55bcb009"
    # (expected error fragment, bound method, extra positional args)
    expectations = (
        ('Error fetching file security', my_obj.get_file_security_permissions_acl, ()),
        ('Error creating file security', my_obj.create_file_security_permissions_acl, ()),
        ('Error modifying file security', my_obj.modify_file_security_permissions_acl, ()),
        ('Error deleting file security permissions', my_obj.delete_file_security_permissions_acl, ()),
        ('Internal error - unexpected action bad_action', my_obj.build_body, ('bad_action',)),
    )
    for fragment, bound_method, extra_args in expectations:
        assert fragment in expect_and_capture_ansible_exception(bound_method, 'fail', *extra_args)['msg']
    # two identical ACLs in the list: matching is ambiguous and must fail
    one_acl = build_acl('user')
    assert 'Error matching ACLs, found more than one match. Found' in expect_and_capture_ansible_exception(
        my_obj.match_acl_with_acls, 'fail', one_acl, [one_acl, one_acl])['msg']
DEFAULT_ARGS, args) + error = 'Error - create still required after create' + assert error in expect_and_capture_ansible_exception(my_obj.validate_changes, 'fail', 'create', {})['msg'] + args = { + 'access': 'access_deny', + 'advanced_rights': { + 'append_data': False, + }, + 'apply_to': {'this_folder': True}, + 'validate_changes': 'warn', + } + my_obj = create_module(my_module, DEFAULT_ARGS, args) + warning = "Error - modify: {'advanced_rights': {'append_data': False}} still required after {'a': 'b'}" + assert my_obj.validate_changes('create', {'a': 'b'}) is None + print_warnings() + assert_warning_was_raised(warning, partial_match=True) diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firewall_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firewall_policy.py new file mode 100644 index 000000000..b23a897a3 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firewall_policy.py @@ -0,0 +1,263 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_firewall_policy \ + import NetAppONTAPFirewallPolicy as fp_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required 
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''

    def __init__(self, kind=None, data=None):
        ''' remember which canned reply to serve and the record used to build it '''
        self.kind = kind
        self.data = data
        self.xml_in = None
        self.xml_out = None

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        self.xml_in = xml
        if self.kind == 'policy':
            xml = self.build_policy_info(self.data)
        elif self.kind == 'config':
            xml = self.build_firewall_config_info(self.data)
        self.xml_out = xml
        return xml

    @staticmethod
    def build_policy_info(data):
        ''' build xml data for net-firewall-policy-info '''
        # the allow-list is hard-coded, policy/service come from the test record
        policy_info = {
            'policy': data['policy'],
            'service': data['service'],
            'allow-list': [{'ip-and-mask': '1.2.3.0/24'}]
        }
        reply = netapp_utils.zapi.NaElement('xml')
        reply.translate_struct({
            'num-records': 1,
            'attributes-list': {'net-firewall-policy-info': policy_info}
        })
        return reply

    @staticmethod
    def build_firewall_config_info(data):
        ''' build xml data for net-firewall-config-info '''
        # data is accepted for symmetry with build_policy_info but unused:
        # the config reply is entirely hard-coded
        reply = netapp_utils.zapi.NaElement('xml')
        reply.translate_struct({
            'attributes': {
                'net-firewall-config-info': {
                    'is-enabled': 'true',
                    'is-logging': 'false'
                }
            }
        })
        return reply
+ } + + def mock_config_args(self): + return { + 'node': self.mock_config['node'], + 'enable': self.mock_config['enable'], + 'logging': self.mock_config['logging'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!' + } + + def get_mock_object(self, kind=None): + """ + Helper method to return an na_ontap_firewall_policy object + :param kind: passes this param to MockONTAPConnection() + :return: na_ontap_firewall_policy object + """ + obj = fp_module() + obj.autosupport_log = Mock(return_value=None) + if kind is None: + obj.server = MockONTAPConnection() + else: + mock_data = self.mock_config if kind == 'config' else self.mock_policy + obj.server = MockONTAPConnection(kind=kind, data=mock_data) + return obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + fp_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_helper_firewall_policy_attributes(self): + ''' helper returns dictionary with vserver, service and policy details ''' + data = self.mock_policy + set_module_args(self.mock_policy_args()) + result = self.get_mock_object('policy').firewall_policy_attributes() + del data['allow_list'] + assert data == result + + def test_helper_validate_ip_addresses_positive(self): + ''' test if helper validates if IP is a network address ''' + data = self.mock_policy_args() + data['allow_list'] = ['1.2.0.0/16', '1.2.3.0/24'] + set_module_args(data) + result = self.get_mock_object().validate_ip_addresses() + assert result is None + + def test_helper_validate_ip_addresses_negative(self): + ''' test if helper validates if IP is a network address ''' + data = self.mock_policy_args() + data['allow_list'] = ['1.2.0.10/16', '1.2.3.0/24'] + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_mock_object().validate_ip_addresses() + msg = 'Error: Invalid IP network value 1.2.0.10/16.' 
\ + ' Please specify a network address without host bits set: ' \ + '1.2.0.10/16 has host bits set.' + assert exc.value.args[0]['msg'] == msg + + def test_get_nonexistent_policy(self): + ''' Test if get_firewall_policy returns None for non-existent policy ''' + set_module_args(self.mock_policy_args()) + result = self.get_mock_object().get_firewall_policy() + assert result is None + + def test_get_existing_policy(self): + ''' Test if get_firewall_policy returns policy details for existing policy ''' + data = self.mock_policy_args() + set_module_args(data) + result = self.get_mock_object('policy').get_firewall_policy() + assert result['service'] == data['service'] + assert result['allow_list'] == ['1.2.3.0/24'] # from build_policy_info() + + def test_successful_create(self): + ''' Test successful create ''' + set_module_args(self.mock_policy_args()) + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object().apply() + assert exc.value.args[0]['changed'] + + def test_create_idempotency(self): + ''' Test create idempotency ''' + set_module_args(self.mock_policy_args()) + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object('policy').apply() + assert not exc.value.args[0]['changed'] + + def test_successful_delete(self): + ''' Test delete existing job ''' + data = self.mock_policy_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object('policy').apply() + assert exc.value.args[0]['changed'] + + def test_delete_idempotency(self): + ''' Test delete idempotency ''' + data = self.mock_policy_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object().apply() + assert not exc.value.args[0]['changed'] + + def test_successful_modify(self): + ''' Test successful modify allow_list ''' + data = self.mock_policy_args() + data['allow_list'] = ['1.2.0.0/16'] + set_module_args(data) + with pytest.raises(AnsibleExitJson) as 
exc: + self.get_mock_object('policy').apply() + assert exc.value.args[0]['changed'] + + def test_successful_modify_mutiple_ips(self): + ''' Test successful modify allow_list ''' + data = self.mock_policy_args() + data['allow_list'] = ['1.2.0.0/16', '1.0.0.0/8'] + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object('policy').apply() + assert exc.value.args[0]['changed'] + + def test_successful_modify_mutiple_ips_contain_existing(self): + ''' Test successful modify allow_list ''' + data = self.mock_policy_args() + data['allow_list'] = ['1.2.3.0/24', '1.0.0.0/8'] + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object('policy').apply() + assert exc.value.args[0]['changed'] + + def test_get_nonexistent_config(self): + ''' Test if get_firewall_config returns None for non-existent node ''' + set_module_args(self.mock_config_args()) + result = self.get_mock_object().get_firewall_config_for_node() + assert result is None + + def test_get_existing_config(self): + ''' Test if get_firewall_config returns policy details for existing node ''' + data = self.mock_config_args() + set_module_args(data) + result = self.get_mock_object('config').get_firewall_config_for_node() + assert result['enable'] == 'enable' # from build_config_info() + assert result['logging'] == 'disable' # from build_config_info() + + def test_successful_modify_config(self): + ''' Test successful modify allow_list ''' + data = self.mock_config_args() + data['enable'] = 'disable' + data['logging'] = 'enable' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_mock_object('config').apply() + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firmware_upgrade.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_firmware_upgrade.py new file mode 100644 index 000000000..140b91cd7 --- /dev/null +++ 
def mock_warn(me, log):  # pylint: disable=unused-argument
    """Stand-in for a module's warn method: echo the warning text to stdout."""
    prefix = 'WARNING'
    print(prefix, log)
    @staticmethod
    def build_acp_firmware_info(firmware_type):
        ''' build xml data for acp firmware info '''
        # NOTE(review): firmware_type is accepted for symmetry with the other
        # build_*_info helpers but is not used here.
        xml = netapp_utils.zapi.NaElement('xml')
        data = {
            # 'num-records': 1,
            # the 'firmware_update_required' state is what makes
            # acp_firmware_update_required() report True for kind='acp' mocks
            'attributes-list': {'storage-shelf-acp-module': {'state': 'firmware_update_required'}}
        }
        xml.translate_struct(data)
        return xml
netapp_utils.zapi.NaElement('results') + output = "" if error == 'empty_output' else 'Download complete.' + data = { + 'cli-output': output, + 'cli-result-value': 1 + } + xml.translate_struct(data) + status = "failed" if error == 'status_failed' else "passed" + if error != 'no_status_attr': + xml.add_attr('status', status) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.use_vsim = False + + def set_default_args(self): + if self.use_vsim: + hostname = '10.10.10.10' + username = 'admin' + password = 'admin' + node = 'vsim1' + else: + hostname = 'hostname' + username = 'username' + password = 'password' + node = 'abc' + package = 'test1.zip' + force_disruptive_update = False + clear_logs = True + install_baseline_image = False + update_type = 'serial_full' + use_rest = 'never' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'node': node, + 'package': package, + 'clear_logs': clear_logs, + 'install_baseline_image': install_baseline_image, + 'update_type': update_type, + 'https': 'true', + 'force_disruptive_update': force_disruptive_update, + 'use_rest': use_rest, + 'feature_flags': {'trace_apis': True} + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_sp_firmware_get_called(self): + module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'service-processor' + set_module_args(module_args) + my_obj = my_module() + my_obj.server = self.server + firmware_image_get = my_obj.firmware_image_get('node') + print('Info: test_firmware_upgrade_get: %s' % repr(firmware_image_get)) + assert firmware_image_get is None + + def test_negative_package_and_baseline_present(self): + 
module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'service-processor' + module_args['package'] = 'test1.zip' + module_args['install_baseline_image'] = True + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(module_args) + my_module() + msg = 'With ZAPI and firmware_type set to service-processor: do not specify both package and install_baseline_image: true.' + print('info: ' + exc.value.args[0]['msg']) + assert exc.value.args[0]['msg'] == msg + + def test_negative_package_and_baseline_absent(self): + module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'service-processor' + module_args.pop('package') + module_args['install_baseline_image'] = False + module_args['force_disruptive_update'] = True + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(module_args) + my_module() + msg = 'With ZAPI and firmware_type set to service-processor: specify at least one of package or install_baseline_image: true.' 
    def test_ensure_acp_firmware_update_required_called(self):
        ''' a test to verify whether acp firmware upgrade is required or not '''
        module_args = {}
        module_args.update(self.set_default_args())
        module_args['firmware_type'] = 'acp'
        set_module_args(module_args)
        my_obj = my_module()
        # the 'acp' mock reports state 'firmware_update_required', so the
        # helper is expected to answer True
        my_obj.server = MockONTAPConnection(kind='acp')
        acp_firmware_update_required = my_obj.acp_firmware_update_required()
        print('Info: test_acp_firmware_upgrade_required_get: %s' % repr(acp_firmware_update_required))
        assert acp_firmware_update_required is True
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.shelf_firmware_upgrade') + def test_shelf_firmware_upgrade(self, upgrade_mock): + ''' Test shelf firmware upgrade ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'shelf' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] + assert not upgrade_mock.called + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.shelf_firmware_upgrade') + def test_shelf_firmware_upgrade_force(self, upgrade_mock): + ''' Test shelf firmware upgrade ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'shelf' + module_args['force_disruptive_update'] = True + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = self.server + upgrade_mock.return_value = True + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + assert upgrade_mock.called + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.shelf_firmware_upgrade') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.shelf_firmware_update_required') + def test_shelf_firmware_upgrade_force_update_required(self, update_required_mock, upgrade_mock): + ''' Test shelf firmware upgrade ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'shelf' + module_args['force_disruptive_update'] = True + module_args['shelf_module_fw'] = "version" 
+ set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = self.server + update_required_mock.return_value = True + upgrade_mock.return_value = True + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + assert upgrade_mock.called + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.acp_firmware_upgrade') + def test_acp_firmware_upgrade(self, upgrade_mock): + ''' Test ACP firmware upgrade ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'acp' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] + assert not upgrade_mock.called + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.acp_firmware_upgrade') + def test_acp_firmware_upgrade_force(self, upgrade_mock): + ''' Test ACP firmware upgrade ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'acp' + module_args['force_disruptive_update'] = True + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection(kind='acp') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + assert upgrade_mock.called + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.disk_firmware_upgrade') + def test_disk_firmware_upgrade(self, upgrade_mock): + ''' Test disk firmware upgrade ''' + module_args = {} + 
module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'disk' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value)) + assert not exc.value.args[0]['changed'] + assert not upgrade_mock.called + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.disk_firmware_upgrade') + def test_disk_firmware_upgrade_force(self, upgrade_mock): + ''' Test disk firmware upgrade ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'disk' + module_args['force_disruptive_update'] = True + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value)) + assert exc.value.args[0]['changed'] + assert upgrade_mock.called + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.disk_firmware_upgrade') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_firmware_upgrade.NetAppONTAPFirmwareUpgrade.disk_firmware_update_required') + def test_disk_firmware_upgrade_force_update_required(self, update_required_mock, upgrade_mock): + ''' Test disk firmware upgrade ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['firmware_type'] = 'disk' + module_args['force_disruptive_update'] = True + module_args['disk_fw'] = "version" + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = self.server + update_required_mock.return_value = True + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_firmware_upgrade_apply: %s' % repr(exc.value)) + assert 
exc.value.args[0]['changed'] + assert upgrade_mock.called + + def test_acp_firmware_update_required(self): + ''' Test acp_firmware_update_required ''' + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('acp') + result = my_obj.acp_firmware_update_required() + assert result + + def test_acp_firmware_update_required_false(self): + ''' Test acp_firmware_update_required ''' + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection() + result = my_obj.acp_firmware_update_required() + assert not result + + def test_negative_acp_firmware_update_required(self): + ''' Test acp_firmware_update_required ''' + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('exception') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.acp_firmware_update_required() + msg = "Error fetching acp firmware details details: NetApp API failed. 
Reason - None:None" + assert msg in exc.value.args[0]['msg'] + + def test_disk_firmware_update_required(self): + ''' Test disk_firmware_update_required ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['disk_fw'] = '1.2.4' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('disk_fw_info') + result = my_obj.disk_firmware_update_required() + assert result + + def test_negative_disk_firmware_update_required(self): + ''' Test disk_firmware_update_required ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['disk_fw'] = '1.2.4' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('exception') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.disk_firmware_update_required() + msg = "Error fetching disk module firmware details: NetApp API failed. Reason - None:None" + assert msg in exc.value.args[0]['msg'] + + def test_shelf_firmware_update_required(self): + ''' Test shelf_firmware_update_required ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['shelf_module_fw'] = '1.2.4' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('shelf_fw_info') + result = my_obj.shelf_firmware_update_required() + assert result + + def test_negative_shelf_firmware_update_required(self): + ''' Test shelf_firmware_update_required ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['shelf_module_fw'] = '1.2.4' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('exception') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.shelf_firmware_update_required() + msg = "Error fetching shelf module firmware details: NetApp API failed. 
Reason - None:None" + assert msg in exc.value.args[0]['msg'] + + def test_firmware_download(self): + ''' Test firmware download ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['package_url'] = 'dummy_url' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('firmware_download') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + msg = "Firmware download completed. Extra info: Download complete." + assert exc.value.args[0]['msg'] == msg + + def test_60(self): + ''' Test firmware download ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['package_url'] = 'dummy_url' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('exception', 60, 'ZAPI timeout') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + msg = "Firmware download completed, slowly." + assert exc.value.args[0]['msg'] == msg + + def test_firmware_download_502(self): + ''' Test firmware download ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['package_url'] = 'dummy_url' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('exception', 502, 'Bad GW') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + msg = "Firmware download still in progress." 
+ assert exc.value.args[0]['msg'] == msg + + def test_firmware_download_502_as_error(self): + ''' Test firmware download ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['package_url'] = 'dummy_url' + module_args['fail_on_502_error'] = True + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('exception', 502, 'Bad GW') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "NetApp API failed. Reason - 502:Bad GW" + assert msg in exc.value.args[0]['msg'] + + def test_firmware_download_no_num_error(self): + ''' Test firmware download ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['package_url'] = 'dummy_url' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('exception', 'some error string', 'whatever') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "NetApp API failed. Reason - some error string:whatever" + assert msg in exc.value.args[0]['msg'] + + def test_firmware_download_no_status_attr(self): + ''' Test firmware download ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['package_url'] = 'dummy_url' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('firmware_download', 'no_status_attr') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "unable to download package from dummy_url: 'status' attribute missing." 
+ assert exc.value.args[0]['msg'].startswith(msg) + + def test_firmware_download_status_failed(self): + ''' Test firmware download ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['package_url'] = 'dummy_url' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('firmware_download', 'status_failed') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "unable to download package from dummy_url: check 'status' value." + assert exc.value.args[0]['msg'].startswith(msg) + + def test_firmware_download_empty_output(self): + ''' Test firmware download ''' + module_args = {} + module_args.update(self.set_default_args()) + module_args['package_url'] = 'dummy_url' + set_module_args(module_args) + my_obj = my_module() + if not self.use_vsim: + my_obj.server = MockONTAPConnection('firmware_download', 'empty_output') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "unable to download package from dummy_url: check console permissions." 
+ assert exc.value.args[0]['msg'].startswith(msg) + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, {'num_records': 0}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'uuid_record': (200, + {'records': [{"uuid": '1cd8a442-86d1-11e0-ae1c-123478563412'}]}, None), + 'nodes_record': (200, + {'records': [{"name": 'node1'}, {"name": 'node2'}]}, None), + 'net_routes_record': (200, + {'records': [{"destination": {"address": "176.0.0.0", + "netmask": "24", + "family": "ipv4"}, + "gateway": '10.193.72.1', + "uuid": '1cd8a442-86d1-11e0-ae1c-123478563412', + "svm": {"name": "test_vserver"}}]}, None), + 'modified_record': (200, + {'records': [{"destination": {"address": "0.0.0.0", + "netmask": "0", + "family": "ipv4"}, + "gateway": "10.193.72.1", + "uuid": '1cd8a442-86d1-11e0-ae1c-123478563412', + "svm": {"name": "test_vserver"}}]}, None), + 'sp_state_online': (200, + {'service_processor': {'state': 'online'}}, None), + 'sp_state_rebooting': (200, + {'service_processor': {'state': 'rebooting'}}, None), + 'unexpected_arg': (400, None, 'Unexpected argument "service_processor.action"'), +} + + +def set_default_module_args(use_rest='always'): + hostname = 'hostname' + username = 'username' + password = 'password' + use_rest = 'always' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'https': 'true', + 'use_rest': use_rest, + 'package_url': 'https://download.site.com' + }) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_successfully_download(mock_request, patch_ansible): + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = False + set_module_args(data) + 
mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_good'], # post download + SRR['is_rest'], + SRR['empty_good'], # post download + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + my_module().apply() + assert exc.value.args[0]['changed'] + print(mock_request.call_args) + json = {'url': 'https://download.site.com'} + expected = call('POST', 'cluster/software/download', None, json=json, headers=None, files=None) + assert mock_request.call_args == expected + data['server_username'] = 'user' + data['server_password'] = 'pass' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + my_module().apply() + print(mock_request.call_args) + json = {'url': 'https://download.site.com', 'username': 'user', 'password': 'pass'} + expected = call('POST', 'cluster/software/download', None, json=json, headers=None, files=None) + assert mock_request.call_args == expected + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_download(mock_request, patch_ansible): + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = False + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], # post download + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_module().apply() + msg = 'Error downloading software: calling: cluster/software/download: got Expected error.' 
+ assert msg in exc.value.args[0]['msg'] + + +@patch('time.sleep') +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_successfully_reboot_sp_and_download(mock_request, dont_sleep, patch_ansible): + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = True + data['node'] = 'node4' + data['firmware_type'] = 'service-processor' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['uuid_record'], # get UUID + SRR['empty_good'], # patch reboot + SRR['empty_good'], # post download + SRR['sp_state_rebooting'], # get sp state + SRR['sp_state_rebooting'], # get sp state + SRR['sp_state_online'], # get sp state + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + my_module().apply() + assert exc.value.args[0]['changed'] + + +@patch('time.sleep') +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_reboot_sp_and_download_bad_sp(mock_request, dont_sleep, patch_ansible): + """fail to read SP state""" + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = True + data['node'] = 'node4' + data['firmware_type'] = 'service-processor' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['uuid_record'], # get UUID + SRR['empty_good'], # patch reboot + SRR['empty_good'], # post download + SRR['sp_state_rebooting'], # get sp state + SRR['sp_state_rebooting'], # get sp state + SRR['generic_error'], # get sp state + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_module().apply() + msg = 'Error getting node SP state:' + assert msg in exc.value.args[0]['msg'] + + +@patch('time.sleep') +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_reboot_sp_and_download_sp_timeout(mock_request, dont_sleep, 
patch_ansible): + """fail to read SP state""" + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = True + data['node'] = 'node4' + data['firmware_type'] = 'service-processor' + set_module_args(data) + responses = [ + SRR['is_rest'], + SRR['uuid_record'], # get UUID + SRR['empty_good'], # patch reboot + SRR['empty_good'], # post download + ] + # 20 retries + responses.extend([SRR['sp_state_rebooting']] * 20) + responses.append(SRR['sp_state_online']) + responses.append(SRR['end_of_sequence']) + mock_request.side_effect = responses + with pytest.raises(AnsibleExitJson) as exc: + my_module().apply() + # msg = 'Error getting node SP state:' + # assert msg in exc.value.args[0]['msg'] + print('RETRIES', exc.value.args[0]) + + +@patch('time.sleep') +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_successfully_reboot_sp_and_download_cli(mock_request, dont_sleep, patch_ansible): + ''' switch back to REST CLI for reboot ''' + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = True + data['node'] = 'node4' + data['firmware_type'] = 'service-processor' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['uuid_record'], # get UUID + SRR['unexpected_arg'], # patch reboot + SRR['empty_good'], # REST CLI reboot + SRR['empty_good'], # post download + SRR['sp_state_rebooting'], # get sp state + SRR['sp_state_rebooting'], # get sp state + SRR['sp_state_online'], # get sp state + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + my_module().apply() + assert exc.value.args[0]['changed'] + + +@patch('time.sleep') +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_reboot_sp_and_download_cli(mock_request, dont_sleep, patch_ansible): + ''' switch back to REST CLI for reboot ''' + data = 
set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = True + data['node'] = 'node4' + data['firmware_type'] = 'service-processor' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['uuid_record'], # get UUID + SRR['unexpected_arg'], # patch reboot + SRR['generic_error'], # REST CLI reboot + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_module().apply() + msg = 'Error rebooting node SP: reboot_sp requires ONTAP 9.10.1 or newer, falling back to CLI passthrough failed' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_reboot_sp_and_download_uuid_error(mock_request, patch_ansible): + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = True + data['node'] = 'node4' + data['firmware_type'] = 'service-processor' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], # get UUID + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_module().apply() + msg = 'Error reading node UUID: calling: cluster/nodes: got Expected error.' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_reboot_sp_and_download_node_not_found(mock_request, patch_ansible): + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = True + data['node'] = 'node4' + data['firmware_type'] = 'service-processor' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], # get UUID + SRR['nodes_record'], # get nodes + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_module().apply() + msg = 'Error: node not found node4, current nodes: node1, node2.' 
+ assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_reboot_sp_and_download_nodes_get_error(mock_request, patch_ansible): + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['reboot_sp'] = True + data['node'] = 'node4' + data['firmware_type'] = 'service-processor' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], # get UUID + SRR['generic_error'], # get nodes + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_module().apply() + msg = 'Error reading nodes: calling: cluster/nodes: got Expected error.' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_unsupported_option_with_rest(mock_request, patch_ansible): + data = set_default_module_args(use_rest='always') + data['state'] = 'present' + data['clear_logs'] = False + data['node'] = 'node4' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_module().apply() + msg = "REST API currently does not support 'clear_logs'" + assert msg in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_flexcache.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_flexcache.py new file mode 100644 index 000000000..07e01940a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_flexcache.py @@ -0,0 +1,838 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP FlexCache Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import 
pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, build_zapi_error, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_warning_was_raised, call_main, create_module, expect_and_capture_ansible_exception, patch_ansible, print_warnings + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_flexcache import NetAppONTAPFlexCache as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +flexcache_info = { + 'vserver': 'vserver', + 'origin-vserver': 'ovserver', + 'origin-volume': 'ovolume', + 'origin-cluster': 'ocluster', + 'volume': 'flexcache_volume', +} + +flexcache_get_info = { + 'attributes-list': [{ + 'flexcache-info': flexcache_info + }] +} + +flexcache_get_info_double = { + 'attributes-list': [ + { + 'flexcache-info': flexcache_info + }, + { + 'flexcache-info': flexcache_info + } + ] +} + + +def results_info(status): + return { + 'result-status': status, + 'result-jobid': 'job12345', + } + + +def job_info(state, error): + return { + 'num-records': 1, + 'attributes': { + 'job-info': { + 'job-state': state, + 'job-progress': 'progress', + 'job-completion': error, + } + } + } + + +ZRR = zapi_responses({ + 'flexcache_get_info': build_zapi_response(flexcache_get_info, 1), + 'flexcache_get_info_double': build_zapi_response(flexcache_get_info_double, 2), + 
'job_running': build_zapi_response(job_info('running', None)), + 'job_success': build_zapi_response(job_info('success', None)), + 'job_error': build_zapi_response(job_info('failure', 'failure')), + 'job_error_no_completion': build_zapi_response(job_info('failure', None)), + 'job_other': build_zapi_response(job_info('other', 'other')), + 'result_async': build_zapi_response(results_info('in_progress')), + 'result_error': build_zapi_response(results_info('whatever')), + 'error_160': build_zapi_error(160, 'Volume volume on Vserver ansibleSVM must be unmounted before being taken offline or restricted'), + 'error_13001': build_zapi_error(13001, 'Volume volume in Vserver ansibleSVM must be offline to be deleted'), + 'error_15661': build_zapi_error(15661, 'Job not found'), + 'error_size': build_zapi_error('size', 'Size "50MB" ("52428800B") is too small. Minimum size is "80MB" ("83886080B")'), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'volume': 'flexcache_volume', + 'vserver': 'vserver', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never' + } + error = 'missing required arguments:' + assert error in call_main(my_main, {}, module_args, fail=True)['msg'] + + +def test_missing_parameters(): + ''' fail if origin volume and origin verser are missing ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + } + error = 'Missing parameters:' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_missing_parameter(): + ''' fail if origin verser parameter is missing ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + 'origin_volume': 'origin_volume', + } + error = 'Missing parameter: origin_vserver' + assert 
error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_get_flexcache(): + ''' get flexcache info ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ]) + module_args = { + 'use_rest': 'never', + 'origin_volume': 'origin_volume', + 'origin_cluster': 'origin_cluster', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + info = my_obj.flexcache_get() + assert info + assert 'origin_cluster' in info + + +def test_get_flexcache_double(): + ''' get flexcache info returns 2 entries! ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info_double']), + ]) + module_args = { + 'use_rest': 'never', + 'origin_volume': 'origin_volume', + + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = 'Error fetching FlexCache info: Multiple records found for %s:' % DEFAULT_ARGS['volume'] + assert error in expect_and_capture_ansible_exception(my_obj.flexcache_get, 'fail')['msg'] + + +def test_create_flexcache(): + ''' create flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ('ZAPI', 'flexcache-create-async', ZRR['result_async']), + ('ZAPI', 'job-get', ZRR['job_success']), + ]) + module_args = { + 'use_rest': 'never', + 'size': '90', # 80MB minimum + 'size_unit': 'mb', # 80MB minimum + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_flexcach_no_wait(): + ''' create flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ('ZAPI', 'flexcache-create-async', ZRR['result_async']), + ]) + module_args = { + 'use_rest': 'never', + 'size': '90', # 80MB minimum + 'size_unit': 'mb', # 80MB minimum + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + 'time_out': 0 + } + assert call_main(my_main, DEFAULT_ARGS, 
module_args)['changed'] + + +def test_error_create_flexcache(): + ''' create flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ('ZAPI', 'flexcache-create-async', ZRR['result_error']), + # 2nd run + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ('ZAPI', 'flexcache-create-async', ZRR['result_async']), + ('ZAPI', 'job-get', ZRR['error']), + # 3rd run + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ('ZAPI', 'flexcache-create-async', ZRR['result_async']), + ('ZAPI', 'job-get', ZRR['job_error']), + ]) + module_args = { + 'use_rest': 'never', + 'size': '90', # 80MB minimum + 'size_unit': 'mb', # 80MB minimum + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + } + error = 'Unexpected error when creating flexcache: results is:' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + error = zapi_error_message('Error fetching job info') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + error = 'Error when creating flexcache' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_create_flexcache_idempotent(): + ''' create flexcache - already exists ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ]) + module_args = { + 'use_rest': 'never', + 'aggr_list': 'aggr1', + 'origin_volume': 'ovolume', + 'origin_vserver': 'ovserver', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_flexcache_autoprovision(): + ''' create flexcache with autoprovision''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ('ZAPI', 'flexcache-create-async', ZRR['result_async']), + ('ZAPI', 'job-get', ZRR['job_success']), + ]) + module_args = { + 'use_rest': 'never', + 'size': '90', # 80MB minimum + 'size_unit': 'mb', # 80MB minimum + 'auto_provision_as': 'flexgroup', + 'origin_volume': 
'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_flexcache_autoprovision_idempotent(): + ''' create flexcache with autoprovision - already exists ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ]) + module_args = { + 'use_rest': 'never', + 'origin_volume': 'ovolume', + 'origin_vserver': 'ovserver', + 'auto_provision_as': 'flexgroup', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_flexcache_multiplier(): + ''' create flexcache with aggregate multiplier''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ('ZAPI', 'flexcache-create-async', ZRR['result_async']), + ('ZAPI', 'job-get', ZRR['job_success']), + ]) + module_args = { + 'use_rest': 'never', + 'size': '90', # 80MB minimum + 'size_unit': 'mb', # 80MB minimum + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + 'aggr_list_multiplier': 2, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_flexcache_multiplier_idempotent(): + ''' create flexcache with aggregate multiplier - already exists ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ]) + module_args = { + 'use_rest': 'never', + 'aggr_list': 'aggr1', + 'origin_volume': 'ovolume', + 'origin_vserver': 'ovserver', + 'aggr_list_multiplier': 2, + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_flexcache_exists_no_force(): + ''' delete flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ('ZAPI', 'flexcache-destroy-async', ZRR['error_13001']), + ]) + module_args = { + 'use_rest': 'never', + 'state': 'absent' + } + error = zapi_error_message('Error deleting FlexCache', 13001, 'Volume volume in Vserver ansibleSVM must be offline to be deleted') + 
assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_delete_flexcache_exists_with_force(): + ''' delete flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ('ZAPI', 'volume-offline', ZRR['success']), + ('ZAPI', 'flexcache-destroy-async', ZRR['result_async']), + ('ZAPI', 'job-get', ZRR['job_success']), + ]) + module_args = { + 'use_rest': 'never', + 'force_offline': 'true', + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_flexcache_exists_with_force_no_wait(): + ''' delete flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ('ZAPI', 'volume-offline', ZRR['success']), + ('ZAPI', 'flexcache-destroy-async', ZRR['result_async']), + ]) + module_args = { + 'use_rest': 'never', + 'force_offline': 'true', + 'time_out': 0, + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_flexcache_exists_junctionpath_no_force(): + ''' delete flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ('ZAPI', 'volume-offline', ZRR['success']), + ('ZAPI', 'flexcache-destroy-async', ZRR['error_160']), + ]) + module_args = { + 'use_rest': 'never', + 'force_offline': 'true', + 'junction_path': 'jpath', + 'state': 'absent' + } + error = zapi_error_message('Error deleting FlexCache', 160, + 'Volume volume on Vserver ansibleSVM must be unmounted before being taken offline or restricted') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_delete_flexcache_exists_junctionpath_with_force(): + ''' delete flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ('ZAPI', 'volume-unmount', ZRR['success']), + ('ZAPI', 'volume-offline', ZRR['success']), + ('ZAPI', 'flexcache-destroy-async', ZRR['result_async']), + ('ZAPI', 
'job-get', ZRR['job_success']), + ]) + module_args = { + 'use_rest': 'never', + 'force_offline': 'true', + 'junction_path': 'jpath', + 'force_unmount': 'true', + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_flexcache_not_exist(): + ''' delete flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + 'state': 'absent' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_delete_flexcache_exists_with_force(): + ''' delete flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ('ZAPI', 'volume-offline', ZRR['success']), + ('ZAPI', 'flexcache-destroy-async', ZRR['result_error']), + # 2nd run + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ('ZAPI', 'volume-offline', ZRR['success']), + ('ZAPI', 'flexcache-destroy-async', ZRR['result_async']), + ('ZAPI', 'job-get', ZRR['error']), + # 3rd run + ('ZAPI', 'flexcache-get-iter', ZRR['flexcache_get_info']), + ('ZAPI', 'volume-offline', ZRR['success']), + ('ZAPI', 'flexcache-destroy-async', ZRR['result_async']), + ('ZAPI', 'job-get', ZRR['job_error']), + ]) + module_args = { + 'use_rest': 'never', + 'force_offline': 'true', + 'state': 'absent' + } + error = 'Unexpected error when deleting flexcache: results is:' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + error = zapi_error_message('Error fetching job info') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + error = 'Error when deleting flexcache' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_create_flexcache_size_error(): + ''' create flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ('ZAPI', 'flexcache-create-async', ZRR['error_size']), + ]) + module_args = { + 'use_rest': 
'never', + 'size': '50', # 80MB minimum + 'size_unit': 'mb', # 80MB minimum + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + } + error = zapi_error_message('Error creating FlexCache', 'size', 'Size "50MB" ("52428800B") is too small. Minimum size is "80MB" ("83886080B")') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_create_flexcache_time_out(dont_sleep): + ''' create flexcache ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['no_records']), + ('ZAPI', 'flexcache-create-async', ZRR['result_async']), + ('ZAPI', 'job-get', ZRR['job_running']), + ]) + module_args = { + 'use_rest': 'never', + 'size': '50', # 80MB minimum + 'size_unit': 'mb', # 80MB minimum + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + 'time_out': '2', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = 'Error when creating flexcache: job completion exceeded expected timer of: 2 seconds' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_error_zapi(): + ''' error in ZAPI calls ''' + register_responses([ + ('ZAPI', 'flexcache-get-iter', ZRR['error']), + ('ZAPI', 'volume-offline', ZRR['error']), + ('ZAPI', 'volume-unmount', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = zapi_error_message('Error fetching FlexCache info') + assert error in expect_and_capture_ansible_exception(my_obj.flexcache_get, 'fail')['msg'] + error = zapi_error_message('Error offlining FlexCache volume') + assert error in expect_and_capture_ansible_exception(my_obj.volume_offline, 'fail', None)['msg'] + error = zapi_error_message('Error unmounting FlexCache volume') + assert error in expect_and_capture_ansible_exception(my_obj.volume_unmount, 'fail', None)['msg'] + + +def test_check_job_status(): 
+ ''' check_job_status ''' + register_responses([ + # job not found + ('ZAPI', 'job-get', ZRR['error_15661']), + ('ZAPI', 'vserver-get-iter', ZRR['no_records']), + ('ZAPI', 'job-get', ZRR['error_15661']), + # cserver job not found + ('ZAPI', 'job-get', ZRR['error_15661']), + ('ZAPI', 'vserver-get-iter', ZRR['cserver']), + ('ZAPI', 'job-get', ZRR['error_15661']), + # missing job-completion + ('ZAPI', 'job-get', ZRR['job_error_no_completion']), + # bad status + ('ZAPI', 'job-get', ZRR['job_other']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + # error = zapi_error_message('Error fetching FlexCache info') + error = 'cannot locate job with id: 1' + assert error in my_obj.check_job_status('1') + assert error in my_obj.check_job_status('1') + assert 'progress' in my_obj.check_job_status('1') + error = 'Unexpected job status in:' + assert error in expect_and_capture_ansible_exception(my_obj.check_job_status, 'fail', '1')['msg'] + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'one_flexcache_record': (200, dict(records=[ + dict(uuid='a1b2c3', + name='flexcache_volume', + svm=dict(name='vserver'), + ) + ], num_records=1), None), + 'one_flexcache_record_with_path': (200, dict(records=[ + dict(uuid='a1b2c3', + name='flexcache_volume', + svm=dict(name='vserver'), + path='path' + ) + ], num_records=1), None), +}) + + 
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + module_args = { + "use_rest": "never" + } + assert 'Error: the python NetApp-Lib module is required. Import error: None' == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_missing_arguments(): + ''' create flexcache ''' + register_responses([ + + ]) + args = dict(DEFAULT_ARGS) + del args['hostname'] + module_args = { + 'use_rest': 'always', + } + error = 'missing required arguments: hostname' + assert error in call_main(my_main, args, module_args, fail=True)['msg'] + + +def test_rest_create(): + ''' create flexcache ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['zero_record']), + ('POST', 'storage/flexcache/flexcaches', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + 'size': '50', # 80MB minimum + 'size_unit': 'mb', # 80MB minimum + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + 'origin_cluster': 'ocluster', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_create_no_action(): + ''' create flexcache idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record']), + ]) + module_args = { + 'use_rest': 'always', + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_delete_no_action(): + ''' delete flexcache ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['zero_record']), + ]) + module_args = { + 'use_rest': 'always', + 'state': 'absent' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + 
+ +def test_rest_delete(): + ''' delete flexcache ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record']), + ('DELETE', 'storage/flexcache/flexcaches/a1b2c3', SRR['empty_good']), + ]) + module_args = { + 'use_rest': 'always', + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_delete_with_force(): + ''' delete flexcache, since there is no path, unmount is not called ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record']), + ('DELETE', 'storage/flexcache/flexcaches/a1b2c3', SRR['empty_good']), + ]) + module_args = { + 'use_rest': 'always', + 'force_unmount': True, + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_delete_with_force_and_path(): + ''' delete flexcache with unmount ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record_with_path']), + ('PATCH', 'storage/volumes/a1b2c3', SRR['empty_good']), + ('DELETE', 'storage/flexcache/flexcaches/a1b2c3', SRR['empty_good']), + ]) + module_args = { + 'use_rest': 'always', + 'force_unmount': True, + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_delete_with_force2_and_path(): + ''' delete flexcache with unmount and offline''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record_with_path']), + ('PATCH', 'storage/volumes/a1b2c3', SRR['empty_good']), + ('PATCH', 'storage/volumes/a1b2c3', SRR['empty_good']), + ('DELETE', 'storage/flexcache/flexcaches/a1b2c3', SRR['empty_good']), + ]) + module_args = { + 'use_rest': 'always', + 'force_offline': True, + 'force_unmount': True, + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, 
module_args)['changed'] + + +def test_rest_modify_prepopulate_no_action(): + ''' modify flexcache ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record']), + ]) + module_args = { + 'use_rest': 'always', + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + 'prepopulate': { + 'dir_paths': ['/'], + 'force_prepopulate_if_already_created': False + } + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_modify_prepopulate(): + ''' modify flexcache ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record']), + ('PATCH', 'storage/flexcache/flexcaches/a1b2c3', SRR['empty_good']), + ]) + module_args = { + 'use_rest': 'always', + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + 'prepopulate': { + 'dir_paths': ['/'], + 'force_prepopulate_if_already_created': True + } + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_modify_prepopulate_default(): + ''' modify flexcache ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record']), + ('PATCH', 'storage/flexcache/flexcaches/a1b2c3', SRR['empty_good']), + ]) + module_args = { + 'use_rest': 'always', + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + 'prepopulate': { + 'dir_paths': ['/'], + } + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_modify_prepopulate_and_mount(): + ''' modify flexcache ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record']), + ('PATCH', 'storage/volumes/a1b2c3', SRR['empty_good']), + ('PATCH', 'storage/flexcache/flexcaches/a1b2c3', SRR['empty_good']), + ]) 
+ module_args = { + 'use_rest': 'always', + 'aggr_list': 'aggr1', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + 'prepopulate': { + 'dir_paths': ['/'], + }, + 'path': '/mount_path' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_error_modify(): + ''' create flexcache idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record']), + ]) + module_args = { + 'use_rest': 'always', + 'aggr_list': 'aggr1', + 'volume': 'flexcache_volume2', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + } + error = 'FlexCache properties cannot be modified by this module. modify:' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_warn_prepopulate(): + ''' create flexcache idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/flexcache/flexcaches', SRR['one_flexcache_record']), + ('PATCH', 'storage/volumes/a1b2c3', SRR['success']), + ('PATCH', 'storage/flexcache/flexcaches/a1b2c3', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + 'aggr_list': 'aggr1', + 'volume': 'flexcache_volume', + 'origin_volume': 'fc_vol_origin', + 'origin_vserver': 'ansibleSVM', + 'prepopulate': { + 'dir_paths': ['/'], + 'force_prepopulate_if_already_created': True + }, + 'junction_path': '' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + print_warnings() + assert_warning_was_raised('na_ontap_flexcache is not idempotent when prepopulate is present and force_prepopulate_if_already_created=true') + assert_warning_was_raised('prepopulate requires the FlexCache volume to be mounted') + + +def test_error_missing_uuid(): + module_args = { + 'use_rest': 'akway', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + current = {} + error_template = 'Error in %s: Error, no uuid in current: {}' + error = 
error_template % 'rest_offline_volume' + assert error in expect_and_capture_ansible_exception(my_obj.rest_offline_volume, 'fail', current)['msg'] + error = error_template % 'rest_mount_volume' + assert error in expect_and_capture_ansible_exception(my_obj.rest_mount_volume, 'fail', current, 'path')['msg'] + error = error_template % 'flexcache_rest_delete' + assert error in expect_and_capture_ansible_exception(my_obj.flexcache_rest_delete, 'fail', current)['msg'] + + +def test_prepopulate_option_checks(): + ''' create flexcache idempotent ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'use_rest': 'always', + 'prepopulate': { + 'dir_paths': ['/'], + 'force_prepopulate_if_already_created': True, + 'exclude_dir_paths': ['/'] + }, + } + error = 'Error: using prepopulate requires ONTAP 9.8 or later and REST must be enabled - ONTAP version: 9.7.0.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + error = 'Error: using prepopulate: exclude_dir_paths requires ONTAP 9.9 or later and REST must be enabled - ONTAP version: 9.8.0.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_event.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_event.py new file mode 100644 index 000000000..a679f9ded --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_event.py @@ -0,0 +1,338 @@ +''' unit tests ONTAP Ansible module: na_ontap_fpolicy_event ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_event \ + import NetAppOntapFpolicyEvent as fpolicy_event_module # module under test + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {"num_records": 0}, None), + 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'fpolicy_event_record': (200, { + "num_records": 1, + "records": [{ + 'svm': {'uuid': '3b21372b-64ae-11eb-8c0e-0050568176ec'}, + 'name': 'my_event2', + 'volume_monitoring': False + }] + }, None), + 'vserver_uuid_record': (200, { + 'records': [{ + 'uuid': '3b21372b-64ae-11eb-8c0e-0050568176ec' + }] + 
}, None) +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'fpolicy_event': + xml = self.build_fpolicy_event_info() + elif self.type == 'fpolicy_event_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_fpolicy_event_info(): + ''' build xml data for fpolicy-policy-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': { + 'fpolicy-event-options-config': { + "event-name": "my_event2", + "vserver": "svm1", + 'volume-operation': "false" + } + } + } + xml.translate_struct(data) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.onbox = False + + def set_default_args(self, use_rest=None): + if self.onbox: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + vserver = 'svm1' + name = 'my_event2' + volume_monitoring = False + + else: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + vserver = 'svm1' + name = 'my_event2' + volume_monitoring = False + + args = dict({ + 'state': 'present', + 'hostname': hostname, + 'username': username, + 'password': password, + 'vserver': vserver, + 'name': name, + 'volume_monitoring': volume_monitoring + }) + + if use_rest is not None: + args['use_rest'] = use_rest + + return args + + @staticmethod + def get_fpolicy_event_mock_object(cx_type='zapi', kind=None): + fpolicy_event_obj = fpolicy_event_module() + if cx_type == 'zapi': + if kind is None: + fpolicy_event_obj.server = MockONTAPConnection() + 
else: + fpolicy_event_obj.server = MockONTAPConnection(kind=kind) + return fpolicy_event_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + fpolicy_event_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_get_called(self): + ''' test get_fpolicy_event for non-existent config''' + set_module_args(self.set_default_args(use_rest='Never')) + print('starting') + my_obj = fpolicy_event_module() + print('use_rest:', my_obj.use_rest) + my_obj.server = self.server + assert my_obj.get_fpolicy_event is not None + + def test_ensure_get_called_existing(self): + ''' test get_fpolicy_event_config for existing config''' + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = fpolicy_event_module() + my_obj.server = MockONTAPConnection(kind='fpolicy_event') + assert my_obj.get_fpolicy_event() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_event.NetAppOntapFpolicyEvent.create_fpolicy_event') + def test_successful_create(self, create_fpolicy_event): + ''' creating fpolicy_event and test idempotency ''' + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = fpolicy_event_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + create_fpolicy_event.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = fpolicy_event_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_event') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_event.NetAppOntapFpolicyEvent.delete_fpolicy_event') + def 
test_successful_delete(self, delete_fpolicy_event): + ''' delete fpolicy_event and test idempotency ''' + data = self.set_default_args(use_rest='Never') + data['state'] = 'absent' + set_module_args(data) + my_obj = fpolicy_event_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_event') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # to reset na_helper from remembering the previous 'changed' value + my_obj = fpolicy_event_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_event.NetAppOntapFpolicyEvent.modify_fpolicy_event') + def test_successful_modify(self, modify_fpolicy_event): + ''' modifying fpolicy_event config and testing idempotency ''' + data = self.set_default_args(use_rest='Never') + data['volume_monitoring'] = True + set_module_args(data) + my_obj = fpolicy_event_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_event') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # to reset na_helper from remembering the previous 'changed' value + data['volume_monitoring'] = False + set_module_args(data) + my_obj = fpolicy_event_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_event') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + data = self.set_default_args(use_rest='Never') + set_module_args(data) + my_obj = fpolicy_event_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_event_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_fpolicy_event() + assert 'Error creating fPolicy policy event ' in exc.value.args[0]['msg'] + with 
pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_fpolicy_event() + assert 'Error deleting fPolicy policy event ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_fpolicy_event(modify={}) + assert 'Error modifying fPolicy policy event ' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_fpolicy_event_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['msg'] == SRR['generic_error'][2] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_create_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['vserver_uuid_record'], + SRR['empty_good'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_event_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_create_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['vserver_uuid_record'], + SRR['fpolicy_event_record'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_event_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_delete_rest(self, mock_request): + data = 
self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['vserver_uuid_record'], + SRR['fpolicy_event_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_event_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_delete_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['vserver_uuid_record'], + SRR['empty_good'], # get + SRR['empty_good'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_event_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_modify_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'present' + data['volume_monitoring'] = True + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['vserver_uuid_record'], + SRR['fpolicy_event_record'], # get + SRR['empty_good'], # no response for modify + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_event_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_modify_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'present' + data['volume_monitoring'] = False + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['vserver_uuid_record'], + SRR['fpolicy_event_record'], # get + SRR['empty_good'], + SRR['end_of_sequence'] + 
] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_event_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_ext_engine.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_ext_engine.py new file mode 100644 index 000000000..c2304876c --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_ext_engine.py @@ -0,0 +1,395 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP fpolicy ext engine Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_ext_engine \ + import NetAppOntapFpolicyExtEngine as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'fpolicy_ext_engine': + xml = self.build_fpolicy_ext_engine_info() + elif self.type == 'fpolicy_ext_engine_fail': + raise 
netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_fpolicy_ext_engine_info(): + ''' build xml data for fpolicy-policy-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': { + 'fpolicy-external-engine-info': { + 'vserver': 'svm1', + 'engine-name': 'engine1', + 'primary-servers': [ + {'ip-address': '10.11.12.13'} + ], + 'port-number': '8787', + 'extern-engine-type': 'asynchronous', + 'ssl-option': 'no_auth' + } + } + } + xml.translate_struct(data) + return xml + + +def default_args(): + args = { + 'vserver': 'svm1', + 'name': 'engine1', + 'primary_servers': '10.11.12.13', + 'port': 8787, + 'extern_engine_type': 'asynchronous', + 'ssl_option': 'no_auth', + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'one_fpolicy_ext_engine_record': (200, { + "records": [{ + 'engine-name': 'engine1', + 'vserver': 'svm1', + 'primary-servers': ['10.11.12.13'], + 'port': 8787, + 'extern-engine-type': 'asynchronous', + 'ssl-option': 'no-auth' + }], + 'num_records': 1 + }, None) + +} + + +def get_fpolicy_ext_engine_mock_object(cx_type='zapi', kind=None): + fpolicy_ext_engine_obj = my_module() + if cx_type == 'zapi': + if kind is None: + fpolicy_ext_engine_obj.server = 
MockONTAPConnection() + else: + fpolicy_ext_engine_obj.server = MockONTAPConnection(kind=kind) + return fpolicy_ext_engine_obj + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_ensure_get_called(patch_ansible): + ''' test get_fpolicy_ext_engine for non-existent engine''' + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + print('starting') + my_obj = my_module() + print('use_rest:', my_obj.use_rest) + my_obj.server = MockONTAPConnection() + assert my_obj.get_fpolicy_ext_engine is not None + + +def test_rest_missing_arguments(patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' create fpolicy ext engine ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_ext_engine.NetAppOntapFpolicyExtEngine.create_fpolicy_ext_engine') +def test_successful_create(self, patch_ansible): + ''' creating fpolicy_ext_engine and test idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection() + with patch.object(my_module, 'create_fpolicy_ext_engine', wraps=my_obj.create_fpolicy_ext_engine) as mock_create: + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + mock_create.assert_called_with() + # test idempotency + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_ext_engine') + with 
pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_ext_engine.NetAppOntapFpolicyExtEngine.delete_fpolicy_ext_engine') +def test_successful_delete(self, patch_ansible): + ''' delete fpolicy_ext_engine and test idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + args['state'] = 'absent' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_ext_engine') + with patch.object(my_module, 'delete_fpolicy_ext_engine', wraps=my_obj.delete_fpolicy_ext_engine) as mock_delete: + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Delete: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + mock_delete.assert_called_with() + # test idempotency + args = dict(default_args()) + args['use_rest'] = 'never' + args['state'] = 'absent' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Delete: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_ext_engine.NetAppOntapFpolicyExtEngine.modify_fpolicy_ext_engine') +def test_successful_modify(self, patch_ansible): + ''' modifying fpolicy_ext_engine and testing idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + args['port'] = '9999' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_ext_engine') + with patch.object(my_module, 'modify_fpolicy_ext_engine', wraps=my_obj.modify_fpolicy_ext_engine) as mock_modify: + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Modify: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + mock_modify.assert_called_with({'port': 9999}) + # test idempotency + args = 
dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_ext_engine') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Modify: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +def test_if_all_methods_catch_exception(patch_ansible): + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_ext_engine_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_fpolicy_ext_engine() + assert 'Error creating fPolicy external engine ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_fpolicy_ext_engine() + assert 'Error deleting fPolicy external engine ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_fpolicy_ext_engine(modify={}) + assert 'Error modifying fPolicy external engine ' in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' create fpolicy ext engine ''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' create fpolicy ext engine idempotent ''' + args = dict(default_args()) + set_module_args(args) + 
mock_request.side_effect = [ + SRR['is_rest'], + SRR['one_fpolicy_ext_engine_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_delete_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' delete fpolicy ext engine ''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_delete(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' delete fpolicy ext engine ''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['one_fpolicy_ext_engine_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_modify_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' modify fpolicy ext engine ''' + args = dict(default_args()) + set_module_args(args) + 
mock_request.side_effect = [ + SRR['is_rest'], + SRR['one_fpolicy_ext_engine_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_modify_prepopulate(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' modify fpolicy ext engine ''' + args = dict(default_args()) + args['port'] = 9999 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['one_fpolicy_ext_engine_record'], # get + SRR['empty_good'], # patch + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_delete_no_action_empty_get(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' delete fpolicy ext engine ''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_good'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_delete_prepopulate(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' delete fpolicy ext engine ''' + args = dict(default_args()) + args['state'] = 'absent' + 
set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['one_fpolicy_ext_engine_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_policy.py new file mode 100644 index 000000000..fe065af33 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_policy.py @@ -0,0 +1,339 @@ +''' unit tests ONTAP Ansible module: na_ontap_fpolicy_policy ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_policy \ + import NetAppOntapFpolicyPolicy as fpolicy_policy_module # module under test + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 
'fpolicy_policy_record': (200, { + "records": [{ + "vserver": "svm1", + "policy_name": "policy1", + "events": ['my_event'], + "engine": "native", + "is_mandatory": False, + "allow_privileged_access": False, + "is_passthrough_read_enabled": False + }] + }, None) +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'fpolicy_policy': + xml = self.build_fpolicy_policy_info() + elif self.type == 'fpolicy_policy_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_fpolicy_policy_info(): + ''' build xml data for fpolicy-policy-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': { + 'fpolicy-policy-info': { + "vserver": "svm1", + "policy-name": "policy1", + "events": [ + {'event-name': 'my_event'} + ], + "engine-name": "native", + "is-mandatory": "False", + "allow-privileged-access": "False", + "is-passthrough-read-enabled": "False" + } + } + } + xml.translate_struct(data) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.onbox = False + + def set_default_args(self, use_rest=None): + if self.onbox: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + vserver = 'svm1' + name = 'policy1' + events = 'my_event' + is_mandatory = False + + else: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + vserver = 'svm1' + name = 'policy1' + events = 'my_event' + is_mandatory = False + + args = dict({ + 'state': 'present', + 'hostname': hostname, 
+ 'username': username, + 'password': password, + 'vserver': vserver, + 'name': name, + 'events': events, + 'is_mandatory': is_mandatory + }) + + if use_rest is not None: + args['use_rest'] = use_rest + + return args + + @staticmethod + def get_fpolicy_policy_mock_object(cx_type='zapi', kind=None): + fpolicy_policy_obj = fpolicy_policy_module() + if cx_type == 'zapi': + if kind is None: + fpolicy_policy_obj.server = MockONTAPConnection() + else: + fpolicy_policy_obj.server = MockONTAPConnection(kind=kind) + return fpolicy_policy_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + fpolicy_policy_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_get_called(self): + ''' test get_fpolicy_policy for non-existent config''' + set_module_args(self.set_default_args(use_rest='Never')) + print('starting') + my_obj = fpolicy_policy_module() + print('use_rest:', my_obj.use_rest) + my_obj.server = self.server + assert my_obj.get_fpolicy_policy is not None + + def test_ensure_get_called_existing(self): + ''' test get_fpolicy_policy_config for existing config''' + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = fpolicy_policy_module() + my_obj.server = MockONTAPConnection(kind='fpolicy_policy') + assert my_obj.get_fpolicy_policy() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_policy.NetAppOntapFpolicyPolicy.create_fpolicy_policy') + def test_successful_create(self, create_fpolicy_policy): + ''' creating fpolicy_policy and test idempotency ''' + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = fpolicy_policy_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + create_fpolicy_policy.assert_called_with() + # to reset na_helper from remembering the 
previous 'changed' value + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = fpolicy_policy_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_policy.NetAppOntapFpolicyPolicy.delete_fpolicy_policy') + def test_successful_delete(self, delete_fpolicy_policy): + ''' delete fpolicy_policy and test idempotency ''' + data = self.set_default_args(use_rest='Never') + data['state'] = 'absent' + set_module_args(data) + my_obj = fpolicy_policy_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # to reset na_helper from remembering the previous 'changed' value + my_obj = fpolicy_policy_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_policy.NetAppOntapFpolicyPolicy.modify_fpolicy_policy') + def test_successful_modify(self, modify_fpolicy_policy): + ''' modifying fpolicy_policy config and testing idempotency ''' + data = self.set_default_args(use_rest='Never') + data['is_mandatory'] = True + set_module_args(data) + my_obj = fpolicy_policy_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # to reset na_helper from remembering the previous 'changed' value + data['is_mandatory'] = False + set_module_args(data) + my_obj = fpolicy_policy_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + 
assert not exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + data = self.set_default_args(use_rest='Never') + set_module_args(data) + my_obj = fpolicy_policy_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('fpolicy_policy_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_fpolicy_policy() + assert 'Error creating fPolicy policy ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_fpolicy_policy() + assert 'Error deleting fPolicy policy ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_fpolicy_policy(modify={}) + assert 'Error modifying fPolicy policy ' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_fpolicy_policy_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['msg'] == SRR['generic_error'][2] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_create_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_good'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_policy_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_create_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['fpolicy_policy_record'], + 
SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_policy_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_delete_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['fpolicy_policy_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_policy_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_delete_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_good'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_policy_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_modify_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'present' + data['is_mandatory'] = 'True' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['fpolicy_policy_record'], # get + SRR['empty_good'], # no response for modify + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_policy_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_modify_rest(self, mock_request): + data = self.set_default_args() + 
data['state'] = 'present' + data['is_mandatory'] = False + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['fpolicy_policy_record'], # get + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_fpolicy_policy_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_scope.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_scope.py new file mode 100644 index 000000000..b09ab26ae --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_scope.py @@ -0,0 +1,351 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP fpolicy scope Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_scope \ + import NetAppOntapFpolicyScope as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + 
self.xml_in = xml + if self.type == 'fpolicy_scope': + xml = self.build_fpolicy_scope_info() + elif self.type == 'fpolicy_scope_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_fpolicy_scope_info(): + ''' build xml data for fpolicy-policy-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': { + 'fpolicy-scope-config': { + 'vserver': 'svm1', + 'policy-name': 'policy1', + 'export-policies-to-exclude': [ + {'string': 'export1'} + ], + 'is-file-extension-check-on-directories-enabled': True, + 'is-monitoring-of-objects-with-no-extension-enabled': False + } + } + } + xml.translate_struct(data) + return xml + + +def default_args(): + args = { + 'vserver': 'svm1', + 'name': 'policy1', + 'export_policies_to_exclude': 'export1', + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'one_fpolicy_scope_record': (200, { + "records": [{ + 'vserver': 'svm1', + 'policy_name': 'policy1', + 'export_policies_to_exclude': ['export1'], + 'is_file_extension_check_on_directories_enabled': True, + 'is_monitoring_of_objects_with_no_extension_enabled': False + }], + 'num_records': 1 + }, None) +} + + +def get_fpolicy_scope_mock_object(cx_type='zapi', kind=None): + 
fpolicy_scope_obj = my_module() + if cx_type == 'zapi': + if kind is None: + fpolicy_scope_obj.server = MockONTAPConnection() + else: + fpolicy_scope_obj.server = MockONTAPConnection(kind=kind) + return fpolicy_scope_obj + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_ensure_get_called(patch_ansible): + ''' test get_fpolicy_scope for non-existent policy''' + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + print('starting') + my_obj = my_module() + print('use_rest:', my_obj.use_rest) + my_obj.server = MockONTAPConnection() + assert my_obj.get_fpolicy_scope is not None + + +def test_rest_missing_arguments(patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' create fpolicy scope ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_scope.NetAppOntapFpolicyScope.create_fpolicy_scope') +def test_successful_create(self, patch_ansible): + ''' creating fpolicy_scope and test idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection() + with patch.object(my_module, 'create_fpolicy_scope', wraps=my_obj.create_fpolicy_scope) as mock_create: + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + mock_create.assert_called_with() + # test idempotency + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = 
MockONTAPConnection('fpolicy_scope') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_scope.NetAppOntapFpolicyScope.delete_fpolicy_scope') +def test_successful_delete(self, patch_ansible): + ''' delete fpolicy_scope and test idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + args['state'] = 'absent' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_scope') + with patch.object(my_module, 'delete_fpolicy_scope', wraps=my_obj.delete_fpolicy_scope) as mock_delete: + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Delete: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + mock_delete.assert_called_with() + # test idempotency + args = dict(default_args()) + args['use_rest'] = 'never' + args['state'] = 'absent' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Delete: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_scope.NetAppOntapFpolicyScope.modify_fpolicy_scope') +def test_successful_modify(self, patch_ansible): + ''' modifying fpolicy_scope and testing idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + args['export_policies_to_exclude'] = 'export1,export2' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_scope') + with patch.object(my_module, 'modify_fpolicy_scope', wraps=my_obj.modify_fpolicy_scope) as mock_modify: + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Modify: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + mock_modify.assert_called_with({'export_policies_to_exclude': ['export1', 
'export2']}) + # test idempotency + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_scope') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Modify: ' + repr(exc.value)) + print(exc.value.args[0]['changed']) + assert not exc.value.args[0]['changed'] + + +def test_if_all_methods_catch_exception(patch_ansible): + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_scope_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_fpolicy_scope() + assert 'Error creating fPolicy policy scope ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_fpolicy_scope() + assert 'Error deleting fPolicy policy scope ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_fpolicy_scope(modify={}) + assert 'Error modifying fPolicy policy scope ' in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' create fpolicy scope ''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' create fpolicy scope idempotent ''' + args = 
dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['one_fpolicy_scope_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_delete_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' delete fpolicy scope ''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_delete(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' delete fpolicy scope ''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['one_fpolicy_scope_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_modify_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' modify fpolicy scope ''' + args = dict(default_args()) + 
set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['one_fpolicy_scope_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_modify_prepopulate(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' modify fpolicy scope ''' + args = dict(default_args()) + args['export_policies_to_exclude'] = 'export1,export2' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['one_fpolicy_scope_record'], # get + SRR['empty_good'], # patch + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_status.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_status.py new file mode 100644 index 000000000..64674a3aa --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_fpolicy_status.py @@ -0,0 +1,286 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP fpolicy status Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from 
ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_status \ + import NetAppOntapFpolicyStatus as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'fpolicy_policy_enabled': + xml = self.build_fpolicy_status_info_enabled() + elif self.type == 'fpolicy_policy_disabled': + xml = self.build_fpolicy_status_info_disabled() + elif self.type == 'fpolicy_policy_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_fpolicy_status_info_enabled(): + ''' build xml data for fpolicy-policy-status-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': { + 'fpolicy-policy-status-info': { + 'vserver': 'svm1', + 'policy-name': 'fPolicy1', + 'status': 'true' + } + } + } + xml.translate_struct(data) + return xml + + @staticmethod + def build_fpolicy_status_info_disabled(): + ''' build xml data for fpolicy-policy-status-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes-list': { + 'fpolicy-policy-status-info': { + 'vserver': 'svm1', + 'policy-name': 'fPolicy1', + 'status': 'false' + } + } + } + xml.translate_struct(data) + return xml + + +def default_args(): + args = { + 'vserver': 'svm1', + 'policy_name': 'fPolicy1', + 'sequence_number': '10', + 'hostname': '10.10.10.10', 
+ 'username': 'username', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + # 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'uuid': (200, { + 'records': [{ + 'uuid': '56ab5d21' + }], + 'num_records': 1 + }, None), + 'fpolicy_status_info_enabled': (200, { + 'records': [{ + 'svm': { + 'uuid': '56ab5d21', + 'name': 'svm1' + }, + 'policies': [{ + 'name': 'fPolicy1', + 'enabled': True, + 'priority': 10 + }] + }], + 'num_records': 1 + }, None), + 'fpolicy_status_info_disabled': (200, { + 'records': [{ + 'svm': { + 'uuid': '56ab5d21', + 'name': 'svm1' + }, + 'policies': [{ + 'name': 'fPolicy1', + 'enabled': False + }] + }], + 'num_records': 1 + }, None) + +} + + +def get_fpolicy_status_mock_object(cx_type='zapi', kind=None): + fpolicy_status_obj = my_module() + if cx_type == 'zapi': + if kind is None: + fpolicy_status_obj.server = MockONTAPConnection() + else: + fpolicy_status_obj.server = MockONTAPConnection(kind=kind) + return fpolicy_status_obj + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_ensure_get_called(patch_ansible): + ''' test get_fpolicy_policy_status for non-existent fPolicy''' + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + 
print('starting') + my_obj = my_module() + print('use_rest:', my_obj.use_rest) + my_obj.server = MockONTAPConnection('fpolicy_policy_enabled') + assert my_obj.get_fpolicy_policy_status is not None + + +def test_rest_missing_arguments(patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' enable fpolicy ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_status.NetAppOntapFpolicyStatus.enable_fpolicy_policy') +def test_successful_enable(self, patch_ansible): + ''' Enable fPolicy and test idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_policy_disabled') + with patch.object(my_module, 'enable_fpolicy_policy', wraps=my_obj.enable_fpolicy_policy) as mock_enable: + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Enable: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + mock_enable.assert_called_with() + # test idempotency + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_policy_enabled') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Enable: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_fpolicy_status.NetAppOntapFpolicyStatus.disable_fpolicy_policy') +def test_successful_disable(self, patch_ansible): + ''' Disable fPolicy and test idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + args['state'] = 'absent' + set_module_args(args) + my_obj = my_module() + my_obj.server = 
MockONTAPConnection('fpolicy_policy_enabled') + with patch.object(my_module, 'disable_fpolicy_policy', wraps=my_obj.disable_fpolicy_policy) as mock_disable: + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Enable: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + mock_disable.assert_called_with() + # test idempotency + args = dict(default_args()) + args['use_rest'] = 'never' + args['state'] = 'absent' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_policy_disabled') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Enable: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +def test_if_all_methods_catch_exception(patch_ansible): + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('fpolicy_policy_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.enable_fpolicy_policy() + print(str(exc.value.args[0]['msg'])) + assert 'Error enabling fPolicy policy ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.disable_fpolicy_policy() + assert 'Error disabling fPolicy policy ' in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_enable(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' enable fPolicy policy ''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['uuid'], # get + SRR['fpolicy_status_info_disabled'], # get + SRR['empty_good'], # patch + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 4 + + 
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_disable(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' disable fPolicy policy ''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['uuid'], # get + SRR['fpolicy_status_info_enabled'], # get + SRR['empty_good'], # patch + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 4 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup.py new file mode 100644 index 000000000..5e5b7c64c --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup.py @@ -0,0 +1,415 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, patch_ansible, create_module, create_and_apply, assert_warning_was_raised, assert_no_warnings, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from 
ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup \ + import NetAppOntapIgroup as igroup # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'vserver': 'vserver', + 'name': 'test', + 'initiator_names': 'init1', + 'ostype': 'linux', + 'initiator_group_type': 'fcp', + 'bind_portset': 'true', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'never' +} + +igroup_with_initiator = { + 'num-records': 1, + 'attributes-list': { + 'vserver': 'vserver', + 'initiator-group-os-type': 'linux', + 'initiator-group-info': { + 'initiators': [ + {'initiator-info': {'initiator-name': 'init1'}}, + {'initiator-info': {'initiator-name': 'init2'}} + ] + } + } +} + +igroup_without_initiator = { + 'num-records': 1, + 'attributes-list': { + 'initiator-group-info': {'vserver': 'test'} + } +} + +ZRR = zapi_responses({ + 'igroup_with_initiator_info': build_zapi_response(igroup_with_initiator), + 'igroup_without_initiator_info': build_zapi_response(igroup_without_initiator) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + igroup() + msg = 'missing required arguments:' + assert msg in exc.value.args[0]['msg'] + + +def test_get_nonexistent_igroup(): + ''' Test if get_igroup returns None for non-existent igroup ''' + register_responses([ + ('igroup-get-iter', ZRR['empty']) + ]) + igroup_obj = create_module(igroup, DEFAULT_ARGS) + result = igroup_obj.get_igroup('dummy') + assert result is None + + +def test_get_existing_igroup_with_initiators(): + ''' Test 
if get_igroup returns list of existing initiators ''' + register_responses([ + ('igroup-get-iter', ZRR['igroup_with_initiator_info']) + ]) + igroup_obj = create_module(igroup, DEFAULT_ARGS) + result = igroup_obj.get_igroup('igroup') + assert DEFAULT_ARGS['initiator_names'] in result['initiator_names'] + assert result['initiator_names'] == ['init1', 'init2'] + + +def test_get_existing_igroup_without_initiators(): + ''' Test if get_igroup returns empty list() ''' + register_responses([ + ('igroup-get-iter', ZRR['igroup_without_initiator_info']) + ]) + igroup_obj = create_module(igroup, DEFAULT_ARGS) + result = igroup_obj.get_igroup('igroup') + assert result['initiator_names'] == [] + + +def test_modify_initiator_calls_add_and_remove(): + '''Test remove_initiator() is called followed by add_initiator() on modify operation''' + register_responses([ + ('igroup-get-iter', ZRR['igroup_with_initiator_info']), + ('igroup-remove', ZRR['success']), + ('igroup-remove', ZRR['success']), + ('igroup-add', ZRR['success']) + ]) + igroup_obj = create_and_apply(igroup, DEFAULT_ARGS, {'initiator_names': 'replacewithme'})['changed'] + + +def test_modify_called_from_add(): + '''Test remove_initiator() and add_initiator() calls modify''' + register_responses([ + ('igroup-get-iter', ZRR['igroup_without_initiator_info']), + ('igroup-add', ZRR['success']) + ]) + igroup_obj = create_and_apply(igroup, DEFAULT_ARGS, {'initiator_names': 'replacewithme'})['changed'] + + +def test_modify_called_from_remove(): + '''Test remove_initiator() and add_initiator() calls modify''' + register_responses([ + ('igroup-get-iter', ZRR['igroup_with_initiator_info']), + ('igroup-remove', ZRR['success']), + ('igroup-remove', ZRR['success']) + ]) + igroup_obj = create_and_apply(igroup, DEFAULT_ARGS, {'initiator_names': ''})['changed'] + + +def test_successful_create(): + ''' Test successful create ''' + register_responses([ + ('igroup-get-iter', ZRR['empty']), + ('igroup-create', ZRR['success']), + ('igroup-add', 
ZRR['success']) + ]) + igroup_obj = create_and_apply(igroup, DEFAULT_ARGS)['changed'] + + +def test_successful_delete(): + ''' Test successful delete ''' + register_responses([ + ('igroup-get-iter', ZRR['igroup_with_initiator_info']), + ('igroup-destroy', ZRR['success']) + ]) + igroup_obj = create_and_apply(igroup, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_successful_rename(): + '''Test successful rename''' + register_responses([ + ('igroup-get-iter', ZRR['empty']), + ('igroup-get-iter', ZRR['igroup_with_initiator_info']), + ('igroup-rename', ZRR['success']), + ('igroup-remove', ZRR['success']), + ]) + args = { + 'from_name': 'test', + 'name': 'test_new' + } + assert create_and_apply(igroup, DEFAULT_ARGS, args)['changed'] + + +def test_negative_modify_anything_zapi(): + ''' Test ZAPI option not currently supported in REST is rejected ''' + register_responses([ + ('igroup-get-iter', ZRR['igroup_with_initiator_info']), + ]) + args = { + 'vserver': 'my_vserver', + 'use_rest': 'never' + } + msg = "Error: modifying {'vserver': 'my_vserver'} is not supported in ZAPI" + assert msg in create_and_apply(igroup, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_negative_mutually_exclusive(): + ''' Test ZAPI option not currently supported in REST is rejected ''' + args = { + 'use_rest': 'auto', + 'igroups': 'my_group' + } + msg = "parameters are mutually exclusive: igroups|initiator_names" + assert msg in create_module(igroup, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_negative_igroups_require_rest(): + ''' Test ZAPI option not currently supported in REST is rejected ''' + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['initiator_names'] + args = { + 'igroups': 'my_group' + } + msg = "requires ONTAP 9.9.1 or later and REST must be enabled" + assert msg in create_module(igroup, DEFAULT_ARGS_COPY, args, fail=True)['msg'] + + +SRR = rest_responses({ + 'one_igroup_record': (200, dict(records=[ + dict(uuid='a1b2c3', + name='test', + 
svm=dict(name='vserver'), + initiators=[{'name': 'todelete'}], + protocol='fcp', + os_type='aix') + ], num_records=1), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None) +}) + + +def test_successful_create_rest(): + ''' Test successful create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['empty_records']), + ('POST', 'protocols/san/igroups', SRR['success']) + ]) + assert create_and_apply(igroup, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + + +def test_incomplete_record_rest(): + ''' Test successful create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['one_record_uuid']) + ]) + msg = "Error: unexpected igroup body:" + assert msg in create_and_apply(igroup, DEFAULT_ARGS, {'use_rest': 'always'}, fail=True)['msg'] + + +def test_successful_delete_rest(): + ''' Test successful delete ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['one_igroup_record']), + ('DELETE', 'protocols/san/igroups/a1b2c3', SRR['success']) + ]) + args = {'state': 'absent', 'use_rest': 'always'} + assert create_and_apply(igroup, DEFAULT_ARGS, args)['changed'] + + +def test_successful_modify_rest(): + ''' Test successful modify ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['one_igroup_record']), + ('DELETE', 'protocols/san/igroups/a1b2c3/initiators/todelete', SRR['success']), + ('POST', 'protocols/san/igroups/a1b2c3/initiators', SRR['success']), + ('PATCH', 'protocols/san/igroups/a1b2c3', SRR['success']) + ]) + assert create_and_apply(igroup, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + + +def test_successful_modify_initiator_objects_rest(): + ''' Test successful modify ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/igroups', 
SRR['one_igroup_record']), + ('DELETE', 'protocols/san/igroups/a1b2c3/initiators/todelete', SRR['success']), + ('POST', 'protocols/san/igroups/a1b2c3/initiators', SRR['success']), + ('PATCH', 'protocols/san/igroups/a1b2c3', SRR['success']) + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['initiator_names'] + DEFAULT_ARGS_COPY['initiator_objects'] = [{'name': 'init1', 'comment': 'comment1'}] + assert create_and_apply(igroup, DEFAULT_ARGS_COPY, {'use_rest': 'always'})['changed'] + + +def test_successful_modify_initiator_objects_comment_rest(): + ''' Test successful modify ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/igroups', SRR['one_igroup_record']), + ('PATCH', 'protocols/san/igroups/a1b2c3/initiators/todelete', SRR['success']), + ('PATCH', 'protocols/san/igroups/a1b2c3', SRR['success']) + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['initiator_names'] + DEFAULT_ARGS_COPY['initiator_objects'] = [{'name': 'todelete', 'comment': 'comment1'}] + assert create_and_apply(igroup, DEFAULT_ARGS_COPY, {'use_rest': 'always'})['changed'] + + +def test_successful_modify_igroups_rest(): + ''' Test successful modify ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/igroups', SRR['one_igroup_record']), + ('DELETE', 'protocols/san/igroups/a1b2c3/initiators/todelete', SRR['success']), + ('POST', 'protocols/san/igroups/a1b2c3/igroups', SRR['success']), + ('PATCH', 'protocols/san/igroups/a1b2c3', SRR['success']) + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['initiator_names'] + args = { + 'igroups': ['test_igroup'], + 'use_rest': 'auto', + 'force_remove_initiator': True + } + assert create_and_apply(igroup, DEFAULT_ARGS_COPY, args)['changed'] + + +def test_9_9_0_no_igroups_rest(): + ''' Test failed to use igroups ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + DEFAULT_ARGS_COPY = 
DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['initiator_names'] + args = { + 'igroups': ['test_igroup'], + 'use_rest': 'always' + } + msg = 'Error: using igroups requires ONTAP 9.9.1 or later and REST must be enabled - ONTAP version: 9.9.0.' + assert msg in create_module(igroup, DEFAULT_ARGS_COPY, args, fail=True)['msg'] + + +def test_successful_rename_rest(): + '''Test successful rename''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/igroups', SRR['empty_records']), + ('GET', 'protocols/san/igroups', SRR['one_igroup_record']), + ('DELETE', 'protocols/san/igroups/a1b2c3/initiators/todelete', SRR['success']), + ('POST', 'protocols/san/igroups/a1b2c3/initiators', SRR['success']), + ('PATCH', 'protocols/san/igroups/a1b2c3', SRR['success']) + ]) + args = { + 'use_rest': 'always', + 'from_name': 'test', + 'name': 'test_new' + } + assert create_and_apply(igroup, DEFAULT_ARGS, args)['changed'] + + +def test_negative_zapi_or_rest99_option(): + ''' Test ZAPI option not currently supported in REST is rejected ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']) + ]) + args = { + 'use_rest': 'always', + 'bind_portset': 'my_portset' + } + create_module(igroup, DEFAULT_ARGS, args) + msg = "Warning: falling back to ZAPI: using bind_portset requires ONTAP 9.9 or later and REST must be enabled - ONTAP version: 9.8.0." + print_warnings() + assert_warning_was_raised(msg) + + +def test_positive_zapi_or_rest99_option(): + ''' Test ZAPI option not currently supported in REST forces ZAPI calls ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']) + ]) + args = { + 'use_rest': 'auto', + 'bind_portset': 'my_portset' + } + create_module(igroup, DEFAULT_ARGS, args) + msg = "Warning: falling back to ZAPI: using bind_portset requires ONTAP 9.9 or later and REST must be enabled - ONTAP version: 9.8.0." 
+ print_warnings() + assert_warning_was_raised(msg) + + +def test_create_rest_99(): + ''' Test 9.9 option works with REST ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['empty_records']), + ('POST', 'protocols/san/igroups', SRR['success']) + ]) + args = { + 'use_rest': 'auto', + 'bind_portset': 'my_portset' + } + assert create_and_apply(igroup, DEFAULT_ARGS, args)['changed'] + print_warnings + assert_no_warnings() + + +def test_negative_modify_vserver_rest(): + ''' Test ZAPI option not currently supported in REST is rejected ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['one_igroup_record']) + ]) + args = { + 'vserver': 'my_vserver', + 'use_rest': 'always' + } + msg = "Error: modifying {'vserver': 'my_vserver'} is not supported in REST" + assert msg in create_and_apply(igroup, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_negative_igroups_require_9_9(): + ''' Test ZAPI option not currently supported in REST is rejected ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']) + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['initiator_names'] + args = { + 'igroups': 'test_igroup', + 'use_rest': 'always' + } + msg = "requires ONTAP 9.9.1 or later and REST must be enabled" + assert msg in create_module(igroup, DEFAULT_ARGS_COPY, args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py new file mode 100644 index 000000000..7da908dcb --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_igroup_initiator.py @@ -0,0 +1,256 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' 
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_igroup_initiator \ + import NetAppOntapIgroupInitiator as initiator # module under test +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'state': 'present', + 'vserver': 'vserver', + 'name': 'init1', + 'initiator_group': 'test', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never' +} + + +initiator_info = { + 'num-records': 1, + 'attributes-list': { + 'initiator-group-info': { + 'initiators': [ + {'initiator-info': {'initiator-name': 'init1'}}, + {'initiator-info': {'initiator-name': 'init2'}} + ] + } + } +} + + +ZRR = zapi_responses({ + 'initiator_info': build_zapi_response(initiator_info) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + initiator() + print('Info: %s' % 
exc.value.args[0]['msg']) + + +def test_get_nonexistent_igroup(): + ''' Test if get_initiators returns None for non-existent initiator ''' + register_responses([ + ('igroup-get-iter', ZRR['empty']) + ]) + initiator_obj = create_module(initiator, DEFAULT_ARGS) + result = initiator_obj.get_initiators() + assert result == [] + + +def test_get_existing_initiator(): + ''' Test if get_initiator returns None for existing initiator ''' + register_responses([ + ('igroup-get-iter', ZRR['initiator_info']) + ]) + initiator_obj = create_module(initiator, DEFAULT_ARGS) + result = initiator_obj.get_initiators() + assert DEFAULT_ARGS['name'] in result + assert result == ['init1', 'init2'] # from build_igroup_initiators() + + +def test_successful_add(): + ''' Test successful add''' + register_responses([ + ('igroup-get-iter', ZRR['initiator_info']), + ('igroup-add', ZRR['success']) + ]) + args = {'name': 'init3'} + assert create_and_apply(initiator, DEFAULT_ARGS, args)['changed'] + + +def test_successful_add_idempotency(): + ''' Test successful add idempotency ''' + register_responses([ + ('igroup-get-iter', ZRR['initiator_info']) + ]) + assert create_and_apply(initiator, DEFAULT_ARGS)['changed'] is False + + +def test_successful_remove(): + ''' Test successful remove ''' + register_responses([ + ('igroup-get-iter', ZRR['initiator_info']), + ('igroup-remove', ZRR['success']) + ]) + args = {'state': 'absent'} + assert create_and_apply(initiator, DEFAULT_ARGS, args)['changed'] + + +def test_successful_remove_idempotency(): + ''' Test successful remove idempotency''' + register_responses([ + ('igroup-get-iter', ZRR['initiator_info']) + ]) + args = {'state': 'absent', 'name': 'alreadyremoved'} + assert create_and_apply(initiator, DEFAULT_ARGS)['changed'] is False + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('igroup-get-iter', ZRR['error']), + ('igroup-add', ZRR['error']), + ('igroup-remove', ZRR['error']) + ]) + initiator_obj = create_module(initiator, 
DEFAULT_ARGS) + + error = expect_and_capture_ansible_exception(initiator_obj.get_initiators, 'fail')['msg'] + assert 'Error fetching igroup info' in error + + error = expect_and_capture_ansible_exception(initiator_obj.modify_initiator, 'fail', 'init4', 'igroup-add')['msg'] + assert 'Error modifying igroup initiator' in error + + error = expect_and_capture_ansible_exception(initiator_obj.modify_initiator, 'fail', 'init4', 'igroup-remove')['msg'] + assert 'Error modifying igroup initiator' in error + + +SRR = rest_responses({ + 'initiator_info': (200, {"records": [ + { + "svm": {"name": "svm1"}, + "uuid": "897de45f-bbbf-11ec-9f18-005056b3b297", + "name": "init1", + "initiators": [ + {"name": "iqn.2001-04.com.example:abc123"}, + {"name": "iqn.2001-04.com.example:abc124"}, + {'name': 'init3'} + ] + } + ], "num_records": 1}, None), + 'igroup_without_intiators': (200, {"records": [ + { + "svm": {"name": "svm1"}, + "uuid": "897de45f-bbbf-11ec-9f18-005056alr297", + "name": "init22", + } + ], "num_records": 1}, None) +}) + + +def test_successful_add_rest(): + ''' Test successful add''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['initiator_info']), + ('POST', 'protocols/san/igroups/897de45f-bbbf-11ec-9f18-005056b3b297/initiators', SRR['success']) + ]) + assert create_and_apply(initiator, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + + +def test_successful_add_idempotency_rest(): + ''' Test successful add idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['initiator_info']) + ]) + args = {'use_rest': 'always', 'name': 'iqn.2001-04.com.example:abc123'} + assert create_and_apply(initiator, DEFAULT_ARGS, args)['changed'] is False + + +def test_successful_add_to_0_initiator_igroup_rest(): + ''' Test successful add''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', 
SRR['igroup_without_intiators']), + ('POST', 'protocols/san/igroups/897de45f-bbbf-11ec-9f18-005056alr297/initiators', SRR['success']) + ]) + assert create_and_apply(initiator, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + + +def test_successful_remove_rest(): + ''' Test successful remove ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['initiator_info']), + ('DELETE', 'protocols/san/igroups/897de45f-bbbf-11ec-9f18-005056b3b297/initiators/init3', SRR['success']) + ]) + args = {'use_rest': 'always', 'name': 'init3', 'state': 'absent'} + assert create_and_apply(initiator, DEFAULT_ARGS, args)['changed'] + + +def test_successful_remove_idempotency_rest(): + ''' Test successful remove idempotency''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['initiator_info']) + ]) + args = {'use_rest': 'always', 'name': 'alreadyremoved', 'state': 'absent'} + assert create_and_apply(initiator, DEFAULT_ARGS, args)['changed'] is False + + +def test_get_initiator_catch_exception_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['generic_error']) + ]) + error = create_and_apply(initiator, DEFAULT_ARGS, {'use_rest': 'always'}, 'fail')['msg'] + assert 'Error fetching igroup info' in error + + +def test_add_initiator_catch_exception_rest(): + ''' Test add error''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['initiator_info']), + ('POST', 'protocols/san/igroups/897de45f-bbbf-11ec-9f18-005056b3b297/initiators', SRR['generic_error']) + ]) + error = create_and_apply(initiator, DEFAULT_ARGS, {'use_rest': 'always'}, 'fail')['msg'] + assert 'Error modifying igroup initiator' in error + + +def test_remove_initiator_catch_exception_rest(): + ''' Test remove error''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 
'protocols/san/igroups', SRR['initiator_info']), + ('DELETE', 'protocols/san/igroups/897de45f-bbbf-11ec-9f18-005056b3b297/initiators/init3', SRR['generic_error']) + ]) + args = {'use_rest': 'always', 'name': 'init3', 'state': 'absent'} + error = create_and_apply(initiator, DEFAULT_ARGS, args, 'fail')['msg'] + assert 'Error modifying igroup initiator' in error + + +def test_error_uuid_not_found(): + ''' Test uuid error''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/igroups', SRR['empty_records']) + ]) + args = {'use_rest': 'always'} + error = create_and_apply(initiator, DEFAULT_ARGS, args, 'fail')['msg'] + assert 'Error modifying igroup initiator init1: igroup not found' in error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_info.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_info.py new file mode 100644 index 000000000..18c35c910 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_info.py @@ -0,0 +1,738 @@ +# (c) 2018-2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for ONTAP Ansible module na_ontap_info ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_warning_was_raised, 
expect_and_capture_ansible_exception, call_main, create_module, patch_ansible, print_warnings +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_info import NetAppONTAPGatherInfo as my_module, main as my_main +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_info import convert_keys as info_convert_keys, __finditem as info_finditem + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'use_rest', +} + + +def net_port_info(port_type): + return { + 'attributes-list': [{ + 'net-port-info': { + 'node': 'node_0', + 'port': 'port_0', + 'broadcast_domain': 'broadcast_domain_0', + 'ipspace': 'ipspace_0', + 'port_type': port_type + }}, { + 'net-port-info': { + 'node': 'node_1', + 'port': 'port_1', + 'broadcast_domain': 'broadcast_domain_1', + 'ipspace': 'ipspace_1', + 'port_type': port_type + }} + ] + } + + +def net_ifgrp_info(id): + return { + 'attributes': { + 'net-ifgrp-info': { + 'ifgrp-name': 'ifgrp_%d' % id, + 'node': 'node_%d' % id, + } + } + } + + +def aggr_efficiency_info(node): + attributes = { + 'aggregate': 'v2', + } + if node: + attributes['node'] = node + return { + 'attributes-list': [{ + 'aggr-efficiency-info': attributes + }] + } + + +def lun_info(path, next=False): + info = { + 'attributes-list': [{ + 'lun-info': { + 'serial-number': 'z6CcD+SK5mPb', + 'vserver': 'svm1', + 'path': path} + }] + } + if next: + info['next-tag'] = 'next_tag' + return info + + +list_of_one = [{'k1': 'v1'}] +list_of_two = [{'k1': 'v1'}, {'k2': 'v2'}] +list_of_two_dups = [{'k1': 'v1'}, {'k1': 'v2'}] + + +ZRR = zapi_responses({ + 'net_port_info': build_zapi_response(net_port_info('whatever'), 2), + 'net_port_info_with_ifgroup': build_zapi_response(net_port_info('if_group'), 2), + 'net_ifgrp_info_0': build_zapi_response(net_ifgrp_info(0), 1), + 'net_ifgrp_info_1': 
build_zapi_response(net_ifgrp_info(1), 1), + 'list_of_one': build_zapi_response(list_of_one), + 'list_of_two': build_zapi_response(list_of_two), + 'list_of_two_dups': build_zapi_response(list_of_two_dups), + 'aggr_efficiency_info': build_zapi_response(aggr_efficiency_info('v1')), + 'aggr_efficiency_info_no_node': build_zapi_response(aggr_efficiency_info(None)), + 'lun_info': build_zapi_response(lun_info('p1')), + 'lun_info_next_2': build_zapi_response(lun_info('p2', True)), + 'lun_info_next_3': build_zapi_response(lun_info('p3', True)), + 'lun_info_next_4': build_zapi_response(lun_info('p4', True)), +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + assert 'missing required arguments: hostname' in call_main(my_main, {}, fail=True)['msg'] + + +def test_ensure_command_called(): + ''' calling get_all will raise a KeyError exception ''' + register_responses([ + ('ZAPI', 'system-get-ontapi-version', ZRR['version']), + ('ZAPI', 'net-interface-get-iter', ZRR['success']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + results = my_obj.get_all(['net_interface_info']) + assert 'net_interface_info' in results + + +def test_get_generic_get_iter(): + '''calling get_generic_get_iter will return expected dict''' + register_responses([ + ('ZAPI', 'net-port-get-iter', ZRR['net_port_info']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + result = obj.get_generic_get_iter( + 'net-port-get-iter', + attribute='net-port-info', + key_fields=('node', 'port'), + query={'max-records': '1024'} + ) + assert result.get('node_0:port_0') + assert result.get('node_1:port_1') + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_info.NetAppONTAPGatherInfo.get_all') +def test_main(get_all): + '''test main method - default: no state.''' + register_responses([ + ]) + get_all.side_effect = [ + {'test_get_all': {'vserver_login_banner_info': 'test_vserver_login_banner_info', 
'vserver_info': 'test_vserver_info'}} + ] + results = call_main(my_main, DEFAULT_ARGS) + assert 'ontap_info' in results + assert 'test_get_all' in results['ontap_info'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_info.NetAppONTAPGatherInfo.get_all') +def test_main_with_state(get_all): + '''test main method with explicit state.''' + register_responses([ + ]) + module_args = {'state': 'some_state'} + get_all.side_effect = [ + {'test_get_all': {'vserver_login_banner_info': 'test_vserver_login_banner_info', 'vserver_info': 'test_vserver_info'}} + ] + results = call_main(my_main, DEFAULT_ARGS, module_args) + assert 'ontap_info' in results + assert 'test_get_all' in results['ontap_info'] + print_warnings() + assert_warning_was_raised("option 'state' is deprecated.") + + +def test_get_ifgrp_info_no_ifgrp(): + '''test get_ifgrp_info with empty ifgrp_info''' + register_responses([ + ('ZAPI', 'net-port-get-iter', ZRR['net_port_info']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + result = obj.get_ifgrp_info() + assert result == {} + + +def test_get_ifgrp_info_with_ifgrp(): + '''test get_ifgrp_info with empty ifgrp_info''' + register_responses([ + ('ZAPI', 'net-port-get-iter', ZRR['net_port_info_with_ifgroup']), + ('ZAPI', 'net-port-ifgrp-get', ZRR['net_ifgrp_info_0']), + ('ZAPI', 'net-port-ifgrp-get', ZRR['net_ifgrp_info_1']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + results = obj.get_ifgrp_info() + assert results.get('node_0:ifgrp_0') + assert results.get('node_1:ifgrp_1') + + +def test_ontapi_error(): + '''test ontapi will raise zapi error''' + register_responses([ + ('ZAPI', 'system-get-ontapi-version', ZRR['error']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + error = zapi_error_message('Error calling API system-get-ontapi-version') + assert error in expect_and_capture_ansible_exception(obj.ontapi, 'fail')['msg'] + + +def test_call_api_error(): + '''test call_api will raise zapi error''' + register_responses([ + 
('ZAPI', 'security-key-manager-key-get-iter', ZRR['error']), + ('ZAPI', 'lun-get-iter', ZRR['error_missing_api']), + ('ZAPI', 'nvme-get-iter', ZRR['error']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + # 1 error is ignored + assert obj.call_api('security-key-manager-key-get-iter') == (None, None) + # 2 missing API (cluster admin API not visible at vserver level) + error = zapi_error_message('Error invalid API. Most likely running a cluster level API as vserver', 13005) + assert error in expect_and_capture_ansible_exception(obj.call_api, 'fail', 'lun-get-iter')['msg'] + # 3 API error + error = zapi_error_message('Error calling API nvme-get-iter') + assert error in expect_and_capture_ansible_exception(obj.call_api, 'fail', 'nvme-get-iter')['msg'] + + +def test_get_generic_get_iter_key_error(): + register_responses([ + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + keys = 'single_key' + error = "Error: key 'single_key' not found for lun-get-iter, got:" + assert error in expect_and_capture_ansible_exception(obj.get_generic_get_iter, 'fail', 'lun-get-iter', None, keys)['msg'] + keys = ('key1', 'path') + error = "Error: key 'key1' not found for lun-get-iter, got:" + assert error in expect_and_capture_ansible_exception(obj.get_generic_get_iter, 'fail', 'lun-get-iter', None, keys)['msg'] + # ignoring key_errors + module_args = {'continue_on_error': 'key_error'} + obj = create_module(my_module, DEFAULT_ARGS, module_args) + keys = 'single_key' + missing_key = 'Error_1_key_not_found_%s' % keys + results = obj.get_generic_get_iter('lun-get-iter', None, keys) + assert missing_key in results + keys = ('key1', 'path') + missing_key = 'Error_1_key_not_found_%s' % keys[0] + results = obj.get_generic_get_iter('lun-get-iter', None, keys) + assert missing_key in results + + +def 
test_find_item(): + '''test __find_item return expected key value''' + register_responses([ + ]) + obj = {"A": 1, "B": {"C": {"D": 2}}} + key = "D" + result = info_finditem(obj, key) + assert result == 2 + obj = {"A": 1, "B": {"C": {"D": None}}} + result = info_finditem(obj, key) + assert result == "None" + + +def test_subset_return_all_complete(): + ''' Check all returns all of the entries if version is high enough ''' + register_responses([ + ]) + version = '170' # change this if new ZAPIs are supported + obj = create_module(my_module, DEFAULT_ARGS) + subset = obj.get_subset(['all'], version) + assert set(obj.info_subsets.keys()) == subset + + +def test_subset_return_all_partial(): + ''' Check all returns a subset of the entries if version is low enough ''' + register_responses([ + ]) + version = '120' # low enough so that some ZAPIs are not supported + obj = create_module(my_module, DEFAULT_ARGS) + subset = obj.get_subset(['all'], version) + all_keys = obj.info_subsets.keys() + assert set(all_keys) > subset + supported_keys = filter(lambda key: obj.info_subsets[key]['min_version'] <= version, all_keys) + assert set(supported_keys) == subset + + +def test_subset_return_one(): + ''' Check single entry returns one ''' + register_responses([ + ]) + version = '120' # low enough so that some ZAPIs are not supported + obj = create_module(my_module, DEFAULT_ARGS) + subset = obj.get_subset(['net_interface_info'], version) + assert len(subset) == 1 + + +def test_subset_return_multiple(): + ''' Check that more than one entry returns the same number ''' + register_responses([ + ]) + version = '120' # low enough so that some ZAPIs are not supported + obj = create_module(my_module, DEFAULT_ARGS) + subset_entries = ['net_interface_info', 'net_port_info'] + subset = obj.get_subset(subset_entries, version) + assert len(subset) == len(subset_entries) + + +def test_subset_return_bad(): + ''' Check that a bad subset entry will error out ''' + register_responses([ + ]) + version = 
'120' # low enough so that some ZAPIs are not supported + obj = create_module(my_module, DEFAULT_ARGS) + error = 'Bad subset: my_invalid_subset' + assert error in expect_and_capture_ansible_exception(obj.get_subset, 'fail', ['net_interface_info', 'my_invalid_subset'], version)['msg'] + + +def test_subset_return_unsupported(): + ''' Check that a new subset entry will error out on an older system ''' + register_responses([ + ]) + version = '120' # low enough so that some ZAPIs are not supported + key = 'nvme_info' # only supported starting at 140 + obj = create_module(my_module, DEFAULT_ARGS) + error = 'Remote system at version %s does not support %s' % (version, key) + assert error in expect_and_capture_ansible_exception(obj.get_subset, 'fail', ['net_interface_info', key], version)['msg'] + + +def test_subset_return_none(): + ''' Check usable subset can be empty ''' + register_responses([ + ]) + version = '!' # lower then 0, so that no ZAPI is supported + obj = create_module(my_module, DEFAULT_ARGS) + subset = obj.get_subset(['all'], version) + assert len(subset) == 0 + + +def test_subset_return_all_expect_one(): + ''' Check !x returns all of the entries except x if version is high enough ''' + register_responses([ + ]) + version = '170' # change this if new ZAPIs are supported + obj = create_module(my_module, DEFAULT_ARGS) + subset = obj.get_subset(['!net_interface_info'], version) + assert len(obj.info_subsets.keys()) == len(subset) + 1 + subset.add('net_interface_info') + assert set(obj.info_subsets.keys()) == subset + + +def test_subset_return_all_expect_three(): + ''' Check !x,!y,!z returns all of the entries except x, y, z if version is high enough ''' + register_responses([ + ]) + version = '170' # change this if new ZAPIs are supported + obj = create_module(my_module, DEFAULT_ARGS) + subset = obj.get_subset(['!net_interface_info', '!nvme_info', '!ontap_version'], version) + assert len(obj.info_subsets.keys()) == len(subset) + 3 + 
subset.update(['net_interface_info', 'nvme_info', 'ontap_version']) + assert set(obj.info_subsets.keys()) == subset + + +def test_subset_return_none_with_exclusion(): + ''' Check usable subset can be empty with !x ''' + register_responses([ + ]) + version = '!' # lower then 0, so that no ZAPI is supported + key = 'net_interface_info' + obj = create_module(my_module, DEFAULT_ARGS) + error = 'Remote system at version %s does not support %s' % (version, key) + assert error in expect_and_capture_ansible_exception(obj.get_subset, 'fail', ['!' + key], version)['msg'] + + +def test_get_generic_get_iter_flatten_list_of_one(): + '''calling get_generic_get_iter will return expected dict''' + register_responses([ + ('ZAPI', 'list_of_one', ZRR['list_of_one']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + result = obj.get_generic_get_iter( + 'list_of_one', + attributes_list_tag=None, + ) + print(ZRR['list_of_one'][0].to_string()) + print(result) + assert isinstance(result, dict) + assert result.get('k1') == 'v1' + + +def test_get_generic_get_iter_flatten_list_of_two(): + '''calling get_generic_get_iter will return expected dict''' + register_responses([ + ('ZAPI', 'list_of_two', ZRR['list_of_two']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + result = obj.get_generic_get_iter( + 'list_of_two', + attributes_list_tag=None, + ) + print(result) + assert isinstance(result, dict) + assert result.get('k1') == 'v1' + assert result.get('k2') == 'v2' + + +def test_get_generic_get_iter_flatten_list_of_two_dups(): + '''calling get_generic_get_iter will return expected dict''' + register_responses([ + ('ZAPI', 'list_of_two_dups', ZRR['list_of_two_dups']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + result = obj.get_generic_get_iter( + 'list_of_two_dups', + attributes_list_tag=None, + ) + assert isinstance(result, list) + assert result[0].get('k1') == 'v1' + assert result[1].get('k1') == 'v2' + + +def test_check_underscore(): + ''' Check warning is recorded if 
'_' is found in key ''' + register_responses([ + ]) + test_dict = dict( + bad_key='something' + ) + test_dict['good-key'] = [dict( + other_bad_key=dict( + yet_another_bad_key=1 + ), + somekey=dict( + more_bad_key=2 + ) + )] + obj = create_module(my_module, DEFAULT_ARGS) + obj.check_for___in_keys(test_dict) + print('Info: %s' % repr(obj.warnings)) + for key in ['bad_key', 'other_bad_key', 'yet_another_bad_key', 'more_bad_key']: + msg = "Underscore in ZAPI tag: %s, do you mean '-'?" % key + assert msg in obj.warnings + obj.warnings.remove(msg) + # make sure there is no extra warnings (eg we found and removed all of them) + assert obj.warnings == list() + + +def d2us(astr): + return str.replace(astr, '-', '_') + + +def test_convert_keys_string(): + ''' no conversion ''' + register_responses([ + ]) + key = 'a-b-c' + assert info_convert_keys(key) == key + + +def test_convert_keys_tuple(): + ''' no conversion ''' + register_responses([ + ]) + key = 'a-b-c' + anobject = (key, key) + assert info_convert_keys(anobject) == anobject + + +def test_convert_keys_list(): + ''' no conversion ''' + register_responses([ + ]) + key = 'a-b-c' + anobject = [key, key] + assert info_convert_keys(anobject) == anobject + + +def test_convert_keys_simple_dict(): + ''' conversion of keys ''' + register_responses([ + ]) + key = 'a-b-c' + anobject = {key: 1} + assert list(info_convert_keys(anobject).keys())[0] == d2us(key) + + +def test_convert_keys_list_of_dict(): + ''' conversion of keys ''' + register_responses([ + ]) + key = 'a-b-c' + anobject = [{key: 1}, {key: 2}] + converted = info_convert_keys(anobject) + for adict in converted: + for akey in adict: + assert akey == d2us(key) + + +def test_set_error_flags_error_n(): + ''' Check set_error__flags return correct dict ''' + register_responses([ + ]) + module_args = {'continue_on_error': ['never', 'whatever']} + msg = "never needs to be the only keyword in 'continue_on_error' option." 
+ assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_set_error_flags_error_a(): + ''' Check set_error__flags return correct dict ''' + register_responses([ + ]) + module_args = {'continue_on_error': ['whatever', 'always']} + print('Info: %s' % call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']) + msg = "always needs to be the only keyword in 'continue_on_error' option." + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_set_error_flags_error_u(): + ''' Check set_error__flags return correct dict ''' + register_responses([ + ]) + module_args = {'continue_on_error': ['whatever', 'else']} + + print('Info: %s' % call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']) + msg = "whatever is not a valid keyword in 'continue_on_error' option." + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_set_error_flags_1_flag(): + ''' Check set_error__flags return correct dict ''' + register_responses([ + ]) + module_args = {'continue_on_error': ['missing_vserver_api_error']} + obj = create_module(my_module, DEFAULT_ARGS, module_args, 'vserver') + assert not obj.error_flags['missing_vserver_api_error'] + assert obj.error_flags['rpc_error'] + assert obj.error_flags['other_error'] + + +def test_set_error_flags_2_flags(): + ''' Check set_error__flags return correct dict ''' + register_responses([ + ]) + module_args = {'continue_on_error': ['missing_vserver_api_error', 'rpc_error']} + obj = create_module(my_module, DEFAULT_ARGS, module_args, 'vserver') + assert not obj.error_flags['missing_vserver_api_error'] + assert not obj.error_flags['rpc_error'] + assert obj.error_flags['other_error'] + + +def test_set_error_flags_3_flags(): + ''' Check set_error__flags return correct dict ''' + register_responses([ + ]) + module_args = {'continue_on_error': ['missing_vserver_api_error', 'rpc_error', 'other_error']} + obj = create_module(my_module, 
DEFAULT_ARGS, module_args, 'vserver') + assert not obj.error_flags['missing_vserver_api_error'] + assert not obj.error_flags['rpc_error'] + assert not obj.error_flags['other_error'] + + +def test_get_subset_missing_key(): + '''calling aggr_efficiency_info with missing key''' + register_responses([ + ('ZAPI', 'aggr-efficiency-get-iter', ZRR['aggr_efficiency_info']), + ('ZAPI', 'aggr-efficiency-get-iter', ZRR['aggr_efficiency_info_no_node']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + call = obj.info_subsets['aggr_efficiency_info'] + info = call['method'](**call['kwargs']) + print(info) + assert 'v1:v2' in info + call = obj.info_subsets['aggr_efficiency_info'] + info = call['method'](**call['kwargs']) + print(info) + assert 'key_not_present:v2' in info + + +def test_get_lun_with_serial(): + '''calling lun_info with serial-number key''' + register_responses([ + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + # no records + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'lun-get-iter', ZRR['no_records']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + info = obj.get_all(['lun_info']) + print(info) + assert 'lun_info' in info + lun_info = info['lun_info']['svm1:p1'] + assert lun_info['serial_number'] == 'z6CcD+SK5mPb' + assert lun_info['serial_hex'] == '7a364363442b534b356d5062' + assert lun_info['naa_id'] == 'naa.600a0980' + '7a364363442b534b356d5062' + # no records + info = obj.get_all(['lun_info']) + assert 'lun_info' in info + assert info['lun_info'] is None + # error + + +def test_get_nothing(): + '''calling with !all''' + register_responses([ + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + info = obj.get_all(['!all']) + print(info) + assert info == {'ontap_version': '0', 'ontapi_version': '0'} + + +def test_deprecation_ontap_version(): + '''calling ontap_version''' + register_responses([ + ('ZAPI', 
'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + info = obj.get_all(['ontap_version']) + assert info + assert 'deprecation_warning' in info + assert info['deprecation_warning'] == 'ontap_version is deprecated, please use ontapi_version' + + +def test_help(): + '''calling help''' + register_responses([ + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ]) + obj = create_module(my_module, DEFAULT_ARGS) + info = obj.get_all(['help']) + assert info + assert 'help' in info + + +def test_desired_attributes(): + '''desired_attributes option''' + register_responses([ + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'lun-get-iter', ZRR['success']), + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ]) + module_args = {'desired_attributes': {'attr': 'value'}} + obj = create_module(my_module, DEFAULT_ARGS, module_args) + info = obj.get_all(['lun_info']) + assert 'lun_info' in info + assert info['lun_info'] is None + error = 'desired_attributes option is only supported with a single subset' + assert error in expect_and_capture_ansible_exception(obj.get_all, 'fail', ['ontapi_version', 'ontap_system_version'])['msg'] + + +def test_query(): + '''query option''' + register_responses([ + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ]) + module_args = {'query': {'attr': 'value', 'a_b': 'val'}} + obj = create_module(my_module, DEFAULT_ARGS, module_args) + info = obj.get_all(['ontapi_version']) + assert info == {'ontap_version': '0', 'ontapi_version': '0', 'module_warnings': ["Underscore in ZAPI tag: a_b, do you mean '-'?"]} + error = 'query option is only supported with a single subset' + assert error in expect_and_capture_ansible_exception(obj.get_all, 'fail', ['ontapi_version', 'ontap_system_version'])['msg'] + + 
+@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_info.NetAppONTAPGatherInfo.get_all') +def test_get_all_with_summary(mock_get_all): + '''all and summary''' + register_responses([ + ]) + module_args = {'summary': True, 'gather_subset': None} + mock_get_all.return_value = {'a_info': {'1': '1.1'}, 'b_info': {'2': '2.2'}} + info = call_main(my_main, DEFAULT_ARGS, module_args) + assert info + assert 'ontap_info' in info + assert info['ontap_info'] == {'a_info': {'1': None}.keys(), 'b_info': {'2': None}.keys()} + + +def test_repeated_get(): + '''query option''' + register_responses([ + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info_next_2']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info_next_3']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info_next_4']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ]) + module_args = {'query': {}} + obj = create_module(my_module, DEFAULT_ARGS, module_args) + info = obj.get_all(['lun_info']) + assert info + assert 'lun_info' in info + assert len(info['lun_info']) == 4 + + +def test_repeated_get_error(): + '''query option''' + register_responses([ + ('ZAPI', 'lun-get-iter', ZRR['lun_info_next_2']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ]) + module_args = {'query': {}} + obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = "'next-tag' is not expected for this API" + assert error in expect_and_capture_ansible_exception(obj.call_api, 'fail', 'lun-get-iter', attributes_list_tag=None)['msg'] + + +def test_attribute_error(): + register_responses([ + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'license-v2-list-info', ZRR['no_records']), + ]) + module_args = {'gather_subset': ['license_info'], 'vserver': 'svm'} + error = "Error: attribute 'licenses' not found for license-v2-list-info, got:" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_continue_on_error(): + register_responses([ + ('ZAPI', 
'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'license-v2-list-info', ZRR['error']), + ('ZAPI', 'system-get-ontapi-version', ZRR['success']), + ('ZAPI', 'license-v2-list-info', ZRR['error']), + ]) + module_args = {'gather_subset': ['license_info'], 'vserver': 'svm'} + error = zapi_error_message('Error calling API license-v2-list-info') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args = {'gather_subset': ['license_info'], 'vserver': 'svm', 'continue_on_error': 'always'} + info = call_main(my_main, DEFAULT_ARGS, module_args) + error = {'error': zapi_error_message('Error calling API license-v2-list-info')} + assert info is not None + assert info['ontap_info']['license_info'] == error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_interface.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_interface.py new file mode 100644 index 000000000..129caa635 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_interface.py @@ -0,0 +1,1778 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import copy +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_no_warnings,\ + assert_warning_was_raised, print_warnings, call_main, create_module, expect_and_capture_ansible_exception, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses 
+from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_error, build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_interface \ + import NetAppOntapInterface as interface_module, main as my_main + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +def interface_info(dns=True, address='2.2.2.2', netmask='1.1.1.1'): + info = { + 'attributes-list': { + 'net-interface-info': { + 'interface-name': 'abc_if', + 'administrative-status': 'up', + 'failover-group': 'failover_group', + 'failover-policy': 'up', + 'firewall-policy': 'up', + 'is-auto-revert': 'true', + 'home-node': 'node', + 'current-node': 'node', + 'home-port': 'e0c', + 'current-port': 'e0c', + 'address': address, + 'netmask': netmask, + 'role': 'data', + 'listen-for-dns-query': 'true', + 'is-dns-update-enabled': 'true', + 'is-ipv4-link-local': 'false', + 'service-policy': 'service_policy', + } + } + } + if dns: + info['attributes-list']['net-interface-info']['dns-domain-name'] = 'test.com' + return info + + +node_info = { + 'attributes-list': { + 'cluster-node-info': { + 'node-name': 'node_1', + } + } +} + + +ZRR = zapi_responses({ + 'interface_info': build_zapi_response(interface_info(), 1), + 'interface_ipv4': build_zapi_response(interface_info(address='10.10.10.13', netmask='255.255.255.0'), 1), + 'interface_info_no_dns': build_zapi_response(interface_info(dns=False), 1), + 'node_info': build_zapi_response(node_info, 1), + 'error_17': build_zapi_error(17, 'A LIF with the same name already exists'), + 'error_13003': build_zapi_error(13003, 'ZAPI is not enabled in pre-cluster mode.'), 
+}) + + +DEFAULT_ARGS = { + 'hostname': '10.10.10.10', + 'username': 'admin', + 'password': 'password', + 'home_port': 'e0c', + 'interface_name': 'abc_if', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + module_args = { + 'vserver': 'vserver', + 'use_rest': 'never' + } + error = create_module(interface_module, module_args, fail=True)['msg'] + assert 'missing required arguments:' in error + assert 'interface_name' in error + + +def test_create_error_missing_param(): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ]) + module_args = { + 'vserver': 'vserver', + 'home_node': 'node', + 'use_rest': 'never' + } + msg = 'Error: Missing one or more required parameters for creating interface:' + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg in error + assert 'role' in error + + +def test_successful_create(): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ('ZAPI', 'net-interface-create', ZRR['success']) + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + 'home_node': 'node', + 'role': 'data', + # 'subnet_name': 'subnet_name', + 'address': '10.10.10.13', + 'netmask': '255.255.255.0', + 'failover_policy': 'system-defined', + 'failover_group': 'failover_group', + 'firewall_policy': 'firewall_policy', + 'is_auto_revert': True, + 'admin_status': 'down', + 'force_subnet_association': True, + 'dns_domain_name': 'dns_domain_name', + 'listen_for_dns_query': True, + 'is_dns_update_enabled': True, + # 'is_ipv4_link_local': False, + 'service_policy': 'service_policy' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ip_subnet_cidr_mask(): + ''' Test successful modify ip/subnet mask ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 
'net-interface-modify', ZRR['success']), + ('ZAPI', 'net-interface-get-iter', ZRR['interface_ipv4']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + 'home_node': 'node', + 'role': 'data', + 'address': '10.10.10.13', + 'netmask': '24' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_create_for_NVMe(): + ''' Test successful create for NVMe protocol''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-node-get-iter', ZRR['node_info']), + ('ZAPI', 'net-interface-create', ZRR['success']), + ]) + module_args = { + 'vserver': 'vserver', + # 'home_node': 'node', + 'role': 'data', + 'protocols': ['fc-nvme'], + 'subnet_name': 'subnet_name', + 'use_rest': 'never' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_idempotency_for_NVMe(): + ''' Test successful create for NVMe protocol''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ]) + module_args = { + 'vserver': 'vserver', + 'home_node': 'node', + 'role': 'data', + 'protocols': ['fc-nvme'], + 'use_rest': 'never' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_error_for_NVMe(): + ''' Test if create throws an error if required param 'protocols' uses NVMe''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ]) + msg = 'Error: Following parameters for creating interface are not supported for data-protocol fc-nvme:' + module_args = { + 'vserver': 'vserver', + 'protocols': ['fc-nvme'], + 'address': '1.1.1.1', + 'use_rest': 'never' + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg in error + for option in ('netmask', 'address', 'firewall_policy'): + assert option in error + + +def test_create_idempotency(): + ''' Test create idempotency, and 
ignore EMS logging error ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ]) + module_args = { + 'vserver': 'vserver', + 'use_rest': 'never' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_delete(): + ''' Test delete existing interface, and ignore EMS logging error ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info_no_dns']), + ('ZAPI', 'net-interface-modify', ZRR['success']), # offline + ('ZAPI', 'net-interface-delete', ZRR['success']), + ]) + module_args = { + 'state': 'absent', + 'vserver': 'vserver', + 'use_rest': 'never' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_idempotency(): + ''' Test delete idempotency ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ]) + module_args = { + 'state': 'absent', + 'vserver': 'vserver', + 'use_rest': 'never' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify(): + ''' Test successful modify interface_minutes ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 'net-interface-modify', ZRR['success']), + ]) + module_args = { + 'vserver': 'vserver', + 'dns_domain_name': 'test2.com', + 'home_port': 'e0d', + 'is_dns_update_enabled': False, + 'is_ipv4_link_local': True, + 'listen_for_dns_query': False, + 'use_rest': 'never' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_idempotency(): + ''' Test modify idempotency ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ]) + module_args = { + 'vserver': 'vserver', + 'use_rest': 'never' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_message(): + register_responses([ + # create, missing params + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + 
('ZAPI', 'cluster-node-get-iter', ZRR['no_records']), + + # create - get home_node error + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-node-get-iter', ZRR['error']), + + # create error + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-node-get-iter', ZRR['error_13003']), + ('ZAPI', 'net-interface-create', ZRR['error']), + + # create error + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-node-get-iter', ZRR['no_records']), + ('ZAPI', 'net-interface-create', ZRR['error_17']), + + # modify error + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 'net-interface-modify', ZRR['error']), + + # rename error + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 'net-interface-rename', ZRR['error']), + + # delete error + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 'net-interface-modify', ZRR['success']), + ('ZAPI', 'net-interface-delete', ZRR['error']), + + # get error + ('ZAPI', 'net-interface-get-iter', ZRR['error']), + ]) + module_args = { + 'vserver': 'vserver', + 'use_rest': 'never', + } + msg = 'Error: Missing one or more required parameters for creating interface:' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['home_port'] = 'e0d' + module_args['role'] = 'data' + module_args['address'] = '10.11.12.13' + module_args['netmask'] = '255.192.0.0' + msg = 'Error fetching node for interface abc_if: NetApp API failed. Reason - 12345:' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = 'Error Creating interface abc_if: NetApp API failed. 
Reason - 12345:' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + # LIF already exists (error 17) + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['home_port'] = 'new_port' + msg = 'Error modifying interface abc_if: NetApp API failed. Reason - 12345:' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['from_name'] = 'old_name' + msg = 'Error renaming old_name to abc_if: NetApp API failed. Reason - 12345:' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['state'] = 'absent' + msg = 'Error deleting interface abc_if: NetApp API failed. Reason - 12345:' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = 'Error fetching interface details for abc_if: NetApp API failed. Reason - 12345:' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_successful_rename(): + ''' Test successful ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 'net-interface-rename', ZRR['success']), + ('ZAPI', 'net-interface-modify', ZRR['success']), + ]) + module_args = { + 'vserver': 'vserver', + 'dns_domain_name': 'test2.com', + 'from_name': 'from_interface_name', + 'home_port': 'new_port', + 'is_dns_update_enabled': False, + 'listen_for_dns_query': False, + 'use_rest': 'never' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_rename_not_found(): + ''' Test from interface not found ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ('ZAPI', 'net-interface-get-iter', ZRR['no_records']), + ]) + msg = 'Error renaming interface abc_if: no interface with from_name from_interface_name.' 
+ module_args = { + 'vserver': 'vserver', + 'dns_domain_name': 'test2.com', + 'from_name': 'from_interface_name', + 'home_port': 'new_port', + 'is_dns_update_enabled': False, + 'listen_for_dns_query': False, + 'use_rest': 'never' + } + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_successful_migrate(): + ''' Test successful ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 'net-interface-modify', ZRR['success']), + ('ZAPI', 'net-interface-migrate', ZRR['success']), + ('ZAPI', 'net-interface-migrate', ZRR['success']), + ]) + module_args = { + 'vserver': 'vserver', + 'dns_domain_name': 'test2.com', + 'current_node': 'new_node', + 'is_dns_update_enabled': False, + 'listen_for_dns_query': False, + 'use_rest': 'never' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_migrate(): + ''' Test successful ''' + register_responses([ + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 'net-interface-modify', ZRR['success']), + + # 2nd try + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 'net-interface-modify', ZRR['success']), + ('ZAPI', 'net-interface-migrate', ZRR['error']), + + # 3rd try + ('ZAPI', 'net-interface-get-iter', ZRR['interface_info']), + ('ZAPI', 'net-interface-modify', ZRR['success']), + ('ZAPI', 'net-interface-migrate', ZRR['success']), + ('ZAPI', 'net-interface-migrate', ZRR['error']), + ]) + module_args = { + 'vserver': 'vserver', + 'dns_domain_name': 'test2.com', + 'current_port': 'new_port', + 'is_dns_update_enabled': False, + 'listen_for_dns_query': False, + 'use_rest': 'never' + } + msg = 'current_node must be set to migrate' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['current_node'] = 'new_node' + msg = 'Error migrating new_node: NetApp API failed. 
Reason - 12345' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = 'Error migrating new_node: NetApp API failed. Reason - 12345' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +SRR = rest_responses({ + 'one_record_home_node': (200, {'records': [ + {'name': 'node2_abc_if', + 'uuid': '54321', + 'enabled': True, + 'location': {'home_port': {'name': 'e0c'}, 'home_node': {'name': 'node2'}, 'node': {'name': 'node2'}, 'port': {'name': 'e0c'}} + }]}, None), + 'one_record_vserver': (200, {'records': [{ + 'name': 'abc_if', + 'uuid': '54321', + 'svm': {'name': 'vserver', 'uuid': 'svm_uuid'}, + 'dns_zone': 'netapp.com', + 'ddns_enabled': True, + 'data_protocol': ['nfs'], + 'enabled': True, + 'ip': {'address': '10.11.12.13', 'netmask': '10'}, + 'location': { + 'home_port': {'name': 'e0c'}, + 'home_node': {'name': 'node2'}, + 'node': {'name': 'node2'}, + 'port': {'name': 'e0c'}, + 'auto_revert': True, + 'failover': True + }, + 'service_policy': {'name': 'data-mgmt'}, + 'probe_port': 65431 + }]}, None), + 'one_record_vserver_subnet1': (200, {'records': [{ + 'name': 'abc_if', + 'uuid': '54321', + 'svm': {'name': 'vserver', 'uuid': 'svm_uuid'}, + 'dns_zone': 'netapp.com', + 'ddns_enabled': True, + 'data_protocol': ['nfs'], + 'enabled': True, + 'ip': {'address': '10.11.12.13', 'netmask': '10'}, + 'location': { + 'home_port': {'name': 'e0c'}, + 'home_node': {'name': 'node2'}, + 'node': {'name': 'node2'}, + 'port': {'name': 'e0c'}, + 'auto_revert': True, + 'failover': True + }, + 'service_policy': {'name': 'data-mgmt'}, + 'subnet': {'name': 'subnet1'} + }]}, None), + 'one_record_fcp': (200, {'records': [{ + 'data_protocol': 'fcp', + 'enabled': False, + 'location': { + 'home_node': {'name': 'ontap910-01', 'uuid': 'ecb4061b'}, + 'home_port': {'name': '1a', 'node': {'name': 'ontap910-01'}, 'uuid': '1c9a72de'}, + 'is_home': True, + 'node': {'name': 'ontap910-01', 'uuid': 'ecb4061b'}, + 'port': {'name': '1a', 'node': 
{'name': 'ontap910-01'}, 'uuid': '1c9a72de'} + }, + 'name': 'abc_if', + 'svm': {'name': 'svm0_iscsi', 'uuid': 'a59e775d'}, + 'uuid': 'a3935ab5' + }]}, None), + 'two_records': (200, {'records': [{'name': 'node2_abc_if'}, {'name': 'node2_abc_if'}]}, None), + 'error_precluster': (500, None, {'message': 'are available in precluster.'}), + 'cluster_identity': (200, {'location': 'Oz', 'name': 'abc'}, None), + 'nodes': (200, {'records': [ + {'name': 'node2', 'uuid': 'uuid2', 'cluster_interfaces': [{'ip': {'address': '10.10.10.2'}}]} + ]}, None), + 'nodes_two_records': (200, {'records': [ + {'name': 'node2', 'uuid': 'uuid2', 'cluster_interfaces': [{'ip': {'address': '10.10.10.2'}}]}, + {'name': 'node3', 'uuid': 'uuid2', 'cluster_interfaces': [{'ip': {'address': '10.10.10.2'}}]} + ]}, None), +}, False) + + +def test_rest_create_ip_no_svm(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ('POST', 'network/ip/interfaces', SRR['success']), # post + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_create_ip_no_svm_idempotent(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_create_ip_no_svm_idempotent_localhost(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # 
get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'home_node': 'localhost', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_create_ip_with_svm(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ('POST', 'network/ip/interfaces', SRR['success']), # post + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'vserver': 'vserver', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_create_fc_with_svm(): + ''' create FC interface ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/fc/interfaces', SRR['zero_records']), # get FC + ('POST', 'network/fc/interfaces', SRR['success']), # post + ]) + module_args = { + 'use_rest': 'always', + 'vserver': 'vserver', + 'data_protocol': 'fc_nvme', + 'home_node': 'my_node', + 'protocols': 'fc-nvme' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_create_fc_with_svm_no_home_port(): + ''' create FC interface ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/fc/interfaces', SRR['zero_records']), # get FC + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ('POST', 'network/fc/interfaces', SRR['success']), # post + ]) + args = dict(DEFAULT_ARGS) + module_args = { + 'use_rest': 'always', + 'vserver': 'vserver', + 'data_protocol': 'fc_nvme', + 'protocols': 'fc-nvme', + 'current_port': args.pop('home_port'), + 'current_node': 'my_node', + } + assert call_main(my_main, args, module_args)['changed'] + + +@patch('time.sleep') +def 
test_rest_create_ip_with_cluster_svm(dont_sleep): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ('POST', 'network/ip/interfaces', SRR['one_record_vserver']), # post + ('PATCH', 'network/ip/interfaces/54321', SRR['one_record_vserver']), # migrate + ('GET', 'network/ip/interfaces', SRR['one_record_vserver']), # get IP + ]) + module_args = { + 'use_rest': 'always', + 'admin_status': 'up', + 'current_port': 'e0c', + 'failover_scope': 'home_node_only', + 'ipspace': 'cluster', + 'vserver': 'vserver', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'role': 'intercluster', + 'probe_port': 65431, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + print_warnings() + assert_warning_was_raised('Ignoring vserver with REST for non data SVM.') + + +def test_rest_negative_create_ip(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), # get IP + ('GET', 'cluster/nodes', SRR['zero_records']), # get nodes + ]) + msg = 'Error: Cannot guess home_node, home_node is required when home_port is present with REST.' 
+ module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + } + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_negative_create_ip_with_svm_no_home_port(): + ''' create FC interface ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + # ('POST', 'network/fc/interfaces', SRR['success']), # post + ]) + args = dict(DEFAULT_ARGS) + args.pop('home_port') + module_args = { + 'use_rest': 'always', + 'vserver': 'vserver', + 'interface_type': 'ip', + } + error = "Error: At least one of 'broadcast_domain', 'home_port', 'home_node' is required to create an IP interface." + assert error in call_main(my_main, args, module_args, fail=True)['msg'] + + +def test_rest_negative_create_no_ip_address(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ]) + msg = 'Error: Missing one or more required parameters for creating interface: interface_type.' + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + } + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_get_fc_no_svm(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ]) + module_args = { + 'use_rest': 'always', + 'interface_type': 'fc', + } + msg = "A data 'vserver' is required for FC interfaces." 
+ assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_negative_get_multiple_ip_if(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['two_records']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ]) + msg = 'Error: multiple records for: node2_abc_if' + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + } + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_negative_get_multiple_fc_if(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), # get IP + ('GET', 'network/fc/interfaces', SRR['two_records']), # get FC + ]) + msg = 'Error: unexpected records for name: abc_if, vserver: not_cluster' + module_args = { + 'use_rest': 'always', + 'vserver': 'not_cluster', + } + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_negative_get_multiple_ip_fc_if(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_vserver']), # get IP + ('GET', 'network/fc/interfaces', SRR['one_record_vserver']), # get FC + ]) + msg = 'Error fetching interface abc_if - found duplicate entries, please indicate interface_type.' 
+ module_args = { + 'use_rest': 'always', + 'vserver': 'not_cluster', + } + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_modify_idempotent_ip_no_svm(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_modify_ip_no_svm(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), # get IP + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get IP + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'home_node': 'node2', + 'interface_name': 'new_name', + 'from_name': 'abc_if' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_modify_ip_svm(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_vserver']), # get IP + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + 'vserver': 'vserver', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'home_node': 'node1', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_migrate_ip_no_svm(sleep_mock): + ''' create cluster ''' + modified = copy.deepcopy(SRR['one_record_home_node']) + modified[1]['records'][0]['location']['node']['name'] = 'node1' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + 
('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes (for get) + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get - no change + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', modified), + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'current_node': 'node1', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_migrate_ip_no_svm_port(sleep_mock): + ''' create cluster ''' + modified = copy.deepcopy(SRR['one_record_home_node']) + modified[1]['records'][0]['location']['port']['name'] = 'port1' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes (for get) + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get - no change + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', modified), + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'current_port': 'port1', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_migrate_ip_svm(sleep_mock): + ''' create cluster ''' + modified = copy.deepcopy(SRR['one_record_home_node']) + modified[1]['records'][0]['location']['node']['name'] = 'node1' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes (for get) + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + 
('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', modified), + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'current_node': 'node1', + 'vserver': 'vserver' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_migrate_ip_error(sleep_mock): + ''' create cluster ''' + modified = copy.deepcopy(SRR['one_record_home_node']) + modified[1]['records'][0]['location']['node']['name'] = 'node1' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes (for get) + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', 
SRR['generic_error']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'current_node': 'node1', + 'vserver': 'vserver' + } + error = rest_error_message('Errors waiting for migration to complete', 'network/ip/interfaces') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_rest_migrate_ip_timeout(sleep_mock): + ''' create cluster ''' + modified = copy.deepcopy(SRR['one_record_home_node']) + modified[1]['records'][0]['location']['node']['name'] = 'node1' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes (for get) + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + 
('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'current_node': 'node1', + 'vserver': 'vserver' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_warning_was_raised('Failed to confirm interface is migrated after 120 seconds') + + +def test_rest_create_migrate_fc_error(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/fc/interfaces', SRR['empty_records']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/fc/interfaces', SRR['one_record_fcp']) + ]) + module_args = { + 'use_rest': 'always', + 'home_node': 'ontap910-01', + 'current_node': 'ontap910-02', + 'current_port': '1b', + 'interface_type': 'fc', + 'vserver': 'svm0_iscsi' + } + error = 'Error: Missing one or more required parameters for creating interface' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['home_port'] = '1a' + error = 'Error: cannot migrate FC interface' + assert error in call_main(my_main, DEFAULT_ARGS, 
module_args, fail=True)['msg'] + + +def test_rest_delete_ip_no_svm(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), # get IP + ('GET', 'cluster/nodes', SRR['nodes']), # get nodes (for get) + ('DELETE', 'network/ip/interfaces/54321', SRR['success']), # delete + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'state': 'absent', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_disable_delete_fc(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/fc/interfaces', SRR['one_record_vserver']), # get IP + ('PATCH', 'network/fc/interfaces/54321', SRR['success']), # disable fc before delete + ('DELETE', 'network/fc/interfaces/54321', SRR['success']), # delete + ]) + module_args = { + 'use_rest': 'always', + 'state': 'absent', + "admin_status": "up", + "protocols": "fc-nvme", + "role": "data", + "vserver": "svm3", + "current_port": "1a" + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_delete_idempotent_ip_no_svm(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), # get IP + ]) + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'address': '10.12.12.13', + 'netmask': '255.255.192.0', + 'state': 'absent', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_derive_fc_protocol_fcp(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'protocols': ['fcp'], + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + my_obj.derive_fc_data_protocol() + assert my_obj.parameters['data_protocol'] == 'fcp' + + +def 
test_derive_fc_protocol_nvme(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'protocols': ['fc-nvme'], + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + my_obj.derive_fc_data_protocol() + assert my_obj.parameters['data_protocol'] == 'fc_nvme' + + +def test_derive_fc_protocol_empty(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'protocols': [], + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + assert my_obj.derive_fc_data_protocol() is None + + +def test_negative_derive_fc_protocol_nvme(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'protocols': ['fc-nvme', 'fcp'], + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + msg = "A single protocol entry is expected for FC interface, got ['fc-nvme', 'fcp']." + assert msg in expect_and_capture_ansible_exception(my_obj.derive_fc_data_protocol, 'fail')['msg'] + + +def test_negative_derive_fc_protocol_nvme_mismatch(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'protocols': ['fc-nvme'], + 'data_protocol': 'fcp' + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + msg = "Error: mismatch between configured data_protocol: fcp and data_protocols: ['fc-nvme']" + assert msg in expect_and_capture_ansible_exception(my_obj.derive_fc_data_protocol, 'fail')['msg'] + + +def test_negative_derive_fc_protocol_unexpected(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'protocols': ['fc-unknown'], + 'data_protocol': 'fcp' + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + msg = "Unexpected protocol value fc-unknown." 
+ assert msg in expect_and_capture_ansible_exception(my_obj.derive_fc_data_protocol, 'fail')['msg'] + + +def test_derive_interface_type_nvme(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'protocols': ['fc-nvme'], + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + my_obj.derive_interface_type() + assert my_obj.parameters['interface_type'] == 'fc' + + +def test_derive_interface_type_iscsi(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'protocols': ['iscsi'], + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + my_obj.derive_interface_type() + assert my_obj.parameters['interface_type'] == 'ip' + + +def test_derive_interface_type_cluster(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'role': 'cluster', + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + my_obj.derive_interface_type() + assert my_obj.parameters['interface_type'] == 'ip' + + +def test_negative_derive_interface_type_nvme_mismatch(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + msg = "Error: mismatch between configured interface_type: ip and derived interface_type: fc." 
+ module_args = { + 'use_rest': 'always', + 'protocols': ['fc-nvme'], + 'interface_type': 'ip' + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + assert msg in expect_and_capture_ansible_exception(my_obj.derive_interface_type, 'fail')['msg'] + + +def test_negative_derive_interface_type_unknown(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + msg = "Error: unable to determine interface type, please set interface_type: unexpected value(s) for protocols: ['unexpected']" + module_args = { + 'use_rest': 'always', + 'protocols': ['unexpected'], + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + assert msg in expect_and_capture_ansible_exception(my_obj.derive_interface_type, 'fail')['msg'] + + +def test_negative_derive_interface_type_multiple(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + msg = "Error: unable to determine interface type, please set interface_type: incompatible value(s) for protocols: ['fc-nvme', 'cifs']" + module_args = { + 'use_rest': 'always', + 'protocols': ['fc-nvme', 'cifs'], + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + assert msg in expect_and_capture_ansible_exception(my_obj.derive_interface_type, 'fail')['msg'] + + +def test_derive_block_file_type_fcp(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + block_p, file_p, fcp = my_obj.derive_block_file_type(['fcp']) + assert block_p + assert not file_p + assert fcp + module_args['interface_type'] = 'fc' + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + block_p, file_p, fcp = my_obj.derive_block_file_type(None) + assert block_p + assert not file_p + assert fcp + + +def test_derive_block_file_type_iscsi(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) 
+ module_args = { + 'use_rest': 'always', + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + block_p, file_p, fcp = my_obj.derive_block_file_type(['iscsi']) + assert block_p + assert not file_p + assert not fcp + + +def test_derive_block_file_type_cifs(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + block_p, file_p, fcp = my_obj.derive_block_file_type(['cifs']) + assert not block_p + assert file_p + assert not fcp + + +def test_derive_block_file_type_mixed(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + error = "Cannot use any of ['fcp'] with ['cifs']" + assert expect_and_capture_ansible_exception(my_obj.derive_block_file_type, 'fail', ['cifs', 'fcp'])['msg'] == error + + +def test_map_failover_policy(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'failover_policy': 'local-only', + } + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + my_obj.map_failover_policy() + assert my_obj.parameters['failover_scope'] == 'home_node_only' + + +def test_rest_negative_unsupported_zapi_option_fail(): + ''' create cluster ''' + register_responses([ + ]) + msg = "REST API currently does not support 'is_ipv4_link_local'" + module_args = { + 'use_rest': 'always', + 'ipspace': 'cluster', + 'is_ipv4_link_local': True, + } + assert msg in create_module(interface_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_negative_rest_only_option(): + ''' create cluster ''' + register_responses([ + ]) + msg = "probe_port requires REST." 
+ module_args = { + 'use_rest': 'never', + 'ipspace': 'cluster', + 'probe_port': 65431, + } + assert msg in create_module(interface_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_negative_unsupported_zapi_option_force_zapi_1(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + msg = "missing required argument with ZAPI: vserver" + module_args = { + 'use_rest': 'auto', + 'ipspace': 'cluster', + 'is_ipv4_link_local': True, + } + assert msg in create_module(interface_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_rest_negative_unsupported_zapi_option_force_zapi_2(mock_netapp_lib): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + mock_netapp_lib.return_value = False + msg = "the python NetApp-Lib module is required" + module_args = { + 'use_rest': 'auto', + 'ipspace': 'cluster', + 'is_ipv4_link_local': True, + } + assert msg in create_module(interface_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_negative_unsupported_rest_version(): + ''' create cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + msg = "Error: REST requires ONTAP 9.7 or later for interface APIs." 
+    module_args = {'use_rest': 'always'}
+    assert msg == create_module(interface_module, DEFAULT_ARGS, module_args, fail=True)['msg']
+
+
+def test_rest_auto_falls_back_to_zapi_if_ip_9_6():
+    register_responses([
+        ('GET', 'cluster', SRR['is_rest_96'])
+    ])
+    module_args = {'use_rest': 'auto'}
+    # vserver is a required parameter with ZAPI
+    msg = "missing required argument with ZAPI: vserver"
+    assert msg in create_module(interface_module, DEFAULT_ARGS, module_args, fail=True)['msg']
+    print_warnings()  # fixed: bare 'print_warnings' was a no-op statement, not a call
+    assert_warning_was_raised('Falling back to ZAPI: REST requires ONTAP 9.7 or later for interface APIs.')
+
+
+def test_fix_errors():
+    register_responses([
+        ('GET', 'cluster', SRR['is_rest_97'])
+    ])
+    module_args = {'use_rest': 'auto'}
+    my_obj = create_module(interface_module, DEFAULT_ARGS, module_args)
+    control = {'xx': 11, 'yy': 22}
+    # no role in error
+    errors = dict(control)
+    assert my_obj.fix_errors(None, errors) is None
+    assert errors == control
+    # role/firewall_policy/protocols/service_policy -> service_policy
+    tests = [
+        ('data', 'data', ['nfs'], None, 'default-data-files', True),
+        ('data', 'data', ['cifs'], None, 'default-data-files', True),
+        ('data', 'data', ['iscsi'], None, 'default-data-blocks', True),
+        ('data', '', ['fc-nvme'], None, 'unchanged', True),
+        ('data', 'mgmt', ['ignored'], None, 'default-management', True),
+        ('data', '', ['nfs'], None, 'default-data-files', True),
+        ('data', '', ['cifs'], None, 'default-data-files', True),
+        ('data', '', ['iscsi'], None, 'default-data-blocks', True),
+        ('data', 'mgmt', ['ignored'], None, 'default-management', True),
+        ('intercluster', 'intercluster', ['ignored'], None, 'default-intercluster', True),
+        ('intercluster', '', ['ignored'], None, 'default-intercluster', True),
+        ('cluster', 'mgmt', ['ignored'], None, 'default-cluster', True),
+        ('cluster', '', ['ignored'], None, 'default-cluster', True),
+        ('cluster', 'other', ['ignored'], None, 'unchanged', False),
+    ]
+    for role, firewall_policy,
protocols, service_policy, expected_service_policy, fixed in tests:
+        my_obj.parameters['protocols'] = protocols
+        if service_policy:
+            my_obj.parameters['service_policy'] = service_policy  # fixed: was my_obj['service_policy'] — the module object is not subscriptable
+        options = {'service_policy': 'unchanged'}
+        errors = dict(control)
+        errors['role'] = role
+        if firewall_policy:
+            errors['firewall_policy'] = firewall_policy
+        assert my_obj.fix_errors(options, errors) is None
+        print('OPTIONS', options)
+        assert 'service_policy' in options
+        assert options['service_policy'] == expected_service_policy
+        assert errors == control or not fixed
+        assert fixed or 'role' in errors
+
+
+def test_error_messages_get_interface_rest():
+    register_responses([
+        ('GET', 'cluster', SRR['is_rest_97']),
+        ('GET', 'network/ip/interfaces', SRR['two_records']),  # get IP
+        ('GET', 'cluster/nodes', SRR['generic_error']),  # get nodes
+        # second call
+        ('GET', 'network/ip/interfaces', SRR['one_record_vserver']),  # get IP
+        ('GET', 'network/fc/interfaces', SRR['generic_error']),  # get FC
+        # third call
+        ('GET', 'network/ip/interfaces', SRR['generic_error']),  # get IP
+        ('GET', 'network/fc/interfaces', SRR['one_record_vserver']),  # get FC
+        # fourth call
+        ('GET', 'network/ip/interfaces', SRR['generic_error']),  # get IP
+        ('GET', 'network/fc/interfaces', SRR['generic_error']),  # get FC
+        # fifth call
+        ('GET', 'network/ip/interfaces', SRR['error_precluster']),  # get IP
+    ])
+    module_args = {'use_rest': 'auto'}
+    my_obj = create_module(interface_module, DEFAULT_ARGS, module_args)
+    # first call
+    error = 'Error fetching cluster node info'
+    assert expect_and_capture_ansible_exception(my_obj.get_interface_rest, 'fail', 'my_lif')['msg'] == rest_error_message(error, 'cluster/nodes')
+    # second call
+    # reset value, as it was set for ip
+    del my_obj.parameters['interface_type']
+    my_obj.parameters['vserver'] = 'not_cluster'
+    assert my_obj.get_interface_rest('my_lif') is not None
+    # third call
+    # reset value, as it was set for ip
+    del my_obj.parameters['interface_type']
+
my_obj.parameters['vserver'] = 'not_cluster' + assert my_obj.get_interface_rest('my_lif') is not None + # fourth call + # reset value, as it was set for fc + del my_obj.parameters['interface_type'] + error = expect_and_capture_ansible_exception(my_obj.get_interface_rest, 'fail', 'my_lif')['msg'] + assert rest_error_message('Error fetching interface details for my_lif', 'network/ip/interfaces') in error + assert rest_error_message('', 'network/fc/interfaces') in error + # fifth call + error = 'This module cannot use REST in precluster mode, ZAPI can be forced with use_rest: never.' + assert error in expect_and_capture_ansible_exception(my_obj.get_interface_rest, 'fail', 'my_lif')['msg'] + + +def test_error_messages_rest_find_interface(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster/nodes', SRR['nodes_two_records']), # get nodes + ]) + module_args = {'use_rest': 'auto'} + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + # no calls + # no interface type + error = 'Error: missing option "interface_type (or could not be derived)' + assert error in expect_and_capture_ansible_exception(my_obj.get_net_int_api, 'fail')['msg'] + # multiple records for cluster + records = [ + {'name': 'node_name'}, + {'name': 'node_name'} + ] + error = 'Error: multiple records for: node_name - %s' % records + assert error in expect_and_capture_ansible_exception(my_obj.find_interface_record, 'fail', records, 'node', 'name')['msg'] + # multiple records with vserver + records = [1, 2] + my_obj.parameters['vserver'] = 'vserver' + error = 'Error: unexpected records for name: name, vserver: vserver - [1, 2]' + assert error in expect_and_capture_ansible_exception(my_obj.find_exact_match, 'fail', records, 'name')['msg'] + # multiple records with ambiguity, home_node set (warn) + del my_obj.parameters['vserver'] + my_obj.parameters['home_node'] = 'node' + records = [ + {'name': 'node_name'}, + {'name': 'node_name'} + ] + error = 'Error: 
multiple records for: node_name - %s' % records + assert error in expect_and_capture_ansible_exception(my_obj.find_exact_match, 'fail', records, 'name')['msg'] + records = [ + {'name': 'node_name'}, + {'name': 'name'} + ] + record = my_obj.find_exact_match(records, 'name') + assert record == {'name': 'node_name'} + assert_warning_was_raised("Found both ['name', 'node_name'], selecting node_name") + # fifth call (get nodes, cached) + # multiple records with different home nodes + del my_obj.parameters['home_node'] + records = [ + {'name': 'node2_name'}, + {'name': 'node3_name'} + ] + error = "Error: multiple matches for name: name: ['node2_name', 'node3_name']. Set home_node parameter." + assert error in expect_and_capture_ansible_exception(my_obj.find_exact_match, 'fail', records, 'name')['msg'] + # multiple records with home node and no home node + records = [ + {'name': 'node2_name'}, + {'name': 'name'} + ] + error = "Error: multiple matches for name: name: ['name', 'node2_name']. Set home_node parameter." + assert error in expect_and_capture_ansible_exception(my_obj.find_exact_match, 'fail', records, 'name')['msg'] + # sixth call + error = "Error: multiple matches for name: name: ['name', 'node2_name']. Set home_node parameter." 
+ assert error in expect_and_capture_ansible_exception(my_obj.find_exact_match, 'fail', records, 'name')['msg'] + + +def test_error_messages_rest_misc(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('POST', 'network/type/interfaces', SRR['generic_error']), + ('PATCH', 'network/type/interfaces/uuid', SRR['generic_error']), + ('DELETE', 'network/type/interfaces/uuid', SRR['generic_error']), + ]) + module_args = {'use_rest': 'auto'} + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + # no calls + # no interface type + error = 'Error, expecting uuid in existing record' + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_payloads, 'fail', 'delete', {}, {})['msg'] + my_obj.parameters['interface_type'] = 'type' + error = rest_error_message('Error creating interface abc_if', 'network/type/interfaces') + assert error in expect_and_capture_ansible_exception(my_obj.create_interface_rest, 'fail', {})['msg'] + error = rest_error_message('Error modifying interface abc_if', 'network/type/interfaces/uuid') + assert error in expect_and_capture_ansible_exception(my_obj.modify_interface_rest, 'fail', 'uuid', {'xxx': 'yyy'})['msg'] + error = rest_error_message('Error deleting interface abc_if', 'network/type/interfaces/uuid') + assert error in expect_and_capture_ansible_exception(my_obj.delete_interface_rest, 'fail', 'uuid')['msg'] + + +def test_error_messages_build_rest_body_and_validations(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = {'use_rest': 'always'} + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + my_obj.parameters['home_node'] = 'node1' + my_obj.parameters['protocols'] = ['nfs'] + my_obj.parameters['role'] = 'intercluster' + error = 'Error: Missing one or more required parameters for creating interface: interface_type.' 
+ assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail')['msg'] + my_obj.parameters['interface_type'] = 'type' + error = 'Error: unexpected value for interface_type: type.' + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail')['msg'] + my_obj.parameters['interface_type'] = 'ip' + my_obj.parameters['ipspace'] = 'ipspace' + error = 'Error: Protocol cannot be specified for intercluster role, failed to create interface.' + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail')['msg'] + del my_obj.parameters['protocols'] + my_obj.parameters['interface_type'] = 'fc' + error = "Error: 'home_port' is not supported for FC interfaces with 9.7, use 'current_port', avoid home_node." + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail')['msg'] + print_warnings() + assert_warning_was_raised("Avoid 'home_node' with FC interfaces with 9.7, use 'current_node'.") + del my_obj.parameters['home_port'] + error = "Error: A data 'vserver' is required for FC interfaces." + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail')['msg'] + my_obj.parameters['current_port'] = '0a' + my_obj.parameters['data_protocol'] = 'fc' + my_obj.parameters['force_subnet_association'] = True + my_obj.parameters['failover_group'] = 'failover_group' + my_obj.parameters['vserver'] = 'vserver' + error = "Error: 'role' is deprecated, and 'data' is the only value supported for FC interfaces: found intercluster." 
+ assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail')['msg'] + my_obj.parameters['role'] = 'data' + error = "Error creating interface, unsupported options: {'failover_group': 'failover_group'}" + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail')['msg'] + del my_obj.parameters['failover_group'] + my_obj.parameters['broadcast_domain'] = 'BDD1' + error = "Error: broadcast_domain option only supported for IP interfaces: abc_if, interface_type: fc" + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail', None)['msg'] + my_obj.parameters['service_policy'] = 'svc_pol' + error = "Error: 'service_policy' is not supported for FC interfaces." + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail', None)['msg'] + del my_obj.parameters['service_policy'] + my_obj.parameters['probe_port'] = 65431 + error = "Error: 'probe_port' is not supported for FC interfaces." + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail', None)['msg'] + print_warnings() + assert_warning_was_raised('Ignoring force_subnet_association') + my_obj.parameters['interface_type'] = 'ip' + del my_obj.parameters['vserver'] + del my_obj.parameters['ipspace'] + error = 'Error: ipspace name must be provided if scope is cluster, or vserver for svm scope.' 
+ assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail')['msg'] + modify = {'ipspace': 'ipspace'} + error = "The following option cannot be modified: ipspace.name" + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail', modify)['msg'] + del my_obj.parameters['role'] + my_obj.parameters['current_port'] = 'port1' + my_obj.parameters['home_port'] = 'port1' + my_obj.parameters['ipspace'] = 'ipspace' + error = "Error: home_port and broadcast_domain are mutually exclusive for creating: abc_if" + assert error in expect_and_capture_ansible_exception(my_obj.build_rest_body, 'fail', None)['msg'] + + +def test_dns_domain_ddns_enabled(): + ''' domain and ddns enabled option test ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), + ('GET', 'cluster/nodes', SRR['nodes']), + ('POST', 'network/ip/interfaces', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'network/ip/interfaces', SRR['one_record_vserver']), + ('GET', 'cluster/nodes', SRR['nodes']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'network/fc/interfaces', SRR['zero_records']), + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + module_args = { + 'use_rest': 'always', + 'address': '10.11.12.13', + 'netmask': '255.192.0.0', + 'vserver': 'vserver', + 'dns_domain_name': 'netapp1.com', + 'is_dns_update_enabled': False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + del module_args['address'] + del module_args['netmask'] + args = {'data_protocol': 'fc_nvme', 'home_node': 'my_node', 'protocols': 'fc-nvme', 'interface_type': 'fc'} + module_args.update(args) + assert 'dns_domain_name, is_dns_update_enabled options only supported for IP interfaces' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + 
assert 'Error: Minimum version of ONTAP for is_dns_update_enabled is (9, 9, 1).' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
+
+
+def test_subnet_name():
+    ''' subnet_name option test '''
+    register_responses([
+        ('GET', 'cluster', SRR['is_rest_9_11_1']),
+        ('GET', 'network/ip/interfaces', SRR['zero_records']),
+        ('GET', 'cluster/nodes', SRR['nodes']),
+        ('POST', 'network/ip/interfaces', SRR['success']),
+        # idempotency
+        ('GET', 'cluster', SRR['is_rest_9_11_1']),
+        ('GET', 'network/ip/interfaces', SRR['one_record_vserver_subnet1']),
+        # modify subnet
+        ('GET', 'cluster', SRR['is_rest_9_11_1']),
+        ('GET', 'network/ip/interfaces', SRR['one_record_vserver_subnet1']),
+        ('GET', 'cluster/nodes', SRR['nodes']),
+        ('PATCH', 'network/ip/interfaces/54321', SRR['success']),
+        # error cases
+        ('GET', 'cluster', SRR['is_rest_9_9_1']),
+        ('GET', 'cluster', SRR['is_rest_9_11_1']),
+        ('GET', 'network/fc/interfaces', SRR['zero_records']),
+    ])
+    module_args = {
+        'use_rest': 'always',
+        'ipspace': 'Default',
+        'subnet_name': 'subnet1',
+        'vserver': 'vserver',
+    }
+    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+    module_args['subnet_name'] = 'subnet2'
+    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+    assert 'Minimum version of ONTAP for subnet_name is (9, 11, 1)' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
+    args = {'data_protocol': 'fc_nvme', 'home_node': 'my_node', 'protocols': 'fc-nvme', 'interface_type': 'fc'}
+    module_args.update(args)
+    assert 'subnet_name option only supported for IP interfaces' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
+    assert_warning_was_raised('ipspace is ignored for FC interfaces.')
+
+
+def test_fail_if_subnet_conflicts():
+    ''' fail_if_subnet_conflicts option test '''
+    register_responses([
+        ('GET', 'cluster', SRR['is_rest_9_11_1']),
+        ('GET',
'network/ip/interfaces', SRR['zero_records']),
+        ('GET', 'cluster/nodes', SRR['nodes']),
+        ('POST', 'network/ip/interfaces', SRR['success']),
+        # idempotency
+        ('GET', 'cluster', SRR['is_rest_9_11_1']),
+        ('GET', 'network/ip/interfaces', SRR['one_record_vserver']),
+        # modify subnet
+        ('GET', 'cluster', SRR['is_rest_9_11_1']),
+        ('GET', 'network/ip/interfaces', SRR['one_record_vserver']),
+        ('GET', 'cluster/nodes', SRR['nodes']),
+        ('PATCH', 'network/ip/interfaces/54321', SRR['success']),
+        # error cases
+        ('GET', 'cluster', SRR['is_rest_9_9_1']),
+        ('GET', 'cluster', SRR['is_rest_9_11_1']),
+        ('GET', 'network/fc/interfaces', SRR['zero_records']),
+    ])
+    module_args = {
+        'use_rest': 'always',
+        'ipspace': 'Default',
+        'fail_if_subnet_conflicts': False,
+        'vserver': 'vserver',
+        'address': '10.11.12.13',
+        'netmask': '255.192.0.0',
+    }
+    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+    module_args['address'] = '10.11.12.14'
+    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+    assert 'Minimum version of ONTAP for fail_if_subnet_conflicts is (9, 11, 1)' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
+    args = {'data_protocol': 'fc_nvme', 'home_node': 'my_node', 'protocols': 'fc-nvme', 'interface_type': 'fc'}
+    module_args.update(args)
+    assert 'fail_if_subnet_conflicts option only supported for IP interfaces' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
+    assert_warning_was_raised('ipspace is ignored for FC interfaces.')
+
+
+def check_options(my_obj, parameters, exp_options, exp_migrate_options, exp_errors):
+    options, migrate_options, errors = my_obj.set_options_rest(parameters)
+    assert options == exp_options
+    assert migrate_options == exp_migrate_options
+    assert errors == exp_errors
+
+
+def test_set_options_rest():
+    register_responses([
+        ('GET', 'cluster', SRR['is_rest_97']),
+        # ('GET', 'cluster/nodes',
SRR['nodes']), + ]) + module_args = {'use_rest': 'always'} + my_obj = create_module(interface_module, DEFAULT_ARGS, module_args) + parameters = None + my_obj.parameters = { + 'interface_type': 'other' + } + check_options(my_obj, parameters, {}, {}, {}) + # unknown modify options + check_options(my_obj, {'x': 'y'}, {}, {}, {}) + # valid options + my_obj.parameters = { + 'interface_type': 'ip', + 'fail_if_subnet_conflicts': False + } + check_options(my_obj, parameters, {'fail_if_subnet_conflicts': False}, {}, {}) + check_options(my_obj, {'subnet_name': 'subnet1'}, {'subnet.name': 'subnet1'}, {}, {}) + my_obj.parameters['home_node'] = 'node1' + check_options(my_obj, {'home_node': 'node1', 'home_port': 'port1'}, {'location': {'home_port': {'name': 'port1', 'node': {'name': 'node1'}}}}, {}, {}) + my_obj.parameters['current_node'] = 'node1' + check_options(my_obj, {'current_node': 'node1', 'current_port': 'port1'}, {}, {'location': {'port': {'name': 'port1', 'node': {'name': 'node1'}}}}, {}) + + +def test_not_throw_warnings_in_rename(): + ''' assert no warnings raised during rename ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'network/ip/interfaces', SRR['zero_records']), + ('GET', 'network/ip/interfaces', SRR['one_record_vserver']), + ('GET', 'cluster/nodes', SRR['nodes']), + ('PATCH', 'network/ip/interfaces/54321', SRR['success']), + ]) + module_args = { + "from_name": "abc_if", + "interface_name": "abc_if_update", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_no_warnings() + + +def test_throw_warnings_modify_rename(): + ''' assert warnings raised when interface_name does not have node name in it. 
''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'network/ip/interfaces', SRR['one_record_home_node']), + ('GET', 'cluster/nodes', SRR['nodes']) + ]) + assert not call_main(my_main, DEFAULT_ARGS)['changed'] + print_warnings() + # current record name is 'node2_abc_if' and interface_name does not have node name in it. + # adjust to avoid rename attempt. + assert_warning_was_raised('adjusting name from abc_if to node2_abc_if') diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ipspace.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ipspace.py new file mode 100644 index 000000000..9a23f06b9 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ipspace.py @@ -0,0 +1,189 @@ +# (c) 2018, NTT Europe Ltd. +# (c) 2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit test for Ansible module: na_ontap_ipspace """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ipspace \ + import NetAppOntapIpspace as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + 
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +DEFAULT_ARGS = { + "hostname": "10.10.10.10", + "username": "admin", + "password": "netapp1!", + "validate_certs": "no", + "https": "yes", + "state": "present", + "name": "test_ipspace" +} + + +ipspace_info = { + 'num-records': 1, + 'attributes-list': { + 'net-ipspaces-info': { + 'ipspace': 'test_ipspace' + } + } +} + +ipspace_info_renamed = { + 'num-records': 1, + 'attributes-list': { + 'net-ipspaces-info': { + 'ipspace': 'test_ipspace_renamed' + } + } +} + +ZRR = zapi_responses({ + 'ipspace_info': build_zapi_response(ipspace_info), + 'ipspace_info_renamed': build_zapi_response(ipspace_info_renamed), +}) + +SRR = rest_responses({ + 'ipspace_record': (200, {'records': [{ + "name": "test_ipspace", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412"}]}, None), + 'ipspace_record_renamed': (200, {'records': [{ + "name": "test_ipspace_renamed", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412"}]}, None) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + msg = 'missing required arguments:' + assert msg in exc.value.args[0]['msg'] + + +def test_get_ipspace_iscalled(): + ''' test if get_ipspace() is called ''' + register_responses([ + ('net-ipspaces-get-iter', ZRR['empty']) + ]) + ipsace_obj = create_module(my_module, DEFAULT_ARGS, {'use_rest': 'never'}) + result = ipsace_obj.get_ipspace('dummy') + assert result is None + + +def test_ipspace_apply_iscalled(): + ''' test if apply() is called - create and rename''' + register_responses([ + # create + ('net-ipspaces-get-iter', ZRR['empty']), + ('net-ipspaces-create', ZRR['success']), + # create idempotent check + ('net-ipspaces-get-iter', ZRR['ipspace_info']), + # rename + ('net-ipspaces-get-iter', ZRR['empty']), + ('net-ipspaces-get-iter', ZRR['ipspace_info']), + ('net-ipspaces-rename', ZRR['success']), + # 
rename idempotent check + ('net-ipspaces-get-iter', ZRR['ipspace_info_renamed']), + # delete + ('net-ipspaces-get-iter', ZRR['ipspace_info']), + ('net-ipspaces-destroy', ZRR['success']) + ]) + args = {'use_rest': 'never'} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + args['from_name'] = 'test_ipspace' + args['name'] = 'test_ipspace_renamed' + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + args = {'use_rest': 'never', 'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_create_rest(): + ''' Test successful create and idempotent check''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'network/ipspaces', SRR['empty_records']), + ('POST', 'network/ipspaces', SRR['success']), + # idempotent + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'network/ipspaces', SRR['ipspace_record']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS)['changed'] + + +def test_successful_delete_rest(): + ''' Test successful delete and idempotent check''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'network/ipspaces', SRR['ipspace_record']), + ('DELETE', 'network/ipspaces/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']), + # idempotent + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'network/ipspaces', SRR['empty_records']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_successful_rename_rest(): + ''' Test successful rename and idempotent check''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'network/ipspaces', 
SRR['empty_records']), + ('GET', 'network/ipspaces', SRR['ipspace_record']), + ('PATCH', 'network/ipspaces/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']), + # idempotent + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'network/ipspaces', SRR['ipspace_record_renamed']) + ]) + args = {'from_name': 'test_ipspace', 'name': 'test_ipspace_renamed'} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_if_all_methods_catch_exception_zapi_rest(): + register_responses([ + # zapi + ('net-ipspaces-get-iter', ZRR['error']), + ('net-ipspaces-create', ZRR['error']), + ('net-ipspaces-rename', ZRR['error']), + ('net-ipspaces-destroy', ZRR['error']), + # REST + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'network/ipspaces', SRR['generic_error']), + ('POST', 'network/ipspaces', SRR['generic_error']), + ('PATCH', 'network/ipspaces/abdcdef', SRR['generic_error']), + ('DELETE', 'network/ipspaces/abdcdef', SRR['generic_error']) + + ]) + my_obj = create_module(my_module, DEFAULT_ARGS, {'from_name': 'test_ipspace_rename', 'use_rest': 'never'}) + assert 'Error getting ipspace' in expect_and_capture_ansible_exception(my_obj.get_ipspace, 'fail')['msg'] + assert 'Error provisioning ipspace' in expect_and_capture_ansible_exception(my_obj.create_ipspace, 'fail')['msg'] + assert 'Error renaming ipspace' in expect_and_capture_ansible_exception(my_obj.rename_ipspace, 'fail')['msg'] + assert 'Error removing ipspace' in expect_and_capture_ansible_exception(my_obj.delete_ipspace, 'fail')['msg'] + + my_obj = create_module(my_module, DEFAULT_ARGS, {'from_name': 'test_ipspace_rename'}) + my_obj.uuid = 'abdcdef' + assert 'Error getting ipspace' in expect_and_capture_ansible_exception(my_obj.get_ipspace, 'fail')['msg'] + assert 'Error provisioning ipspace' in expect_and_capture_ansible_exception(my_obj.create_ipspace, 'fail')['msg'] + assert 'Error renaming ipspace' in 
expect_and_capture_ansible_exception(my_obj.rename_ipspace, 'fail')['msg'] + assert 'Error removing ipspace' in expect_and_capture_ansible_exception(my_obj.delete_ipspace, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi.py new file mode 100644 index 000000000..4d0a53fda --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi.py @@ -0,0 +1,339 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_iscsi ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_iscsi \ + import NetAppOntapISCSI as iscsi_module # module under test +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + "hostname": "10.10.10.10", + "username": 
"admin", + "password": "netapp1!", + "validate_certs": "no", + "https": "yes", + "state": "present", + "use_rest": "never", + "vserver": "svm1", + "service_state": "started" +} + + +iscsi_info_started = { + 'num-records': 1, + 'attributes-list': { + 'iscsi-service-info': { + 'is-available': 'true', + 'vserver': 'svm1' + } + } +} + +iscsi_info_stopped = { + 'num-records': 1, + 'attributes-list': { + 'iscsi-service-info': { + 'is-available': 'false', + 'vserver': 'svm1' + } + } +} + +ZRR = zapi_responses({ + 'iscsi_started': build_zapi_response(iscsi_info_started), + 'iscsi_stopped': build_zapi_response(iscsi_info_stopped) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + iscsi_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_get_nonexistent_iscsi(): + register_responses([ + ('iscsi-service-get-iter', ZRR['empty']) + ]) + iscsi_obj = create_module(iscsi_module, DEFAULT_ARGS) + result = iscsi_obj.get_iscsi() + assert not result + + +def test_get_existing_iscsi(): + register_responses([ + ('iscsi-service-get-iter', ZRR['iscsi_started']) + ]) + iscsi_obj = create_module(iscsi_module, DEFAULT_ARGS) + result = iscsi_obj.get_iscsi() + assert result + + +def test_successfully_create(): + register_responses([ + ('iscsi-service-get-iter', ZRR['empty']), + ('iscsi-service-create', ZRR['success']) + ]) + assert create_and_apply(iscsi_module, DEFAULT_ARGS)['changed'] + + +def test_create_idempotency(): + register_responses([ + ('iscsi-service-get-iter', ZRR['iscsi_started']) + ]) + assert create_and_apply(iscsi_module, DEFAULT_ARGS)['changed'] is False + + +def test_successfully_create_stop_service(): + register_responses([ + ('iscsi-service-get-iter', ZRR['empty']), + ('iscsi-service-create', ZRR['success']) + ]) + args = {'service_state': 'stopped'} + assert create_and_apply(iscsi_module, DEFAULT_ARGS, args)['changed'] + + 
+def test_successfully_delete_when_service_started(): + register_responses([ + ('iscsi-service-get-iter', ZRR['iscsi_started']), + ('iscsi-service-stop', ZRR['success']), + ('iscsi-service-destroy', ZRR['success']) + ]) + args = {'state': 'absent'} + assert create_and_apply(iscsi_module, DEFAULT_ARGS, args)['changed'] + + +def test_delete_idempotent(): + register_responses([ + ('iscsi-service-get-iter', ZRR['empty']) + ]) + args = {'state': 'absent'} + assert create_and_apply(iscsi_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_start_iscsi(): + register_responses([ + ('iscsi-service-get-iter', ZRR['iscsi_stopped']), + ('iscsi-service-start', ZRR['success']) + ]) + assert create_and_apply(iscsi_module, DEFAULT_ARGS)['changed'] + + +def test_stop_iscsi(): + register_responses([ + ('iscsi-service-get-iter', ZRR['iscsi_started']), + ('iscsi-service-stop', ZRR['success']) + ]) + args = {'service_state': 'stopped'} + assert create_and_apply(iscsi_module, DEFAULT_ARGS, args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('iscsi-service-get-iter', ZRR['error']), + ('iscsi-service-create', ZRR['error']), + ('iscsi-service-start', ZRR['error']), + ('iscsi-service-stop', ZRR['error']), + ('iscsi-service-destroy', ZRR['error']) + ]) + + iscsi_obj = create_module(iscsi_module, DEFAULT_ARGS) + + error = expect_and_capture_ansible_exception(iscsi_obj.get_iscsi, 'fail')['msg'] + assert 'Error finding iscsi service in svm1: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(iscsi_obj.create_iscsi_service, 'fail')['msg'] + assert 'Error creating iscsi service: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(iscsi_obj.start_iscsi_service, 'fail')['msg'] + assert 'Error starting iscsi service on vserver svm1: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(iscsi_obj.stop_iscsi_service, 'fail')['msg'] + assert 'Error Stopping iscsi service on vserver svm1: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(iscsi_obj.delete_iscsi_service, 'fail', {'service_state': 'stopped'})['msg'] + assert 'Error deleting iscsi service on vserver svm1: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + +SRR = rest_responses({ + 'iscsi_started': (200, {"records": [ + { + "svm": {"uuid": "d08434fae1-a8a8-11fg-aa26-005055fhs3e5"}, + "enabled": True, + 'target': {'alias': 'ansibleSVM'} + } + ], "num_records": 1}, None), + 'iscsi_record': (200, {"records": [ + { + "svm": {"uuid": "d08434fae1-a8a8-11fg-aa26-005055fhs3e5"}, + "enabled": True, + 'target': {'alias': 'ansibleSVM'} + } + ], "num_records": 1}, None), + 'iscsi_stopped': (200, {"records": [ + { + "svm": {"uuid": "d08434fae1-a8a8-11fg-aa26-005055fhs3e5"}, + "enabled": False, + 'target': {'alias': 'ansibleSVM'} + } + ], "num_records": 1}, None), +}) + + +ARGS_REST = { + "hostname": "10.10.10.10", + "username": "admin", + "password": "netapp1!", + "validate_certs": "no", + "https": "yes", + "state": "present", + "use_rest": "always", + "vserver": "svm1", + "service_state": "started", + "target_alias": "ansibleSVM" +} + + +def test_successfully_create_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['empty_records']), + ('POST', 'protocols/san/iscsi/services', SRR['success']) + ]) + assert create_and_apply(iscsi_module, ARGS_REST, {'use_rest': 'always'})['changed'] + + +def test_create_idempotency_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['iscsi_started']), + ]) + assert create_and_apply(iscsi_module, ARGS_REST, {'use_rest': 
'always'})['changed'] is False + + +def test_successfully_create_stop_service_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['empty_records']), + ('POST', 'protocols/san/iscsi/services', SRR['success']) + ]) + args = {'service_state': 'stopped'} + assert create_and_apply(iscsi_module, ARGS_REST, args)['changed'] + + +def test_successfully_delete_when_service_started_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['iscsi_started']), + ('PATCH', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['success']), + ('DELETE', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['success']), + ]) + args = {'state': 'absent'} + assert create_and_apply(iscsi_module, ARGS_REST, args)['changed'] + + +def test_delete_idempotent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['empty_records']), + ]) + args = {'state': 'absent'} + assert create_and_apply(iscsi_module, ARGS_REST, args)['changed'] is False + + +def test_start_iscsi_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['iscsi_stopped']), + ('PATCH', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['success']), + ]) + args = {'service_state': 'started'} + assert create_and_apply(iscsi_module, ARGS_REST, args)['changed'] + + +def test_modify_iscsi_target_alias_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['iscsi_started']), + ('PATCH', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['success']), + ]) + args = {"target_alias": "ansibleSVM_test"} + assert create_and_apply(iscsi_module, ARGS_REST, args)['changed'] + + +def 
test_modify_iscsi_target_alias_and_state_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['iscsi_stopped']), + ('PATCH', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['success']), + ('PATCH', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['success']), + ]) + args = {"target_alias": "ansibleSVM_test", 'service_state': 'started'} + assert create_and_apply(iscsi_module, ARGS_REST, args)['changed'] + + +def test_stop_iscsi_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['iscsi_started']), + ('PATCH', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['success']), + ]) + args = {'service_state': 'stopped'} + assert create_and_apply(iscsi_module, ARGS_REST, args)['changed'] + + +def test_if_all_methods_catch_exception_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/san/iscsi/services', SRR['generic_error']), + ('POST', 'protocols/san/iscsi/services', SRR['generic_error']), + ('PATCH', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['generic_error']), + ('PATCH', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['generic_error']), + ('DELETE', 'protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5', SRR['generic_error']) + ]) + + iscsi_obj = create_module(iscsi_module, ARGS_REST, {'use_rest': 'always'}) + iscsi_obj.uuid = "d08434fae1-a8a8-11fg-aa26-005055fhs3e5" + + error = expect_and_capture_ansible_exception(iscsi_obj.get_iscsi_rest, 'fail')['msg'] + msg = 'Error finding iscsi service in svm1: calling: protocols/san/iscsi/services: got Expected error.' 
+ assert msg in error + + error = expect_and_capture_ansible_exception(iscsi_obj.create_iscsi_service_rest, 'fail')['msg'] + msg = 'Error creating iscsi service: calling: protocols/san/iscsi/services: got Expected error.' + assert msg in error + + error = expect_and_capture_ansible_exception(iscsi_obj.start_or_stop_iscsi_service_rest, 'fail', 'started')['msg'] + msg = 'Error starting iscsi service on vserver svm1: calling: protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5: got Expected error.' + assert msg in error + + error = expect_and_capture_ansible_exception(iscsi_obj.start_or_stop_iscsi_service_rest, 'fail', 'stopped')['msg'] + msg = 'Error stopping iscsi service on vserver svm1: calling: protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5: got Expected error.' + assert msg in error + + error = expect_and_capture_ansible_exception(iscsi_obj.delete_iscsi_service_rest, 'fail', {'service_state': 'stopped'})['msg'] + msg = 'Error deleting iscsi service on vserver svm1: calling: protocols/san/iscsi/services/d08434fae1-a8a8-11fg-aa26-005055fhs3e5: got Expected error.' 
+ assert msg in error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi_security.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi_security.py new file mode 100644 index 000000000..4cc168f2e --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_iscsi_security.py @@ -0,0 +1,195 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_iscsi_security ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible, \ + create_and_apply, create_module, expect_and_capture_ansible_exception, call_main, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, \ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_iscsi_security \ + import NetAppONTAPIscsiSecurity as iscsi_object, main as iscsi_module_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'get_uuid': (200, {"records": [{"uuid": "e2e89ccc-db35-11e9"}]}, None), + 'get_initiator': (200, {"records": [ + { + "svm": { + "uuid": "e2e89ccc-db35-11e9", + "name": "test_ansible" + }, + "initiator": "eui.0123456789abcdef", + "authentication_type": "chap", + "chap": { + 
"inbound": { + "user": "test_user_1" + }, + "outbound": { + "user": "test_user_2" + } + }, + "initiator_address": { + "ranges": [ + { + "start": "10.125.10.0", + "end": "10.125.10.10", + "family": "ipv4" + }, + { + "start": "10.10.10.7", + "end": "10.10.10.7", + "family": "ipv4" + } + ] + } + }], "num_records": 1}, None), + 'get_initiator_no_user': (200, {"records": [ + { + "svm": { + "uuid": "e2e89ccc-db35-11e9", + "name": "test_ansible" + }, + "initiator": "eui.0123456789abcdef", + "authentication_type": "chap", + "chap": { + }, + "initiator_address": { + "ranges": [ + ] + } + }], "num_records": 1}, None), + 'get_initiator_none': (200, {"records": [ + { + "svm": { + "uuid": "e2e89ccc-db35-11e9", + "name": "test_ansible" + }, + "initiator": "eui.0123456789abcdef", + "authentication_type": "none" + }], "num_records": 1}, None), +}) + + +DEFAULT_ARGS = { + 'initiator': "eui.0123456789abcdef", + 'inbound_username': "test_user_1", + 'inbound_password': "123", + 'outbound_username': "test_user_2", + 'outbound_password': "321", + 'auth_type': "chap", + 'address_ranges': ["10.125.10.0-10.125.10.10", "10.10.10.7"], + 'hostname': 'test', + 'vserver': 'test_vserver', + 'username': 'test_user', + 'password': 'test_pass!' 
+} + + +def test_rest_successful_create(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['get_uuid']), + ('GET', 'protocols/san/iscsi/credentials', SRR['zero_records']), + ('POST', 'protocols/san/iscsi/credentials', SRR['success']), + # idempotent check + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['get_uuid']), + ('GET', 'protocols/san/iscsi/credentials', SRR['get_initiator']), + ]) + assert create_and_apply(iscsi_object, DEFAULT_ARGS)['changed'] + assert not create_and_apply(iscsi_object, DEFAULT_ARGS)['changed'] + + +def test_rest_successful_modify_address(): + '''Test successful rest modify''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['get_uuid']), + ('GET', 'protocols/san/iscsi/credentials', SRR['get_initiator']), + ('PATCH', 'protocols/san/iscsi/credentials/e2e89ccc-db35-11e9/eui.0123456789abcdef', SRR['success']) + ]) + args = {'address_ranges': ['10.10.10.8']} + assert create_and_apply(iscsi_object, DEFAULT_ARGS, args)['changed'] + + +def test_rest_successful_modify_inbound_user(): + '''Test successful rest modify''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['get_uuid']), + ('GET', 'protocols/san/iscsi/credentials', SRR['get_initiator']), + ('PATCH', 'protocols/san/iscsi/credentials/e2e89ccc-db35-11e9/eui.0123456789abcdef', SRR['success']) + ]) + args = {'inbound_username': 'test_user_3'} + assert create_and_apply(iscsi_object, DEFAULT_ARGS, args)['changed'] + + +def test_rest_successful_modify_outbound_user(): + '''Test successful rest modify''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['get_uuid']), + ('GET', 'protocols/san/iscsi/credentials', SRR['get_initiator']), + ('PATCH', 'protocols/san/iscsi/credentials/e2e89ccc-db35-11e9/eui.0123456789abcdef', SRR['success']) + ]) + args = 
{'outbound_username': 'test_user_3'} + assert create_and_apply(iscsi_object, DEFAULT_ARGS, args)['changed'] + + +def test_rest_successful_modify_chap_no_user(): + '''Test successful rest modify''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['get_uuid']), + ('GET', 'protocols/san/iscsi/credentials', SRR['get_initiator_no_user']), + ('PATCH', 'protocols/san/iscsi/credentials/e2e89ccc-db35-11e9/eui.0123456789abcdef', SRR['success']) + ]) + assert create_and_apply(iscsi_object, DEFAULT_ARGS)['changed'] + + +def test_rest_successful_modify_chap(): + '''Test successful rest modify''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['get_uuid']), + ('GET', 'protocols/san/iscsi/credentials', SRR['get_initiator_none']), + ('PATCH', 'protocols/san/iscsi/credentials/e2e89ccc-db35-11e9/eui.0123456789abcdef', SRR['success']) + ]) + assert call_main(iscsi_module_main, DEFAULT_ARGS)['changed'] + + +def test_all_methods_catch_exception(): + ''' test exception in get/create/modify/delete ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['get_uuid']), + ('GET', 'svm/svms', SRR['generic_error']), + ('GET', 'svm/svms', SRR['empty_records']), + # GET/POST/PATCH error. 
+ ('GET', 'protocols/san/iscsi/credentials', SRR['generic_error']), + ('POST', 'protocols/san/iscsi/credentials', SRR['generic_error']), + ('PATCH', 'protocols/san/iscsi/credentials/e2e89ccc-db35-11e9/eui.0123456789abcdef', SRR['generic_error']), + ('DELETE', 'protocols/san/iscsi/credentials/e2e89ccc-db35-11e9/eui.0123456789abcdef', SRR['generic_error']) + ]) + sec_obj = create_module(iscsi_object, DEFAULT_ARGS) + assert 'Error on fetching svm uuid' in expect_and_capture_ansible_exception(sec_obj.get_svm_uuid, 'fail')['msg'] + assert 'Error on fetching svm uuid, SVM not found' in expect_and_capture_ansible_exception(sec_obj.get_svm_uuid, 'fail')['msg'] + assert 'Error on fetching initiator' in expect_and_capture_ansible_exception(sec_obj.get_initiator, 'fail')['msg'] + assert 'Error on creating initiator' in expect_and_capture_ansible_exception(sec_obj.create_initiator, 'fail')['msg'] + assert 'Error on modifying initiator' in expect_and_capture_ansible_exception(sec_obj.modify_initiator, 'fail', {}, {})['msg'] + assert 'Error on deleting initiator' in expect_and_capture_ansible_exception(sec_obj.delete_initiator, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_job_schedule.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_job_schedule.py new file mode 100644 index 000000000..4ccec5115 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_job_schedule.py @@ -0,0 +1,451 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_job_schedule ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: 
disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible,\ + create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_job_schedule \ + import NetAppONTAPJob as job_module, main as uut_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'name': 'test_job', + 'job_minutes': [25], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never' +} + + +cron_info = { + 'num-records': 1, + 'attributes-list': { + 'job-schedule-cron-info': { + 'job-schedule-cluster': 'cluster1', + 'job-schedule-name': 'test_job', + 'job-schedule-cron-minute': {'cron-minute': 25} + } + } +} + + +multiple_cron_info = { + 'num-records': 1, + 'attributes-list': { + 'job-schedule-cron-info': { + 'job-schedule-cluster': 'cluster1', + 'job-schedule-name': 'test_job', + 'job-schedule-cron-minute': [ + {'cron-minute': '25'}, + {'cron-minute': '35'} + ], + 'job-schedule-cron-month': [ + {'cron-month': '5'}, + {'cron-month': '10'} + ] + } + } +} + + +multiple_cron_minutes_info = { + 'num-records': 1, + 'attributes-list': { + 'job-schedule-cron-info': { + 'job-schedule-cluster': 'cluster1', + 'job-schedule-name': 'test_job', + 'job-schedule-cron-minute': [{'cron-minute': str(x)} for x in range(60)], + 
'job-schedule-cron-month': [ + {'cron-month': '5'}, + {'cron-month': '10'} + ] + } + } +} + + +ZRR = zapi_responses({ + 'cron_info': build_zapi_response(cron_info), + 'multiple_cron_info': build_zapi_response(multiple_cron_info), + 'multiple_cron_minutes_info': build_zapi_response(multiple_cron_minutes_info) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors + with python 2.6, dictionaries are not ordered + ''' + fragments = ["missing required arguments:", "hostname", "name"] + error = create_module(job_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_get_nonexistent_job(): + ''' Test if get_job_schedule returns None for non-existent job ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['no_records']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS) + assert job_obj.get_job_schedule() is None + + +def test_get_existing_job(): + ''' Test if get_job_schedule retuns job details for existing job ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['cron_info']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS) + result = job_obj.get_job_schedule() + assert result['name'] == DEFAULT_ARGS['name'] + assert result['job_minutes'] == DEFAULT_ARGS['job_minutes'] + + +def test_get_existing_job_multiple_minutes(): + # sourcery skip: class-extract-method + ''' Test if get_job_schedule retuns job details for existing job ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['multiple_cron_info']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS) + result = job_obj.get_job_schedule() + assert result['name'] == DEFAULT_ARGS['name'] + assert result['job_minutes'] == [25, 35] + assert result['job_months'] == [5, 10] + + +def test_get_existing_job_multiple_minutes_0_offset(): + ''' Test if get_job_schedule retuns job details for existing job ''' + register_responses([ + ('job-schedule-cron-get-iter', 
ZRR['multiple_cron_info']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS, {'month_offset': 0}) + result = job_obj.get_job_schedule() + assert result['name'] == DEFAULT_ARGS['name'] + assert result['job_minutes'] == [25, 35] + assert result['job_months'] == [5, 10] + + +def test_get_existing_job_multiple_minutes_1_offset(): + ''' Test if get_job_schedule retuns job details for existing job ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['multiple_cron_info']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS, {'month_offset': 1}) + result = job_obj.get_job_schedule() + assert result['name'] == DEFAULT_ARGS['name'] + assert result['job_minutes'] == [25, 35] + assert result['job_months'] == [5 + 1, 10 + 1] + + +def test_create_error_missing_param(): + ''' Test if create throws an error if job_minutes is not specified''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['no_records']) + ]) + args = DEFAULT_ARGS.copy() + del args['job_minutes'] + error = 'Error: missing required parameter job_minutes for create' + assert error in create_and_apply(job_module, args, fail=True)['msg'] + + +def test_successful_create(): + ''' Test successful create ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['no_records']), + ('job-schedule-cron-create', ZRR['success']) + ]) + assert create_and_apply(job_module, DEFAULT_ARGS)['changed'] + + +def test_successful_create_0_offset(): + ''' Test successful create ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['no_records']), + ('job-schedule-cron-create', ZRR['success']) + ]) + args = {'month_offset': 0, 'job_months': [0, 8]} + assert create_and_apply(job_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_create_1_offset(): + ''' Test successful create ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['no_records']), + ('job-schedule-cron-create', ZRR['success']) + ]) + args = {'month_offset': 1, 'job_months': [1, 9], 'cluster': 'cluster1'} 
+ assert create_and_apply(job_module, DEFAULT_ARGS, args)['changed'] + + +def test_create_idempotency(): + ''' Test create idempotency ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['cron_info']) + ]) + assert not create_and_apply(job_module, DEFAULT_ARGS)['changed'] + + +def test_successful_delete(): + ''' Test delete existing job ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['cron_info']), + ('job-schedule-cron-destroy', ZRR['success']) + ]) + assert create_and_apply(job_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_delete_idempotency(): + ''' Test delete idempotency ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['no_records']) + ]) + assert not create_and_apply(job_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_successful_modify(): + ''' Test successful modify job_minutes ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['cron_info']), + ('job-schedule-cron-modify', ZRR['success']) + ]) + assert create_and_apply(job_module, DEFAULT_ARGS, {'job_minutes': '20'})['changed'] + + +def test_modify_idempotency(): + ''' Test modify idempotency ''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['cron_info']) + ]) + assert not create_and_apply(job_module, DEFAULT_ARGS)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_negative_no_netapp_lib(mock_has): + mock_has.return_value = False + error = 'the python NetApp-Lib module is required' + assert error in create_module(job_module, DEFAULT_ARGS, fail=True)['msg'] + + +def test_zapi_get_all_minutes(): + register_responses([ + ('job-schedule-cron-get-iter', ZRR['multiple_cron_minutes_info']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS) + schedule = job_obj.get_job_schedule() + assert schedule + assert 'job_minutes' in schedule + assert schedule['job_minutes'] == [-1] + + +def test_if_all_methods_catch_exception_zapi(): + ''' test error 
zapi - get/create/modify/delete''' + register_responses([ + ('job-schedule-cron-get-iter', ZRR['error']), + ('job-schedule-cron-create', ZRR['error']), + ('job-schedule-cron-modify', ZRR['error']), + ('job-schedule-cron-destroy', ZRR['error']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS) + + assert 'Error fetching job schedule' in expect_and_capture_ansible_exception(job_obj.get_job_schedule, 'fail')['msg'] + assert 'Error creating job schedule' in expect_and_capture_ansible_exception(job_obj.create_job_schedule, 'fail')['msg'] + assert 'Error modifying job schedule' in expect_and_capture_ansible_exception(job_obj.modify_job_schedule, 'fail', {}, {})['msg'] + assert 'Error deleting job schedule' in expect_and_capture_ansible_exception(job_obj.delete_job_schedule, 'fail')['msg'] + + +SRR = rest_responses({ + 'get_schedule': (200, {"records": [ + { + "uuid": "010df156-e0a9-11e9-9f70-005056b3df08", + "name": "test_job", + "cron": { + "minutes": [25], + "hours": [0], + "weekdays": [0], + "months": [5, 6] + } + } + ], "num_records": 1}, None), + 'get_all_minutes': (200, {"records": [ + { + "uuid": "010df156-e0a9-11e9-9f70-005056b3df08", + "name": "test_job", + "cron": { + "minutes": range(60), + "hours": [0], + "weekdays": [0], + "months": [5, 6] + } + } + ], "num_records": 1}, None) +}) + + +DEFAULT_ARGS_REST = { + 'name': 'test_job', + 'job_minutes': [25], + 'job_hours': [0], + 'job_days_of_week': [0], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always' +} + + +def test_rest_successful_create(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['zero_records']), + ('POST', 'cluster/schedules', SRR['success']), + ]) + assert create_and_apply(job_module, DEFAULT_ARGS_REST)['changed'] + + +def test_rest_create_idempotency(): + '''Test rest create idempotency''' + register_responses([ + ('GET', 'cluster', 
SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['get_schedule']) + ]) + assert not create_and_apply(job_module, DEFAULT_ARGS_REST)['changed'] + + +def test_rest_get_0_offset(): + '''Test rest get using month offset''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['get_schedule']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS_REST, {'month_offset': 0}) + record = job_obj.get_job_schedule_rest() + assert record + assert record['job_months'] == [x - 1 for x in SRR['get_schedule'][1]['records'][0]['cron']['months']] + + +def test_rest_get_1_offset(): + '''Test rest get using month offset''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['get_schedule']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS_REST, {'month_offset': 1}) + record = job_obj.get_job_schedule_rest() + assert record + assert record['job_months'] == SRR['get_schedule'][1]['records'][0]['cron']['months'] + + +def test_rest_create_all_minutes(): + '''Test rest create using month offset''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['zero_records']), + ('POST', 'cluster/schedules', SRR['success']) + ]) + assert create_and_apply(job_module, DEFAULT_ARGS_REST, {'job_minutes': [-1]})['changed'] + + +def test_rest_create_0_offset(): + '''Test rest create using month offset''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['zero_records']), + ('POST', 'cluster/schedules', SRR['success']) + ]) + args = {'month_offset': 0, 'job_months': [0, 8]} + assert create_and_apply(job_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_rest_create_1_offset(): + '''Test rest create using month offset''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['zero_records']), + ('POST', 'cluster/schedules', SRR['success']) + 
]) + args = {'month_offset': 1, 'job_months': [1, 9]} + assert create_and_apply(job_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_rest_modify_0_offset(): + '''Test rest modify using month offset''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['get_schedule']), + ('PATCH', 'cluster/schedules/010df156-e0a9-11e9-9f70-005056b3df08', SRR['success']) + ]) + args = {'month_offset': 0, 'job_months': [0, 8]} + assert create_and_apply(job_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_rest_modify_1_offset(): + '''Test rest modify using month offset''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['get_schedule']), + ('PATCH', 'cluster/schedules/010df156-e0a9-11e9-9f70-005056b3df08', SRR['success']) + ]) + args = {'month_offset': 1, 'job_months': [1, 9], 'cluster': 'cluster1'} + assert create_and_apply(job_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_negative_month_of_0(): + '''Test rest modify using month offset''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + args = {'month_offset': 1, 'job_months': [0, 9]} + error = 'Error: 0 is not a valid value in months if month_offset is set to 1' + assert error in create_module(job_module, DEFAULT_ARGS_REST, args, fail=True)['msg'] + + +def test_rest_get_all_minutes(): + '''Test rest modify using month offset''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/schedules', SRR['get_all_minutes']) + ]) + args = {'month_offset': 1, 'job_months': [1, 9]} + job_obj = create_module(job_module, DEFAULT_ARGS_REST, args) + schedule = job_obj.get_job_schedule() + assert schedule + assert 'job_minutes' in schedule + assert schedule['job_minutes'] == [-1] + + +def test_if_all_methods_catch_exception_rest(): + ''' test error zapi - get/create/modify/delete''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 
'cluster/schedules', SRR['generic_error']), + ('POST', 'cluster/schedules', SRR['generic_error']), + ('PATCH', 'cluster/schedules/abcd', SRR['generic_error']), + ('DELETE', 'cluster/schedules/abcd', SRR['generic_error']) + ]) + job_obj = create_module(job_module, DEFAULT_ARGS_REST) + job_obj.uuid = 'abcd' + assert 'Error fetching job schedule' in expect_and_capture_ansible_exception(job_obj.get_job_schedule, 'fail')['msg'] + assert 'Error creating job schedule' in expect_and_capture_ansible_exception(job_obj.create_job_schedule, 'fail')['msg'] + assert 'Error modifying job schedule' in expect_and_capture_ansible_exception(job_obj.modify_job_schedule, 'fail', {}, {})['msg'] + assert 'Error deleting job schedule' in expect_and_capture_ansible_exception(job_obj.delete_job_schedule, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_interface.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_interface.py new file mode 100644 index 000000000..ada9b4328 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_interface.py @@ -0,0 +1,107 @@ +# Copyright: NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible, \ + create_and_apply, create_module, expect_and_capture_ansible_exception, call_main, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, \ + register_responses +from 
ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_kerberos_interface \ + import NetAppOntapKerberosInterface as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always', + 'enabled': False, + 'interface_name': 'lif1', + 'vserver': 'ansibleSVM' +} + + +SRR = rest_responses({ + 'kerberos_int_conf_enabled': (200, {"records": [{ + "spn": "nfs/life2@RELAM2", + "machine_account": "account1", + "interface": { + "ip": {"address": "10.10.10.7"}, + "name": "lif1", + "uuid": "1cd8a442" + }, + "enabled": True, + }], "num_records": 1}, None), + 'kerberos_int_conf_disabled': (200, {"records": [{ + "interface": { + "ip": {"address": "10.10.10.7"}, + "name": "lif1", + "uuid": "1cd8a442" + }, + "enabled": False, + }], "num_records": 1}, None), +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_enable_kerberos_int_conf(): + ''' enable kerberos int conf ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/nfs/kerberos/interfaces', SRR['kerberos_int_conf_disabled']), + ('PATCH', 'protocols/nfs/kerberos/interfaces/1cd8a442', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/nfs/kerberos/interfaces', SRR['kerberos_int_conf_enabled']) + ]) + args = { + "spn": "nfs/life2@RELAM2", + "machine_account": "account1", + "admin_username": "user1", + 
"admin_password": "pass1", + "enabled": True + } + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_all_methods_catch_exception(): + ''' test exception in get/create/modify/delete ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + # GET/PATCH error. + ('GET', 'protocols/nfs/kerberos/interfaces', SRR['generic_error']), + ('PATCH', 'protocols/nfs/kerberos/interfaces/1cd8a442', SRR['generic_error']) + ]) + ker_obj = create_module(my_module, DEFAULT_ARGS) + ker_obj.uuid = '1cd8a442' + assert 'Error fetching kerberos interface' in expect_and_capture_ansible_exception(ker_obj.get_kerberos_interface, 'fail')['msg'] + assert 'Error modifying kerberos interface' in expect_and_capture_ansible_exception(ker_obj.modify_kerberos_interface, 'fail')['msg'] + + +def test_error_ontap97(): + ''' test module supported from 9.7 ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']) + ]) + assert 'requires ONTAP 9.7.0 or later' in call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py new file mode 100644 index 000000000..30f577d4c --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_kerberos_realm.py @@ -0,0 +1,213 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP Kerberos Realm module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import pytest +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_kerberos_realm \ + import NetAppOntapKerberosRealm as my_module 
# module under test +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible,\ + create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'https': True, + 'validate_certs': False, + 'use_rest': 'never', + 'realm': 'NETAPP.COM', + 'vserver': 'vserver1', + 'kdc_ip': '192.168.0.1', + 'kdc_vendor': 'other' +} + +kerberos_info = { + 'num-records': "1", + 'attributes-list': { + 'kerberos-realm': { + 'admin-server-ip': "192.168.0.1", + 'admin-server-port': "749", + 'clock-skew': "5", + 'kdc-ip': "192.168.0.1", + 'kdc-port': "88", + 'kdc-vendor': "other", + 'password-server-ip': "192.168.0.1", + 'password-server-port': "464", + "permitted-enc-types": { + "string": ["des", "des3", "aes_128", "aes_256"] + }, + 'realm': "NETAPP.COM", + 'vserver-name': "vserver1" + } + } +} + + +ZRR = zapi_responses({ + 'kerberos_info': build_zapi_response(kerberos_info) +}) + + +SRR = rest_responses({ + 'kerberos_info': (200, {"records": [{ + "svm": { + "uuid": "89368b07", + "name": "svm3" + }, + "name": "name1", + "kdc": { + "vendor": "microsoft", + "ip": "10.193.115.116", + "port": 88 + }, + "comment": "mohan", + "ad_server": { + "name": "netapp", + "address": "10.193.115.116" + 
} + }], "num_records": 1}, None) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "realm", "vserver"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_module_fail_when_state_present_required_args_missing(): + ''' required arguments are reported as errors ''' + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['kdc_ip'] + del DEFAULT_ARGS_COPY['kdc_vendor'] + error = "state is present but all of the following are missing: kdc_vendor, kdc_ip" + assert error in create_module(my_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + + +def test_get_existing_realm(): + ''' Test if get_krbrealm returns details for existing kerberos realm ''' + register_responses([ + ('kerberos-realm-get-iter', ZRR['kerberos_info']) + ]) + kerb_obj = create_module(my_module, DEFAULT_ARGS) + assert kerb_obj.get_krbrealm() + + +def test_successfully_modify_realm(): + ''' Test modify realm successful for modifying kdc_ip. 
''' + register_responses([ + ('kerberos-realm-get-iter', ZRR['kerberos_info']), + ('kerberos-realm-modify', ZRR['success']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'kdc_ip': '10.1.1.20'}) + + +def test_successfully_delete_realm(): + ''' Test successfully delete realm ''' + register_responses([ + ('kerberos-realm-get-iter', ZRR['kerberos_info']), + ('kerberos-realm-delete', ZRR['success']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'}) + + +def test_successfully_create_realm(): + ''' Test successfully create realm ''' + register_responses([ + ('kerberos-realm-get-iter', ZRR['no_records']), + ('kerberos-realm-create', ZRR['success']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS) + + +def test_required_if(): + ''' required arguments are reported as errors ''' + error = "kdc_vendor is microsoft but all of the following are missing: ad_server_ip, ad_server_name" + assert error in create_module(my_module, DEFAULT_ARGS, {'kdc_vendor': 'microsoft'}, fail=True)['msg'] + + error = "kdc_vendor is microsoft but all of the following are missing: ad_server_name" + args = {'kdc_vendor': 'microsoft', 'ad_server_ip': '10.0.0.1'} + assert error in create_module(my_module, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('kerberos-realm-get-iter', ZRR['error']), + ('kerberos-realm-create', ZRR['error']), + ('kerberos-realm-modify', ZRR['error']), + ('kerberos-realm-delete', ZRR['error']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/nfs/kerberos/realms', SRR['generic_error']), + ('POST', 'protocols/nfs/kerberos/realms', SRR['generic_error']), + ('PATCH', 'protocols/nfs/kerberos/realms/89368b07/NETAPP.COM', SRR['generic_error']), + ('DELETE', 'protocols/nfs/kerberos/realms/89368b07/NETAPP.COM', SRR['generic_error']) + ]) + kerb_obj = create_module(my_module, DEFAULT_ARGS) + assert 'Error fetching kerberos realm' in 
expect_and_capture_ansible_exception(kerb_obj.get_krbrealm, 'fail')['msg'] + assert 'Error creating Kerberos Realm' in expect_and_capture_ansible_exception(kerb_obj.create_krbrealm, 'fail')['msg'] + assert 'Error modifying Kerberos Realm' in expect_and_capture_ansible_exception(kerb_obj.modify_krbrealm, 'fail', {})['msg'] + assert 'Error deleting Kerberos Realm' in expect_and_capture_ansible_exception(kerb_obj.delete_krbrealm, 'fail')['msg'] + + kerb_obj = create_module(my_module, DEFAULT_ARGS, {'use_rest': 'always'}) + kerb_obj.svm_uuid = '89368b07' + assert 'Error fetching kerberos realm' in expect_and_capture_ansible_exception(kerb_obj.get_krbrealm, 'fail')['msg'] + assert 'Error creating Kerberos Realm' in expect_and_capture_ansible_exception(kerb_obj.create_krbrealm, 'fail')['msg'] + assert 'Error modifying Kerberos Realm' in expect_and_capture_ansible_exception(kerb_obj.modify_krbrealm, 'fail', {})['msg'] + assert 'Error deleting Kerberos Realm' in expect_and_capture_ansible_exception(kerb_obj.delete_krbrealm, 'fail')['msg'] + + +def test_successfully_create_realm_rest(): + ''' Test successfully create realm ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/nfs/kerberos/realms', SRR['empty_records']), + ('POST', 'protocols/nfs/kerberos/realms', SRR['success']), + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'use_rest': 'always'}) + + +def test_successfully_modify_realm_rest(): + ''' Test modify realm successful for modifying kdc_ip. ''' + register_responses([ + # modify ip. + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/nfs/kerberos/realms', SRR['kerberos_info']), + ('PATCH', 'protocols/nfs/kerberos/realms/89368b07/NETAPP.COM', SRR['success']), + # modify port. 
+ ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/nfs/kerberos/realms', SRR['kerberos_info']), + ('PATCH', 'protocols/nfs/kerberos/realms/89368b07/NETAPP.COM', SRR['success']), + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'use_rest': 'always', 'kdc_ip': '10.1.1.20'}) + assert create_and_apply(my_module, DEFAULT_ARGS, {'use_rest': 'always', 'kdc_port': '8088'}) + + +def test_successfully_delete_realm_rest(): + ''' Test successfully delete realm ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'protocols/nfs/kerberos/realms', SRR['kerberos_info']), + ('DELETE', 'protocols/nfs/kerberos/realms/89368b07/NETAPP.COM', SRR['success']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'use_rest': 'always', 'state': 'absent'}) diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ldap_client.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ldap_client.py new file mode 100644 index 000000000..4df8d9fee --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ldap_client.py @@ -0,0 +1,481 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_ldap_client ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, call_main, create_module, expect_and_capture_ansible_exception, AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, 
register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ldap_client \ + import NetAppOntapLDAPClient as client_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'ldap_record': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "vserver" + }, + "servers": ['10.193.115.116'], + "schema": 'RFC-2307', + "target": { + "name": "20:05:00:50:56:b3:0c:fa" + } + } + ], + "num_records": 1 + }, None + ), + "no_record": ( + 200, + {"num_records": 0}, + None), + "svm": ( + 200, + {"records": [{"uuid": "671aa46e"}]}, + None) +}) + + +ldap_client_info = {'num-records': 1, + 'attributes-list': + {'ldap-client': + {'ldap-client-config': 'test_ldap', + 'schema': 'RFC-2307', + 'ldap-servers': [{"ldap-server": '10.193.115.116'}, ] + } + }, + } + +ZRR = zapi_responses({ + 'ldap_client_info': build_zapi_response(ldap_client_info) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'vserver', + 'name': 'test_ldap', + 'schema': 'RFC-2307', + 'use_rest': 'never', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + client_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_get_nonexistent_client(): + ''' Test if get ldap client returns None for non-existent job ''' + register_responses([ + ('ldap-client-get-iter', ZRR['empty']) + ]) + ldap_obj = 
create_module(client_module, DEFAULT_ARGS) + result = ldap_obj.get_ldap_client() + assert result is None + + +def test_error_name_required_zapi(): + ''' name is required with ZAPI ''' + error = 'Error: name is a required field with ZAPI.' + assert error in create_module(client_module, DEFAULT_ARGS, {'name': None}, fail=True)['msg'] + + +def test_get_existing_client(): + ''' Test if get ldap client returns None for non-existent job ''' + register_responses([ + ('ldap-client-get-iter', ZRR['ldap_client_info']) + ]) + ldap_obj = create_module(client_module, DEFAULT_ARGS) + result = ldap_obj.get_ldap_client() + assert result + + +def test_successfully_create_zapi(): + register_responses([ + ('ldap-client-get-iter', ZRR['empty']), + ('ldap-client-create', ZRR['success']), + ]) + module_args = { + 'name': 'test_ldap', + 'ldap_servers': ['10.193.115.116'], + 'schema': 'RFC-2307' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_create_zapi(): + register_responses([ + ('ldap-client-get-iter', ZRR['empty']), + ('ldap-client-create', ZRR['error']), + ]) + module_args = { + 'name': 'test_ldap', + 'ldap_servers': ['10.193.115.116'], + 'schema': 'RFC-2307' + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error creating LDAP client" + assert msg in error + + +def test_error_create_ad_zapi(): + register_responses([ + ('ldap-client-get-iter', ZRR['empty']), + ('ldap-client-create', ZRR['error']), + ]) + module_args = { + 'name': 'test_ldap', + 'ad_domain': 'ad.netapp.com', + 'preferred_ad_servers': ['10.193.115.116'], + 'schema': 'RFC-2307' + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error creating LDAP client" + assert msg in error + + +def test_create_idempotency(): + register_responses([ + ('ldap-client-get-iter', ZRR['ldap_client_info']), + ]) + module_args = { + 'name': 'test_ldap', + 'servers': ['10.193.115.116'], + 'schema': 'RFC-2307', + 'state': 
'present' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_delete(): + register_responses([ + ('ldap-client-get-iter', ZRR['ldap_client_info']), + ('ldap-client-delete', ZRR['success']), + ]) + module_args = { + 'name': 'test_ldap', + 'ldap_servers': ['10.193.115.116'], + 'schema': 'RFC-2307', + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_delete_zapi(): + register_responses([ + ('ldap-client-get-iter', ZRR['ldap_client_info']), + ('ldap-client-delete', ZRR['error']), + ]) + module_args = { + 'name': 'test_ldap', + 'ldap_servers': ['10.193.115.116'], + 'schema': 'RFC-2307', + 'state': 'absent' + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error deleting LDAP client configuration" + assert msg in error + + +def test_delete_idempotency(): + register_responses([ + ('ldap-client-get-iter', ZRR['empty']), + ]) + module_args = { + 'state': 'absent' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ldap_servers(): + register_responses([ + ('ldap-client-get-iter', ZRR['ldap_client_info']), + ('ldap-client-modify', ZRR['success']), + ]) + module_args = { + 'name': 'test_ldap', + 'ldap_servers': ['10.195.64.121'], + 'schema': 'RFC-2307', + 'ldaps_enabled': True, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ldap_ad_servers(): + register_responses([ + ('ldap-client-get-iter', ZRR['ldap_client_info']), + ('ldap-client-modify', ZRR['success']), + ]) + module_args = { + 'name': 'test_ldap', + 'ad_domain': 'ad.netapp.com', + 'preferred_ad_servers': ['10.195.64.121'], + 'schema': 'RFC-2307', + 'ldaps_enabled': True, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ldap_schema_zapi(): + register_responses([ + ('ldap-client-get-iter', ZRR['ldap_client_info']), + ('ldap-client-modify', 
ZRR['success']), + ]) + module_args = { + 'name': 'test_ldap', + 'ldap_servers': ['10.195.64.121'], + 'schema': 'MS-AD-BIS', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('ldap-client-create', ZRR['error']), + ('ldap-client-delete', ZRR['error']), + ('ldap-client-modify', ZRR['error']) + ]) + module_args = {'name': 'test_ldap'} + my_obj = create_module(client_module, DEFAULT_ARGS, module_args) + + error = expect_and_capture_ansible_exception(my_obj.create_ldap_client, 'fail')['msg'] + assert 'Error creating LDAP client test_ldap: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.delete_ldap_client, 'fail')['msg'] + assert 'Error deleting LDAP client configuration test_ldap: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.modify_ldap_client, 'fail', 'ldap-client-modify')['msg'] + assert 'Error modifying LDAP client test_ldap: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' in error + + +ARGS_REST = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always', + 'vserver': 'vserver', + 'servers': ['10.193.115.116'], + 'schema': 'RFC-2307', +} + + +def test_get_nonexistent_ldap_config_rest(): + ''' Test if get_unix_user returns None for non-existent user ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['empty_records']), + ]) + ldap_obj = create_module(client_module, ARGS_REST) + result = ldap_obj.get_ldap_client_rest() + assert result is None + + +def test_get_existent_ldap_config_rest(): + ''' Test if get_unix_user returns existent user ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['ldap_record']), + ]) + ldap_obj = create_module(client_module, ARGS_REST) + result = ldap_obj.get_ldap_client_rest() + assert result + + +def test_get_error_ldap_config_rest(): + ''' Test if get_unix_user returns existent user ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['generic_error']), + ]) + error = call_main(my_main, ARGS_REST, fail=True)['msg'] + msg = "Error on getting idap client info:" + assert msg in error + + +def test_create_ldap_client_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['empty_records']), + ('GET', 'svm/svms', SRR['svm']), + ('POST', 'name-services/ldap', SRR['empty_good']), + ]) + module_args = { + 'ldap_servers': ['10.193.115.116'], + 'schema': 'RFC-2307' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_error_create_ldap_client_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['empty_records']), + ('GET', 'svm/svms', SRR['svm']), + ('POST', 'name-services/ldap', SRR['generic_error']), + ]) + module_args = { + 
'servers': ['10.193.115.116'], + 'schema': 'RFC-2307' + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = "Error on creating ldap client:" + assert msg in error + + +def test_delete_ldap_client_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['ldap_record']), + ('DELETE', 'name-services/ldap/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = { + 'servers': ['10.193.115.116'], + 'schema': 'RFC-2307', + 'state': 'absent' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_error_delete_ldap_client_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['ldap_record']), + ('DELETE', 'name-services/ldap/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']), + ]) + module_args = { + 'servers': ['10.193.115.116'], + 'schema': 'RFC-2307', + 'state': 'absent' + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = "Error on deleting ldap client rest:" + assert msg in error + + +def test_create_idempotent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['ldap_record']), + ]) + module_args = { + 'state': 'present', + 'servers': ['10.193.115.116'], + 'schema': 'RFC-2307', + } + assert not call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_error_on_cluster_vserver(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['empty_records']), + ('GET', 'svm/svms', SRR['empty_records']), + ]) + module_args = { + 'state': 'present', + 'servers': ['10.193.115.116'], + 'schema': 'RFC-2307', + } + assert 'is not a data vserver.' 
in call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + + +def test_delete_idempotent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['empty_records']) + ]) + module_args = { + 'state': 'absent' + } + assert not call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_modify_schema_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['ldap_record']), + ('PATCH', 'name-services/ldap/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']) + ]) + module_args = { + 'state': 'present', + 'servers': ['10.193.115.116'], + 'schema': 'AD-IDMU', + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_modify_ldap_servers_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['ldap_record']), + ('PATCH', 'name-services/ldap/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']) + ]) + module_args = { + 'state': 'present', + 'servers': ['10.195.64.121'], + 'schema': 'AD-IDMU', + 'ldaps_enabled': True, + 'skip_config_validation': True + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_negative_modify_ldap_servers_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['ldap_record']), + ('PATCH', 'name-services/ldap/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']) + ]) + module_args = { + 'state': 'present', + 'servers': ['10.195.64.121'], + 'schema': 'AD-IDMU', + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + msg = "Error on modifying ldap client config:" + assert msg in error + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_NETAPP_LIB', False) +def test_module_fail_when_netapp_lib_missing(): + ''' required lib missing ''' + module_args = { + 'use_rest': 'never', + } + assert 'Error: the python 
NetApp-Lib module is required. Import error: None' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_error_no_server(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'name-services/ldap', SRR['ldap_record']), + ]) + args = dict(ARGS_REST) + args.pop('servers') + error = 'Required one of servers or ad_domain' + assert error in call_main(my_main, args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_license.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_license.py new file mode 100644 index 000000000..1683d2577 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_license.py @@ -0,0 +1,432 @@ +# (c) 2022-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP license Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import sys +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_no_warnings, call_main, create_module, expect_and_capture_ansible_exception, patch_ansible, print_warnings +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, build_zapi_error, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_license import NetAppOntapLicense as my_module, 
main as my_main, HAS_DEEPDIFF + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +def license_status(fcp_method): + return { + 'license-v2-status': [ + {'license-v2-status-info': + { + 'package': 'base', + 'method': 'site' + }}, + {'license-v2-status-info': + { + 'package': 'capacitypool', + 'method': 'none' + }}, + {'license-v2-status-info': + { + 'package': 'cifs', + 'method': 'site' + }}, + {'license-v2-status-info': + { + 'package': 'fcp', + 'method': fcp_method + }}, + ] + } + + +ZRR = zapi_responses({ + 'license_status_fcp_none': build_zapi_response(license_status('none')), + 'license_status_fcp_site': build_zapi_response(license_status('site')), + 'error_object_not_found': build_zapi_error('15661', 'license is not active') +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', +} + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + module_args = { + "use_rest": "never" + } + assert 'Error: the python NetApp-Lib module is required. 
Import error: None' == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_module_add_license_zapi(): + ''' Test add license ''' + register_responses([ + ('ZAPI', 'license-v2-status-list-info', ZRR['license_status_fcp_none']), + ('ZAPI', 'license-v2-add', ZRR['success']), + ('ZAPI', 'license-v2-status-list-info', ZRR['license_status_fcp_site']), + ]) + module_args = { + 'use_rest': 'never', + 'license_codes': 'LICENSECODE', + } + print('ZRR', build_zapi_response(license_status('site'))[0].to_string()) + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_module_add_license_idempotent_zapi(): + ''' Test add license idempotent ''' + register_responses([ + ('ZAPI', 'license-v2-status-list-info', ZRR['license_status_fcp_site']), + ('ZAPI', 'license-v2-add', ZRR['success']), + ('ZAPI', 'license-v2-status-list-info', ZRR['license_status_fcp_site']), + ]) + module_args = { + 'use_rest': 'never', + 'license_codes': 'LICENSECODE', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_module_remove_license_zapi(): + ''' Test remove license ''' + register_responses([ + ('ZAPI', 'license-v2-status-list-info', ZRR['license_status_fcp_site']), + ('ZAPI', 'license-v2-delete', ZRR['success']), + ('ZAPI', 'license-v2-delete', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'serial_number': '1-8-000000', + 'license_names': 'cifs,fcp', + 'state': 'absent', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_module_remove_license_idempotent_zapi(): + ''' Test remove license idempotent ''' + register_responses([ + ('ZAPI', 'license-v2-status-list-info', ZRR['license_status_fcp_site']), + ('ZAPI', 'license-v2-delete', ZRR['error_object_not_found']), + ('ZAPI', 'license-v2-delete', ZRR['error_object_not_found']), + ]) + module_args = { + 'use_rest': 'never', + 'serial_number': '1-8-000000', + 'license_names': 'cifs,fcp', + 'state': 'absent', + } + assert 
not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_module_remove_unused_expired_zapi(): + ''' Test remove unused expired license ''' + register_responses([ + ('ZAPI', 'license-v2-status-list-info', ZRR['license_status_fcp_site']), + ('ZAPI', 'license-v2-delete-unused', ZRR['success']), + ('ZAPI', 'license-v2-delete-expired', ZRR['success']), + ('ZAPI', 'license-v2-status-list-info', ZRR['license_status_fcp_none']), + ]) + module_args = { + 'use_rest': 'never', + 'remove_unused': True, + 'remove_expired': True, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_module_try_to_remove_non_existent_package_license_zapi(): + ''' Try to remove non existent license ''' + register_responses([ + ('ZAPI', 'license-v2-delete', ZRR['error_object_not_found']), + ]) + module_args = { + 'use_rest': 'never', + 'serial_number': '1-8-000000', + 'license_names': 'cifs', + 'state': 'absent', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + license_exist = my_obj.remove_licenses('cifs') + assert not license_exist + + +def test_module_error_add_license_zapi(): + ''' Test error add license ''' + register_responses([ + ('ZAPI', 'license-v2-add', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'license_codes': 'random', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert 'Error adding licenses' in expect_and_capture_ansible_exception(my_obj.add_licenses, 'fail')['msg'] + + +def test_module_error_remove_license_zapi(): + ''' Test error remove license ''' + register_responses([ + ('ZAPI', 'license-v2-delete', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'serial_number': '1-8-000000', + 'license_names': 'random', + 'state': 'absent', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert 'Error removing license' in expect_and_capture_ansible_exception(my_obj.remove_licenses, 'fail', 'random')['msg'] + + +def 
test_module_error_get_and_remove_unused_expired_license_zapi(): + ''' Test error get and remove unused/expired license ''' + register_responses([ + ('ZAPI', 'license-v2-status-list-info', ZRR['error']), + ('ZAPI', 'license-v2-delete-unused', ZRR['error']), + ('ZAPI', 'license-v2-delete-expired', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert 'Error checking license status' in expect_and_capture_ansible_exception(my_obj.get_licensing_status, 'fail')['msg'] + assert 'Error removing unused licenses' in expect_and_capture_ansible_exception(my_obj.remove_unused_licenses, 'fail')['msg'] + assert 'Error removing expired licenses' in expect_and_capture_ansible_exception(my_obj.remove_expired_licenses, 'fail')['msg'] + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'error_entry_does_not_exist': (404, None, "entry doesn't exist"), + 'license_record': (200, { + "num_records": 3, + "records": [ + { + "name": "base", + "scope": "cluster", + "state": "compliant" + }, + { + "name": "nfs", + "scope": "not_available", + "state": "unlicensed" + }, + { + "name": "cifs", + "scope": "site", + "state": "compliant" + }] + }, None), + 'license_record_nfs': (200, { + "num_records": 3, + "records": [ + { + "name": "base", + "scope": "cluster", + "state": "compliant" + }, + { + "name": "nfs", + "scope": "site", + "state": "compliant" + }, + { + "name": "cifs", + "scope": "site", + "state": "compliant" + }] + }, None), + 'license_record_no_nfs': (200, { + "num_records": 3, + "records": [ + { + "name": "base", + "scope": "cluster", + "state": "compliant" + }, + { + "name": "cifs", + "scope": "site", + "state": "compliant" + }] + }, None) +}, False) + + +def test_module_fail_when_unsupported_rest_present(): + ''' error if unsupported rest properties present ''' + register_responses([ + ]) + module_args = { + 'remove_unused': True, + 'remove_expired': True, + 'use_rest': 
'always' + } + error = 'REST API currently does not support' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_ensure_get_license_status_called_rest(): + ''' test get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + ]) + module_args = { + 'use_rest': 'always' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_no_warnings() + + +def test_module_error_get_license_rest(): + ''' test add license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always' + } + error = rest_error_message('', 'cluster/licensing/licenses') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_module_add_license_rest(): + ''' test add license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), # get license information + ('POST', 'cluster/licensing/licenses', SRR['empty_good']), # Apply license + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), # get updated license information + ]) + module_args = { + 'license_codes': 'LICENCECODE', + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + if HAS_DEEPDIFF: + assert_no_warnings() + + +def test_module_error_add_license_rest(): + ''' test add license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), # get license information + ('POST', 'cluster/licensing/licenses', SRR['generic_error']), # Error in adding license + ]) + module_args = { + 'license_codes': 'INVALIDLICENCECODE', + 'use_rest': 'always' + } + error = 'calling: cluster/licensing/licenses: got Expected error.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_module_remove_license(): + ''' test remove license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), + ('DELETE', 'cluster/licensing/licenses/nfs', SRR['empty_good']), # remove license + ]) + module_args = { + 'license_names': 'nfs', + 'serial_number': '1-23-45678', + 'state': 'absent', + 'use_rest': 'always' + } + print_warnings() + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + assert_no_warnings() + + +def test_module_error_remove_license_rest(): + ''' test remove license error''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), # get license information + ('DELETE', 'cluster/licensing/licenses/nfs', SRR['generic_error']), # Error in removing license + ]) + module_args = { + 'license_names': 'nfs', + 'serial_number': '1-23-45678', + 'state': 'absent', + 'use_rest': 'always' + } + error = rest_error_message('Error removing license for serial number 1-23-45678 and nfs', 'cluster/licensing/licenses/nfs') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_module_try_to_remove_license_not_present_rest(): + ''' test remove license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + ('DELETE', 'cluster/licensing/licenses/nfs', SRR['error_entry_does_not_exist']), # license not active. 
+ + ]) + module_args = { + 'license_names': 'nfs', + 'serial_number': '1-23-45678', + 'state': 'absent', + 'use_rest': 'always' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_no_warnings() + + +@patch('time.sleep') +def test_error_mismatch_in_package_list_rest(dont_sleep): + ''' test remove license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + # 2nd test + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + # 3rd test + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ]) + module_args = { + 'license_names': 'non-existent-package', + 'serial_number': '1-23-45678', + 'use_rest': 'always' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + previous_license_status = {'base': 'compliant', 'nfs': 'unlicensed', 'cifs': 'compliant'} + assert my_obj.compare_license_status(previous_license_status) == [] + previous_license_status = {'base': 'compliant', 'nfs': 'unlicensed', 'cifs': 'unlicensed'} + assert my_obj.compare_license_status(previous_license_status) == ['cifs'] + error = "Error: mismatch in license package names: 'nfs'. 
Expected:" + assert error in expect_and_capture_ansible_exception(my_obj.compare_license_status, 'fail', previous_license_status)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_license_nlf.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_license_nlf.py new file mode 100644 index 000000000..b4128499d --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_license_nlf.py @@ -0,0 +1,461 @@ +# (c) 2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP license Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import sys +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_no_warnings, assert_warning_was_raised, call_main, create_module, expect_and_capture_ansible_exception, patch_ansible, print_warnings +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_license import NetAppOntapLicense as my_module, main as my_main, HAS_DEEPDIFF + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 
'password', +} + +NLF = """ +{"statusResp":{"statusCode":"SUCCESS","message":"Information sent successfully","filter":"SOA","serialNumber":"12345678","cmatID":"0000000", +"product":"%s","version":"2","licenses":{"legacyKey":"Generate NetApp License File (NLF)","HostID":"12345678","type":"capacity", +"package":["CIFS","NFS","S3","FCP","iSCSI","NVMe_oF","FlexClone","SnapRestore","SnapMirror","SnapMirror_Sync","SnapManagerSuite","SnapVault","S3_SnapMirror","VE","TPM"], +"capacity":"1","evaluation":"false","entitlementLastUpdated":"2023-01-04T07:58:16.000-07:00","licenseScope":"node","licenseProtocol":"ENT_ENCRYPT_ED_CAP_3", +"enforcementAttributes":[{"name":"DO-Capacity-Warn","metric":"5:1", +"msg":"You've exceeded your capacity limit. Add capacity to your license to ensure your product use is unaffected.","operatingPolicy":"na"}, +{"name":"DO-Capacity-Enforce","metric":"6:1", +"msg":"You've exceeded your capacity limit. Add capacity to your license to ensure your product use is unaffected.","operatingPolicy":"ndo"}]}}, +"Signature":"xxxx"} +""".replace('\n', '') + +NLF_EE = NLF % "Enterprise Edition" +NLF_CB = NLF % "Core Bundle" + +NLF_MULTIPLE = "%s\n%s" % (NLF_EE, NLF_CB) + +NLF_DICT_NO_PRODUCT = {"statusResp": {"serialNumber": "12345678"}} +NLF_DICT_NO_SERIAL = {"statusResp": {"product": "Enterprise Edition"}} +NLF_DICT_PRODUCT_SN = {"statusResp": {"product": "Enterprise Edition", "serialNumber": "12345678"}} +NLF_DICT_PRODUCT_SN_STAR = {"statusResp": {"product": "Enterprise Edition", "serialNumber": "*"}} + + +def test_module_error_zapi_not_supported(): + ''' Test add license ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + 'license_codes': [NLF_EE], + } + error = 'Error: NLF license format is not supported with ZAPI.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args = { + 'use_rest': 'never', + 'license_codes': [NLF_EE], + 'state': 'absent' + } + error = 'Error: NLF license format is not supported with ZAPI.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'error_entry_does_not_exist': (404, None, "entry doesn't exist"), + 'license_record': (200, { + "num_records": 3, + "records": [ + { + "name": "base", + "scope": "cluster", + "state": "compliant" + }, + { + "name": "nfs", + "scope": "not_available", + "state": "unlicensed" + }, + { + "name": "cifs", + "scope": "site", + "state": "compliant" + }] + }, None), + 'license_record_nfs': (200, { + "num_records": 3, + "records": [ + { + "name": "base", + "scope": "cluster", + "state": "compliant" + }, + { + "name": "nfs", + "scope": "site", + "state": "compliant", + "licenses": [ + { + "installed_license": "Enterprise Edition", + "serial_number": "12345678", + "maximum_size": 1099511627776 + } + + ] + }, + { + "name": "cifs", + "scope": "site", + "state": "compliant" + }] + }, None), + 'license_record_no_nfs': (200, { + "num_records": 3, + "records": [ + { + "name": "base", + "scope": "cluster", + "state": "compliant" + }, + { + "name": "cifs", + "scope": "site", + "state": "compliant" + }] + }, None), + 'conflict_error': (409, None, 'license with conflicts error message'), + 'failed_to_install_error': (400, None, + 'Failed to install the license at index 0. 
The system received a licensing request with an invalid digital signature.'), +}, False) + + +def test_module_add_nlf_license_rest(): + ''' test add license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), # get license information + ('POST', 'cluster/licensing/licenses', SRR['empty_good']), # Apply license + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), # get updated license information + ]) + module_args = { + 'license_codes': [NLF_EE], + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + if HAS_DEEPDIFF: + assert_no_warnings() + + +def test_module_error_add_nlf_license_rest(): + ''' test add license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + ('POST', 'cluster/licensing/licenses', SRR['conflict_error']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), # get updated license information + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + ('POST', 'cluster/licensing/licenses', SRR['failed_to_install_error']), + ]) + module_args = { + 'license_codes': [NLF_EE], + 'use_rest': 'always' + } + error = rest_error_message('Error: some licenses were updated, but others were in conflict', 'cluster/licensing/licenses', + got='got license with conflicts error message') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + if HAS_DEEPDIFF: + assert_no_warnings() + error = rest_error_message('Error adding license', 'cluster/licensing/licenses', + got='got Failed to install the license at index 0') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + if HAS_DEEPDIFF: + assert_no_warnings() + + +def test_module_remove_nlf_license(): + ''' test remove license''' + register_responses([ + ('GET', 
'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), + ('DELETE', 'cluster/licensing/licenses', SRR['empty_good']), + ]) + module_args = { + 'license_codes': [NLF_EE], + 'state': 'absent', + 'use_rest': 'always' + } + print_warnings() + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + assert_no_warnings() + + +def test_module_remove_nlf_license_by_name(): + ''' test remove license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), + ('DELETE', 'cluster/licensing/licenses', SRR['empty_good']), + ]) + module_args = { + 'license_names': "Enterprise Edition", + 'state': 'absent', + 'use_rest': 'always', + 'serial_number': '12345678' + } + print_warnings() + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + assert_no_warnings() + + +def test_module_error_remove_nlf_license_rest(): + ''' test remove license error''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), + ('DELETE', 'cluster/licensing/licenses', SRR['generic_error']), + ]) + module_args = { + 'license_codes': [NLF_EE], + 'state': 'absent', + 'use_rest': 'always' + } + error = rest_error_message('Error removing license for serial number 12345678 and Enterprise Edition', 'cluster/licensing/licenses') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_module_try_to_remove_nlf_license_not_present_rest(): + ''' test remove license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), + ]) + module_args = { + 'license_codes': [NLF_CB], + 'state': 'absent', + 'use_rest': 'always' + } + assert not 
call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_no_warnings() + + +@patch('time.sleep') +def test_compare_license_status(dont_sleep): + ''' test remove license''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + # 2nd test + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + # deepdiff 1 + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), + # deepdiff 2 + ('GET', 'cluster/licensing/licenses', SRR['license_record_nfs']), + # retries + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record']), + # Error, no records + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ('GET', 'cluster/licensing/licenses', SRR['license_record_no_nfs']), + ]) + module_args = { + 'license_names': 'non-existent-package', + 'serial_number': '1-23-45678', + 'use_rest': 'always' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + previous_license_status = {'base': 'compliant', 'nfs': 'unlicensed', 'cifs': 'compliant'} + assert my_obj.compare_license_status(previous_license_status) == [] + previous_license_status = {'base': 'compliant', 'nfs': 'compliant', 'cifs': 'compliant'} + assert my_obj.compare_license_status(previous_license_status) == ['nfs'] + previous_license_status = {'base': 'compliant', 'nfs': 'unlicensed', 'cifs': 'compliant'} + # deepdiffs + my_obj.previous_records = [{'name': 'base', 'scope': 'cluster', 'state': 'compliant'}] + assert my_obj.compare_license_status(previous_license_status) == 
(['nfs', 'cifs'] if HAS_DEEPDIFF else ['nfs']) + if HAS_DEEPDIFF: + assert_no_warnings() + with patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_license.HAS_DEEPDIFF', False): + assert my_obj.compare_license_status(previous_license_status) == ['nfs'] + print_warnings() + assert_warning_was_raised('deepdiff is required to identify detailed changes') + # retries, success + previous_license_status = {'base': 'compliant', 'nfs': 'unlicensed', 'cifs': 'unlicensed'} + assert my_obj.compare_license_status(previous_license_status) == (['cifs', 'nfs'] if HAS_DEEPDIFF else ['cifs']) + # retries, error + error = "Error: mismatch in license package names: 'nfs'. Expected:" + assert error in expect_and_capture_ansible_exception(my_obj.compare_license_status, 'fail', previous_license_status)['msg'] + + +def test_format_post_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'use_rest': 'always', + 'state': 'absent', + 'license_codes': [] + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.format_post_error('some_error', {}) == 'some_error' + rest_error = 'The system received a licensing request with an invalid digital signature.' + error = my_obj.format_post_error(rest_error, {}) + assert error == rest_error + rest_error += ' Failed to install the license at index 0' + error = my_obj.format_post_error(rest_error, {'keys': ["'statusResp'"]}) + assert 'Original NLF contents were modified by Ansible.' in error + error = my_obj.format_post_error(rest_error, {'keys': ["'whatever'"]}) + assert 'Original NLF contents were modified by Ansible.' 
not in error + + +def test_nlf_is_installed(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'use_rest': 'always', + 'state': 'absent', + 'license_codes': [] + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert not my_obj.nlf_is_installed(NLF_DICT_NO_PRODUCT) + assert not my_obj.nlf_is_installed(NLF_DICT_NO_SERIAL) + my_obj.license_status = {} + assert not my_obj.nlf_is_installed(NLF_DICT_PRODUCT_SN) + my_obj.license_status['installed_licenses'] = [] + assert my_obj.nlf_is_installed(NLF_DICT_PRODUCT_SN_STAR) + + +def test_validate_delete_action(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'use_rest': 'always' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = 'Error: product not found in NLF file' + assert error in expect_and_capture_ansible_exception(my_obj.validate_delete_action, 'fail', NLF_DICT_NO_PRODUCT)['msg'] + error = 'Error: serialNumber not found in NLF file' + assert error in expect_and_capture_ansible_exception(my_obj.validate_delete_action, 'fail', NLF_DICT_NO_SERIAL)['msg'] + my_obj.parameters['serial_number'] = 'otherSN' + error = 'Error: mismatch is serial numbers otherSN vs 12345678' + assert error in expect_and_capture_ansible_exception(my_obj.validate_delete_action, 'fail', NLF_DICT_PRODUCT_SN)['msg'] + + +def test_scan_license_codes_for_nlf(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'use_rest': 'always' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + nlf = NLF_EE.replace("'", "\\'") + nlf = nlf.replace('"', "'") + license_code, nlf_dict, is_nlf = my_obj.scan_license_codes_for_nlf(nlf) + assert len(nlf_dict) == 2 + assert len(nlf_dict['statusResp']) == 8 + + with patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_license.HAS_AST', False): + error = 'Error: ast and json packages are required to install NLF license 
files.' + assert error in expect_and_capture_ansible_exception(my_obj.scan_license_codes_for_nlf, 'fail', nlf)['msg'] + + with patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_license.HAS_JSON', False): + error = 'Error: ast and json packages are required to install NLF license files.' + assert error in expect_and_capture_ansible_exception(my_obj.scan_license_codes_for_nlf, 'fail', nlf)['msg'] + + with patch('json.dumps') as json_dumps: + json_dumps.side_effect = Exception('exception for test') + error = 'Error: unable to encode input:' + assert error in expect_and_capture_ansible_exception(my_obj.scan_license_codes_for_nlf, 'fail', nlf)['msg'] + + with patch('json.loads') as json_loads: + json_loads.side_effect = Exception('exception for test') + error = 'Error: the license contents cannot be read. Unable to decode input:' + assert error in expect_and_capture_ansible_exception(my_obj.scan_license_codes_for_nlf, 'fail', nlf)['msg'] + + nlf = "'statusResp':" + # older versions of python report unexpected EOF while parsing + # but python 3.10.2 reports exception: invalid syntax (, line 1) + error = "Error: malformed input: 'statusResp':, exception:" + assert error in expect_and_capture_ansible_exception(my_obj.scan_license_codes_for_nlf, 'fail', nlf)['msg'] + + nlf = '"statusResp":' * 2 + error = "Error: NLF license files with multiple licenses are not supported, found 2 in" + assert error in expect_and_capture_ansible_exception(my_obj.scan_license_codes_for_nlf, 'fail', nlf)['msg'] + nlf = '"statusResp":' + ('"serialNumber":' * 2) + error = "Error: NLF license files with multiple serial numbers are not supported, found 2 in" + assert error in expect_and_capture_ansible_exception(my_obj.scan_license_codes_for_nlf, 'fail', nlf)['msg'] + nlf = '"statusResp":' + my_obj.scan_license_codes_for_nlf(nlf) + print_warnings() + assert_warning_was_raised('The license will be installed without checking for idempotency.', partial_match=True) + 
assert_warning_was_raised('Unable to decode input', partial_match=True) + with patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_license.HAS_JSON', False): + my_obj.scan_license_codes_for_nlf(nlf) + print_warnings() + assert_warning_was_raised('The license will be installed without checking for idempotency.', partial_match=True) + assert_warning_was_raised('the json package is required to process NLF license files', partial_match=True) + + +def test_error_nlf_and_legacy(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'use_rest': 'always', + 'license_codes': [NLF, 'xxxxxxxxxxxxxxxx'] + } + error = 'Error: cannot mix legacy licenses and NLF licenses; found 1 NLF licenses out of 2 license_codes.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_split_nlfs(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'use_rest': 'always', + 'license_codes': [NLF_MULTIPLE] + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert len(my_obj.parameters['license_codes']) == 2 + # force error: + error = 'Error: unexpected format found 2 entries and 3 lines' + assert error in expect_and_capture_ansible_exception(my_obj.split_nlf, 'fail', '%s\nyyyyy' % NLF_MULTIPLE)['msg'] + + +def test_remove_licenses_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'use_rest': 'always', + 'license_codes': [NLF_MULTIPLE] + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = 'Error: serial_number is required to delete a license.' 
+ assert error in expect_and_capture_ansible_exception(my_obj.remove_licenses_rest, 'fail', 'bundle name', {})['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_local_hosts.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_local_hosts.py new file mode 100644 index 000000000..15de03a8b --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_local_hosts.py @@ -0,0 +1,178 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_local_hosts \ + import NetAppOntapLocalHosts as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'host_record': (200, { + "records": [ + { + "owner": {"name": "svm", "uuid": "e3cb5c7fcd20"}, + "address": "10.10.10.10", + "host": "example.com", + "aliases": ["ex1.com", "ex2.com"] + }], + "num_records": 1 + }, None), +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'address': '10.10.10.10', 
+ 'owner': 'svm', +} + + +def test_get_local_host_rest_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['empty_records']) + ]) + module_args = {'address': '10.10.10.10', 'owner': 'svm'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_local_host_rest() is None + + +def test_get_local_host_rest_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['generic_error']) + ]) + module_args = {'address': '10.10.10.10', 'owner': 'svm'} + my_module_object = create_module(my_module, DEFAULT_ARGS, module_args) + msg = 'Error fetching IP to hostname mappings for svm: calling: name-services/local-hosts: got Expected error.' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_local_host_rest, 'fail')['msg'] + + +def test_create_local_host_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['empty_records']), + ('POST', 'name-services/local-hosts', SRR['empty_good']) + ]) + module_args = { + 'address': '10.10.10.10', + 'owner': 'svm', + 'host': 'example.com', + 'aliases': ['ex.com', 'ex1.com']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_create_local_host_rest_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['empty_records']), + ('POST', 'name-services/local-hosts', SRR['generic_error']) + ]) + module_args = { + 'address': '10.10.10.10', + 'owner': 'svm', + 'host': 'example.com', + 'aliases': ['ex.com', 'ex1.com']} + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error creating IP to hostname mappings for svm: calling: name-services/local-hosts: got Expected error.' 
+ assert msg in error + + +def test_create_local_host_rest_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['host_record']) + ]) + module_args = {'state': 'present'} + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_local_host(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['host_record']), + ('DELETE', 'name-services/local-hosts/e3cb5c7fcd20/10.10.10.10', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_local_host_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['host_record']), + ('DELETE', 'name-services/local-hosts/e3cb5c7fcd20/10.10.10.10', SRR['generic_error']) + ]) + module_args = {'state': 'absent'} + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error deleting IP to hostname mappings for svm: calling: name-services/local-hosts/e3cb5c7fcd20/10.10.10.10: got Expected error.' 
+ assert msg in error + + +def test_delete_local_host_rest_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['empty_records']) + ]) + module_args = {'state': 'absent'} + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_local_host(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['host_record']), + ('PATCH', 'name-services/local-hosts/e3cb5c7fcd20/10.10.10.10', SRR['empty_good']) + ]) + module_args = { + 'address': '10.10.10.10', + 'owner': 'svm', + 'host': 'example1.com', + 'aliases': ['ex.com', 'ex1.com', 'ex2.com']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_local_host_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'name-services/local-hosts', SRR['host_record']), + ('PATCH', 'name-services/local-hosts/e3cb5c7fcd20/10.10.10.10', SRR['generic_error']) + ]) + module_args = { + 'address': '10.10.10.10', + 'owner': 'svm', + 'host': 'example1.com', + 'aliases': ['ex.com', 'ex1.com', 'ex2.com']} + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error updating IP to hostname mappings for svm: calling: name-services/local-hosts/e3cb5c7fcd20/10.10.10.10: got Expected error.' 
+ assert msg in error + + +def validate_input_ipaddress(): + register_responses([ + ]) + module_args = {'address': '2001:0000:3238:DFE1:63:0000:0000:FEFBSS', 'owner': 'svm'} + error = create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error: Invalid IP address value 2001:0000:3238:DFE1:63:0000:0000:FEFBSS' + assert msg in error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_log_forward.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_log_forward.py new file mode 100644 index 000000000..5214b76d2 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_log_forward.py @@ -0,0 +1,343 @@ +''' unit tests ONTAP Ansible module: na_ontap_log_forward ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_log_forward \ + import NetAppOntapLogForward as log_forward_module # module under test + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'log_forward_record': 
(200, { + "records": [{ + "address": "10.11.12.13", + "facility": "user", + "port": 514, + "protocol": "udp_unencrypted", + "verify_server": False + }] + }, None) +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'log_forward': + xml = self.build_log_forward_info() + elif self.type == 'log_forward_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_log_forward_info(): + ''' build xml data for cluster-log-forward-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes': { + 'cluster-log-forward-info': { + 'destination': '10.11.12.13', + 'facility': 'user', + 'port': '514', + 'protocol': 'udp_unencrypted', + 'verify-server': 'false' + } + } + } + + xml.translate_struct(data) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.onbox = False + + def set_default_args(self, use_rest=None): + if self.onbox: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + destination = '10.11.12.13' + port = 514 + facility = 'user' + force = True + protocol = 'udp_unencrypted' + verify_server = False + else: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + destination = '10.11.12.13' + port = 514 + facility = 'user' + force = True + protocol = 'udp_unencrypted' + verify_server = False + + args = dict({ + 'state': 'present', + 'hostname': hostname, + 'username': username, + 'password': password, + 'destination': destination, + 'port': port, + 
'facility': facility, + 'force': force, + 'protocol': protocol, + 'verify_server': verify_server + }) + + if use_rest is not None: + args['use_rest'] = use_rest + + return args + + @staticmethod + def get_log_forward_mock_object(cx_type='zapi', kind=None): + log_forward_obj = log_forward_module() + if cx_type == 'zapi': + if kind is None: + log_forward_obj.server = MockONTAPConnection() + else: + log_forward_obj.server = MockONTAPConnection(kind=kind) + return log_forward_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + log_forward_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_get_called(self): + ''' test get_log_forward_config for non-existent config''' + set_module_args(self.set_default_args(use_rest='Never')) + print('starting') + my_obj = log_forward_module() + print('use_rest:', my_obj.use_rest) + my_obj.server = self.server + assert my_obj.get_log_forward_config is not None + + def test_ensure_get_called_existing(self): + ''' test get_log_forward_config for existing config''' + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = log_forward_module() + my_obj.server = MockONTAPConnection(kind='log_forward') + assert my_obj.get_log_forward_config() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_log_forward.NetAppOntapLogForward.create_log_forward_config') + def test_successful_create(self, create_log_forward_config): + ''' creating log_forward config and testing idempotency ''' + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = log_forward_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + create_log_forward_config.assert_called_with() + # to reset na_helper from remembering the 
previous 'changed' value + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = log_forward_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('log_forward') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_log_forward.NetAppOntapLogForward.destroy_log_forward_config') + def test_successful_delete(self, destroy_log_forward): + ''' deleting log_forward config and testing idempotency ''' + data = self.set_default_args(use_rest='Never') + data['state'] = 'absent' + set_module_args(data) + my_obj = log_forward_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('log_forward') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # destroy_log_forward_config.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + my_obj = log_forward_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_log_forward.NetAppOntapLogForward.modify_log_forward_config') + def test_successful_modify(self, modify_log_forward_config): + ''' modifying log_forward config and testing idempotency ''' + data = self.set_default_args(use_rest='Never') + data['facility'] = 'kern' + set_module_args(data) + my_obj = log_forward_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('log_forward') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + # modify_log_forward_config.assert_called_with() + # to reset 
na_helper from remembering the previous 'changed' value + data['facility'] = 'user' + set_module_args(data) + my_obj = log_forward_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('log_forward') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + data = self.set_default_args(use_rest='Never') + set_module_args(data) + my_obj = log_forward_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('log_forward_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_log_forward_config() + assert 'Error creating log forward config with destination ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.destroy_log_forward_config() + assert 'Error destroying log forward destination ' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_log_forward_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['msg'] == SRR['generic_error'][2] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_create_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_good'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_log_forward_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + 
@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_create_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['log_forward_record'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_log_forward_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_delete_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['log_forward_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_log_forward_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_delete_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_good'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_log_forward_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_modify_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'present' + data['facility'] = 'kern' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['log_forward_record'], # get + SRR['empty_good'], # delete + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + 
self.get_log_forward_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_modify_rest(self, mock_request): + data = self.set_default_args() + data['state'] = 'present' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['log_forward_record'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_log_forward_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_login_messages.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_login_messages.py new file mode 100644 index 000000000..ac628e8e2 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_login_messages.py @@ -0,0 +1,332 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_login_messages''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import call_main, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_error_message, zapi_responses +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from 
ansible_collections.netapp.ontap.plugins.modules.na_ontap_login_messages import main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') +HAS_NETAPP_ZAPI_MSG = "pip install netapp_lib is required" + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'svm_uuid': (200, {"records": [{"uuid": "test_uuid"}], "num_records": 1}, None), + 'login_info': (200, { + "records": [{ + "banner": "banner", + "message": "message", + "show_cluster_message": True, + "uuid": "uuid_uuid" + }], + "num_records": 1}, None), + 'login_info_trailing_newline': (200, { + "records": [{ + "banner": "banner\n", + "message": "message\n", + "show_cluster_message": True, + "uuid": "uuid_uuid" + }], + "num_records": 1}, None), +}) + + +banner_info = { + 'num-records': 1, + 'attributes-list': [{'vserver-login-banner-info': { + 'message': 'banner message', + }}]} + + +banner_info_empty = { + 'num-records': 1, + 'attributes-list': [{'vserver-login-banner-info': { + 'message': '-', + 'vserver': 'vserver' + }}]} + + +motd_info = { + 'num-records': 1, + 'attributes-list': [{'vserver-motd-info': { + 'is-cluster-message-enabled': 'true', + 'message': 'motd message', + 'vserver': 'vserver' + }}]} + + +motd_info_empty = { + 'num-records': 1, + 'attributes-list': [{'vserver-motd-info': { + 'is-cluster-message-enabled': 'true', + 'vserver': 'vserver' + }}]} + + +ZRR = zapi_responses({ + 'banner_info': build_zapi_response(banner_info), + 'banner_info_empty': build_zapi_response(banner_info_empty), + 'motd_info': build_zapi_response(motd_info), + 'motd_info_empty': build_zapi_response(motd_info_empty), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + } + 
assert "Error: vserver is a required parameter when using ZAPI." == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_NETAPP_LIB', False) +def test_module_fail_when_netapp_lib_missing(): + ''' required lib missing ''' + module_args = { + 'use_rest': 'never', + } + assert 'Error: the python NetApp-Lib module is required. Import error: None' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_successfully_create_banner(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info']), + ('ZAPI', 'vserver-login-banner-get-iter', ZRR['no_records']), + ('ZAPI', 'vserver-login-banner-modify-iter', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + 'banner': 'test banner', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_banner_idempotency(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info']), + ('ZAPI', 'vserver-login-banner-get-iter', ZRR['banner_info']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + 'banner': 'banner message', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_create_motd(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info_empty']), + ('ZAPI', 'vserver-login-banner-get-iter', ZRR['banner_info_empty']), + ('ZAPI', 'vserver-motd-modify-iter', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + 'motd_message': 'test message', + 'show_cluster_motd': False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_motd_idempotency(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info']), + ('ZAPI', 'vserver-login-banner-get-iter', ZRR['banner_info']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + 
'motd_message': 'motd message', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_motd_modify(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info']), + ('ZAPI', 'vserver-login-banner-get-iter', ZRR['banner_info']), + ('ZAPI', 'vserver-motd-modify-iter', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + 'motd_message': 'motd message', + 'show_cluster_motd': False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_get_banner_error(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info']), + ('ZAPI', 'vserver-login-banner-get-iter', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == zapi_error_message('Error fetching login_banner info') + + +def test_get_motd_error(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == zapi_error_message('Error fetching motd info') + + +def test_modify_banner_error(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['no_records']), + ('ZAPI', 'vserver-login-banner-get-iter', ZRR['banner_info']), + ('ZAPI', 'vserver-login-banner-modify-iter', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 'vserver', + 'banner': 'modify to new banner', + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == zapi_error_message('Error modifying login_banner') + + +def test_modify_motd_error(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info']), + ('ZAPI', 'vserver-login-banner-get-iter', ZRR['banner_info']), + ('ZAPI', 'vserver-motd-modify-iter', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'vserver': 
'vserver', + 'motd_message': 'modify to new motd', + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == zapi_error_message('Error modifying motd') + + +def test_successfully_create_banner_rest(): + register_responses([ + # no vserver, cluster scope + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['login_info']), + ('PATCH', 'security/login/messages/uuid_uuid', SRR['success']), + # with vserver + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['zero_records']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('PATCH', 'security/login/messages/test_uuid', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + 'banner': 'test banner', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['vserver'] = 'vserver' + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_banner_rest(): + register_responses([ + # no vserver, cluster scope + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['login_info']), + ('PATCH', 'security/login/messages/uuid_uuid', SRR['success']), + # idempotent check + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['login_info_trailing_newline']) + ]) + module_args = { + 'use_rest': 'always', + 'banner': 'banner\n', + 'message': 'message\n', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is False + + +def test_successfully_create_motd_rest(): + register_responses([ + # no vserver, cluster scope + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['login_info']), + ('PATCH', 'security/login/messages/uuid_uuid', SRR['success']), + # with vserver + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['login_info']), + ('PATCH', 'security/login/messages/uuid_uuid', SRR['success']), + ]) + module_args = { + 
'use_rest': 'always', + 'motd_message': 'test motd', + 'show_cluster_motd': False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['vserver'] = 'vserver' + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_banner_error_rest(): + register_responses([ + # no vserver, cluster scope + # error fetching info + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['generic_error']), + # error no info at cluster level + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['zero_records']), + # with vserver + # error fetching SVM UUID + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['zero_records']), + ('GET', 'svm/svms', SRR['generic_error']), + # error, SVM not found + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['zero_records']), + ('GET', 'svm/svms', SRR['zero_records']), + # error, on patch + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/login/messages', SRR['login_info']), + ('PATCH', 'security/login/messages/uuid_uuid', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always', + 'banner': 'test banner', + # 'show_cluster_motd': False + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == rest_error_message( + 'Error fetching login_banner info', 'security/login/messages') + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == 'Error fetching login_banner info for cluster - no data.' + module_args['vserver'] = 'vserver' + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == rest_error_message('Error fetching vserver vserver', 'svm/svms') + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] ==\ + 'Error fetching vserver vserver. Please make sure vserver name is correct. For cluster vserver, don\'t set vserver.' 
+ assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == rest_error_message( + 'Error modifying banner', 'security/login/messages/uuid_uuid') diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun.py new file mode 100644 index 000000000..5331458e1 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun.py @@ -0,0 +1,308 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + call_main, create_module, expect_and_capture_ansible_exception, patch_ansible +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_error, build_zapi_response, zapi_error_message, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun import NetAppOntapLUN as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +def lun_info(name, next_tag=None): + info = { + 'num-records': 1, + 'attributes-list': [{ + 'lun_info': { + 'path': "/what/ever/%s" % name, + 'size': 5368709120, + 'is-space-alloc-enabled': "false", + 'is-space-reservation-enabled': "true", + 'multiprotocol-type': 'linux', + 
'qos-policy-group': 'qospol', + 'qos-adaptive-policy-group': 'qosadppol', + } + }] + } + if next_tag: + info['next-tag'] = next_tag + return info + + +ZRR = zapi_responses({ + 'lun_info': build_zapi_response(lun_info('lun_name')), + 'lun_info_from': build_zapi_response(lun_info('lun_from_name')), + 'lun_info_with_tag': build_zapi_response(lun_info('lun_name', 'more to come')), + 'error_9042': build_zapi_error(9042, 'new size == old size, more or less'), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'use_rest', + 'name': 'lun_name', + 'vserver': 'lunsvm_name', +} + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_NETAPP_LIB', False) +def test_module_fail_when_netapp_lib_missing(): + ''' required lib missing ''' + module_args = { + 'use_rest': 'never', + } + assert 'Error: the python NetApp-Lib module is required. Import error: None' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never' + } + print('Info: %s' % call_main(my_main, {}, module_args, fail=True)['msg']) + + +def test_create_error_missing_param(): + ''' Test if create throws an error if required param 'destination_vserver' is not specified''' + register_responses([ + ('ZAPI', 'lun-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + } + msg = "Error: 'flexvol_name' option is required when using ZAPI." + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['flexvol_name'] = 'xxx' + msg = 'size is a required parameter for create.' 
+    assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
+
+
+def test_successful_create():
+    ''' Test successful create '''
+    register_responses([
+        ('ZAPI', 'lun-get-iter', ZRR['no_records']),
+        ('ZAPI', 'lun-create-by-size', ZRR['success']),
+        # second create
+        ('ZAPI', 'lun-get-iter', ZRR['no_records']),
+        ('ZAPI', 'lun-create-by-size', ZRR['success']),
+    ])
+    module_args = {
+        'use_rest': 'never',
+        'comment': 'some comment',
+        'flexvol_name': 'vol_name',
+        'qos_adaptive_policy_group': 'new_adaptive_pol',
+        'size': 5,
+        'space_allocation': False,
+        'space_reserve': False,
+    }
+    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+    module_args = {
+        'use_rest': 'never',
+        'comment': 'some comment',
+        'flexvol_name': 'vol_name',
+        'os_type': 'windows',
+        'qos_policy_group': 'new_pol',
+        'size': 5,
+        'space_allocation': False,
+        'space_reserve': False,
+    }
+    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+
+
+def test_create_rename_idempotency():
+    ''' Test create idempotency '''
+    register_responses([
+        ('ZAPI', 'lun-get-iter', ZRR['lun_info']),
+    ])
+    module_args = {
+        'use_rest': 'never',
+        'flexvol_name': 'vol_name',
+        'size': 5,
+    }
+    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+
+
+def test_delete_lun():
+    ''' Test delete and idempotency '''
+    register_responses([
+        ('ZAPI', 'lun-get-iter', ZRR['lun_info']),
+        ('ZAPI', 'lun-destroy', ZRR['success']),
+        # idempotency
+        ('ZAPI', 'lun-get-iter', ZRR['no_records']),
+    ])
+    module_args = {
+        'use_rest': 'never',
+        'flexvol_name': 'vol_name',
+        'state': 'absent',
+    }
+    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+    # idempotency check: re-run with the same arguments (state still 'absent')
+    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']
+
+
+def test_delete_lun_no_input():
+    ''' Nothing to delete! '''
+    register_responses([
+    ])
+    module_args = {
+        'use_rest': 'never',
+        'state': 'absent',
+    }
+    msg = "Error: 'flexvol_name' option is required when using ZAPI."
+ assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_successful_resize(): + ''' Test successful resize ''' + register_responses([ + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ('ZAPI', 'lun-resize', ZRR['success']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ('ZAPI', 'lun-resize', ZRR['error_9042']), + ]) + module_args = { + 'use_rest': 'never', + 'flexvol_name': 'vol_name', + 'size': 7 + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify(): + ''' Test successful modify ''' + register_responses([ + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ('ZAPI', 'lun-set-comment', ZRR['success']), + ('ZAPI', 'lun-set-qos-policy-group', ZRR['success']), + ('ZAPI', 'lun-set-space-alloc', ZRR['success']), + # second call + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ('ZAPI', 'lun-set-comment', ZRR['success']), + ('ZAPI', 'lun-set-qos-policy-group', ZRR['success']), + ('ZAPI', 'lun-set-space-reservation-info', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'comment': 'some comment', + 'flexvol_name': 'vol_name', + 'qos_policy_group': 'new_pol', + 'space_allocation': True, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args = { + 'use_rest': 'never', + 'comment': 'some comment', + 'flexvol_name': 'vol_name', + 'qos_adaptive_policy_group': 'new_adaptive_pol', + 'space_allocation': False, + 'space_reserve': False, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_modify(): + ''' Test successful modify ''' + register_responses([ + ('ZAPI', 'lun-get-iter', ZRR['lun_info']), + ]) + module_args = { + 'use_rest': 'never', + 'flexvol_name': 'vol_name', + 'comment': 'some comment', + 'os_type': 'windows', + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == 'os_type cannot be modified: current: linux, 
desired: windows' + + +def test_successful_rename(): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'lun-get-iter', ZRR['no_records']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info_from']), + ('ZAPI', 'lun-move', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'flexvol_name': 'vol_name', + 'from_name': 'lun_from_name' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_failed_rename(): + ''' Test failed rename ''' + register_responses([ + ('ZAPI', 'lun-get-iter', ZRR['no_records']), + ('ZAPI', 'lun-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + 'flexvol_name': 'vol_name', + 'from_name': 'lun_from_name' + } + msg = 'Error renaming lun: lun_from_name does not exist' + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_zapi_errors(): + register_responses([ + # get error + ('ZAPI', 'lun-get-iter', ZRR['error']), + # error on next tag + ('ZAPI', 'lun-get-iter', ZRR['lun_info_with_tag']), + ('ZAPI', 'lun-get-iter', ZRR['lun_info_with_tag']), + ('ZAPI', 'lun-get-iter', ZRR['error']), + # create error + ('ZAPI', 'lun-create-by-size', ZRR['error']), + # resize error + ('ZAPI', 'lun-resize', ZRR['error']), + # rename error + ('ZAPI', 'lun-move', ZRR['error']), + # modify error + ('ZAPI', 'lun-set-space-reservation-info', ZRR['error']), + # delete error + ('ZAPI', 'lun-destroy', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'flexvol_name': 'vol_name', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + msg = 'Error fetching luns for vol_name' + assert zapi_error_message(msg) == expect_and_capture_ansible_exception(my_obj.get_luns, 'fail')['msg'] + assert zapi_error_message(msg) == expect_and_capture_ansible_exception(my_obj.get_luns, 'fail')['msg'] + + my_obj.parameters['size'] = 123456 + msg = 'Error provisioning lun lun_name of size 123456' + assert zapi_error_message(msg) == 
expect_and_capture_ansible_exception(my_obj.create_lun, 'fail')['msg'] + + msg = 'Error resizing lun path' + assert zapi_error_message(msg) == expect_and_capture_ansible_exception(my_obj.resize_lun, 'fail', 'path')['msg'] + + my_obj.parameters.pop('size') + msg = 'Error moving lun old_path' + assert zapi_error_message(msg) == expect_and_capture_ansible_exception(my_obj.rename_lun, 'fail', 'old_path', 'new_path')['msg'] + + msg = 'Error setting lun option space_reserve' + assert zapi_error_message(msg) == expect_and_capture_ansible_exception(my_obj.modify_lun, 'fail', 'path', {'space_reserve': True})['msg'] + + msg = 'Error deleting lun path' + assert zapi_error_message(msg) == expect_and_capture_ansible_exception(my_obj.delete_lun, 'fail', 'path')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_app_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_app_rest.py new file mode 100644 index 000000000..0a874f2a6 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_app_rest.py @@ -0,0 +1,584 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import copy +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock, call +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible, assert_warning_was_raised, print_warnings +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun \ + import 
NetAppOntapLUN as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest_96': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy')), None), + 'is_rest_97': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy')), None), + 'is_rest_98': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {'records': []}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'get_apps_empty': (200, + {'records': [], + 'num_records': 0 + }, + None + ), + 'get_apps_found': (200, + {'records': [dict(name='san_appli', uuid='1234')], + 'num_records': 1 + }, + None + ), + 'get_app_components': (200, + {'records': [dict(name='san_appli', uuid='1234')], + 'num_records': 1 + }, + None + ), + 'get_app_details': (200, + dict(name='san_appli', uuid='1234', + san=dict(application_components=[dict(name='lun_name', lun_count=3, total_size=1000)]), + statistics=dict(space=dict(provisioned=1100)) + ), + None + ), + 'get_app_component_details': (200, + {'backing_storage': dict(luns=[]), + }, + None + ), + 'get_volumes_found': (200, + {'records': [dict(name='san_appli', uuid='1234')], + 'num_records': 1 + }, + None + ), + 'get_lun_path': (200, + {'records': [{'uuid': '1234', 'path': '/vol/lun_name/lun_name'}], + 'num_records': 1 + }, + None + ), + 'one_lun': (200, + {'records': [{ + 'uuid': "1234", + 'name': '/vol/lun_name/lun_name', + 'path': '/vol/lun_name/lun_name', + 'size': 9871360, + 'comment': None, + 'flexvol_name': None, + 'os_type': 'xyz', + 'qos_policy_group': None, + 'space_reserve': False, + 'space_allocation': False + }], + }, None), + 'get_storage': (200, + {'backing_storage': 
dict(luns=[{'path': '/vol/lun_name/lun_name', + 'uuid': '1234', + 'size': 15728640, + 'creation_timestamp': '2022-07-26T20:35:50+00:00' + }]), + }, None), + +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, parm1=None): + ''' save arguments ''' + self.type = kind + self.parm1 = parm1 + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'lun': + xml = self.build_lun_info(self.parm1) + self.xml_out = xml + return xml + + @staticmethod + def build_lun_info(lun_name): + ''' build xml data for lun-info ''' + xml = netapp_utils.zapi.NaElement('xml') + lun = dict( + lun_info=dict( + path="/what/ever/%s" % lun_name, + size=10 + ) + ) + attributes = { + 'num-records': 1, + 'attributes-list': [lun] + } + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_lun_args = { + 'vserver': 'ansible', + 'name': 'lun_name', + 'flexvol_name': 'vol_name', + 'state': 'present' + } + + def mock_args(self): + return { + 'vserver': self.mock_lun_args['vserver'], + 'name': self.mock_lun_args['name'], + 'flexvol_name': self.mock_lun_args['flexvol_name'], + 'state': self.mock_lun_args['state'], + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + } + # self.server = MockONTAPConnection() + + def get_lun_mock_object(self, kind=None, parm1=None): + """ + Helper method to return an na_ontap_lun object + :param kind: passes this param to MockONTAPConnection() + :return: na_ontap_interface object + """ + lun_obj = my_module() + lun_obj.autosupport_log = Mock(return_value=None) + lun_obj.server = MockONTAPConnection(kind=kind, parm1=parm1) + return lun_obj + + def test_module_fail_when_required_args_missing(self): + ''' 
required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_create_error_missing_param(self): + ''' Test if create throws an error if required param 'destination_vserver' is not specified''' + data = self.mock_args() + set_module_args(data) + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli') + with pytest.raises(AnsibleFailJson) as exc: + self.get_lun_mock_object().apply() + msg = 'size is a required parameter for create.' + assert msg == exc.value.args[0]['msg'] + + def test_create_error_missing_param2(self): + ''' Test if create throws an error if required param 'destination_vserver' is not specified''' + data = self.mock_args() + data.pop('flexvol_name') + data['size'] = 5 + data['san_application_template'] = dict(lun_count=6) + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_lun_mock_object().apply() + msg = 'missing required arguments: name found in san_application_template' + assert msg == exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_create_appli(self, mock_request): + ''' Test successful create ''' + mock_request.side_effect = [ + SRR['is_rest_98'], + SRR['get_apps_empty'], # GET application/applications + SRR['get_apps_empty'], # GET volumes + SRR['empty_good'], # POST application/applications + SRR['end_of_sequence'] + ] + data = dict(self.mock_args()) + data['size'] = 5 + data.pop('flexvol_name') + tiering = dict(control='required') + data['san_application_template'] = dict(name='san_appli', tiering=tiering) + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_lun_mock_object().apply() + assert exc.value.args[0]['changed'] + expected_json = {'name': 'san_appli', 'svm': {'name': 'ansible'}, 'smart_container': True, + 'san': 
{'application_components': + [{'name': 'lun_name', 'lun_count': 1, 'total_size': 5368709120, 'tiering': {'control': 'required'}}]}} + expected_call = call( + 'POST', 'application/applications', {'return_timeout': 30, 'return_records': 'true'}, json=expected_json, headers=None, files=None) + assert expected_call in mock_request.mock_calls + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_create_appli_idem(self, mock_request): + ''' Test successful create idempotent ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + SRR['get_apps_found'], # GET application/applications//components + SRR['get_app_component_details'], # GET application/applications//components/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data['size'] = 5 + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli') + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_lun_mock_object().apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_create_appli_idem_no_comp(self, mock_request): + ''' Test successful create idempotent ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + SRR['get_apps_empty'], # GET application/applications//components + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data['size'] = 5 + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli') + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_lun_mock_object().apply() + # print(mock_request.call_args_list) + msg = 'Error: no component for 
application san_appli' + assert msg == exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_delete_appli(self, mock_request): + ''' Test successful create ''' + mock_request.side_effect = [ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['empty_good'], # POST application/applications + SRR['end_of_sequence'] + ] + data = dict(self.mock_args()) + data['size'] = 5 + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli') + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_lun_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_delete_appli_idem(self, mock_request): + ''' Test successful delete idempotent ''' + mock_request.side_effect = [ + SRR['is_rest_98'], + SRR['get_apps_empty'], # GET application/applications + SRR['end_of_sequence'] + ] + data = dict(self.mock_args()) + data['size'] = 5 + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli') + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_lun_mock_object().apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_modify_appli(self, mock_request): + ''' Test successful modify application ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + SRR['get_apps_found'], # GET application/applications//components + SRR['get_app_component_details'], # GET application/applications//components/ + SRR['empty_good'], + SRR['get_lun_path'], + 
SRR['get_storage'], + SRR['one_lun'], + SRR['empty_good'], + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data['os_type'] = 'xyz' + data['space_reserve'] = True + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli', lun_count=5, total_size=1000, igroup_name='abc') + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_lun_mock_object().apply() + print(exc.value.args[0]) + # print(mock_request.call_args_list) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_error_modify_appli_missing_igroup(self, mock_request): + ''' Test successful modify application ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + # SRR['get_apps_found'], # GET application/applications//components + # SRR['get_app_component_details'], # GET application/applications//components/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data['size'] = 5 + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli', lun_count=5) + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_lun_mock_object().apply() + msg = 'Error: igroup_name is a required parameter when increasing lun_count.' + assert msg in exc.value.args[0]['msg'] + msg = 'Error: total_size is a required parameter when increasing lun_count.' + assert msg in exc.value.args[0]['msg'] + msg = 'Error: os_type is a required parameter when increasing lun_count.' 
+ assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_no_action(self, mock_request): + ''' Test successful modify application ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + SRR['get_apps_found'], # GET application/applications//components + SRR['get_app_component_details'], # GET application/applications//components/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data['name'] = 'unknown' + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli', lun_count=5) + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_lun_mock_object().apply() + print(exc.value.args[0]) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_no_96(self, mock_request): + ''' Test SAN application not supported on 9.6 ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_96'], + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data['name'] = 'unknown' + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli', lun_count=5) + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_lun_mock_object().apply() + print(exc.value.args[0]['msg']) + msg = 'Error: using san_application_template requires ONTAP 9.7 or later and REST must be enabled.' 
+ assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_no_modify_on97(self, mock_request): + ''' Test modify SAN application not supported on 9.7 ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_97'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + SRR['get_apps_found'], # GET application/applications//components + SRR['get_app_component_details'], # GET application/applications//components/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data.pop('flexvol_name') + data['os_type'] = 'xyz' + data['san_application_template'] = dict(name='san_appli', lun_count=5, total_size=1000, igroup_name='abc') + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_lun_mock_object().apply() + print(exc.value.args[0]) + msg = 'Error: modifying lun_count, total_size is not supported on ONTAP 9.7' + # in python 2.6, keys() is not sorted! 
+ msg2 = 'Error: modifying total_size, lun_count is not supported on ONTAP 9.7' + assert msg in exc.value.args[0]['msg'] or msg2 in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_no_modify_on97_2(self, mock_request): + ''' Test modify SAN application not supported on 9.7 ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_97'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + SRR['get_apps_found'], # GET application/applications//components + SRR['get_app_component_details'], # GET application/applications//components/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli', total_size=1000) + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_lun_mock_object().apply() + print(exc.value.args[0]) + msg = 'Error: modifying total_size is not supported on ONTAP 9.7' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_app_changes_reduction_not_allowed(self, mock_request): + ''' Test modify SAN application - can't decrease size ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + # SRR['get_apps_found'], # GET application/applications//components + # SRR['get_app_component_details'], # GET application/applications//components/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli', total_size=899, total_size_unit='b') + set_module_args(data) + lun_object = self.get_lun_mock_object() + with pytest.raises(AnsibleFailJson) as exc: + 
lun_object.app_changes('scope') + msg = "Error: can't reduce size: total_size=1000, provisioned=1100, requested=899" + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_app_changes_reduction_small_enough_10(self, mock_request): + ''' Test modify SAN application - a 10% reduction is ignored ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + # SRR['get_apps_found'], # GET application/applications//components + # SRR['get_app_component_details'], # GET application/applications//components/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli', total_size=900, total_size_unit='b') + set_module_args(data) + lun_object = self.get_lun_mock_object() + results = lun_object.app_changes('scope') + print(results) + print(lun_object.debug) + msg = "Ignoring small reduction (10.0 %) in total size: total_size=1000, provisioned=1100, requested=900" + assert_warning_was_raised(msg) + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_app_changes_reduction_small_enough_17(self, mock_request): + ''' Test modify SAN application - a 1.7% reduction is ignored ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + # SRR['get_apps_found'], # GET application/applications//components + # SRR['get_app_component_details'], # GET application/applications//components/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli', total_size=983, total_size_unit='b') + set_module_args(data) + 
lun_object = self.get_lun_mock_object() + results = lun_object.app_changes('scope') + print(results) + print(lun_object.debug) + print_warnings() + msg = "Ignoring small reduction (1.7 %) in total size: total_size=1000, provisioned=1100, requested=983" + assert_warning_was_raised(msg) + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_app_changes_increase_small_enough(self, mock_request): + ''' Test modify SAN application - a 1.7% reduction is ignored ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + # SRR['get_apps_found'], # GET application/applications//components + # SRR['get_app_component_details'], # GET application/applications//components/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data.pop('flexvol_name') + data['san_application_template'] = dict(name='san_appli', total_size=1050, total_size_unit='b') + set_module_args(data) + lun_object = self.get_lun_mock_object() + results = lun_object.app_changes('scope') + print(results) + print(lun_object.debug) + msg = "Ignoring increase: requested size is too small: total_size=1000, provisioned=1100, requested=1050" + assert_warning_was_raised(msg) + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_convert_to_appli(self, mock_request): + ''' Test successful convert to application + Appli does not exist, but the volume does. 
+ ''' + mock_request.side_effect = copy.deepcopy([ + SRR['is_rest_98'], + SRR['get_apps_empty'], # GET application/applications + SRR['get_volumes_found'], # GET volumes + SRR['empty_good'], # POST application/applications + SRR['get_apps_found'], # GET application/applications + SRR['get_app_details'], # GET application/applications/ + SRR['end_of_sequence'] + ]) + data = dict(self.mock_args()) + data['size'] = 5 + data.pop('flexvol_name') + tiering = dict(control='required') + data['san_application_template'] = dict(name='san_appli', tiering=tiering, scope='application') + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_lun_mock_object().apply() + # assert exc.value.args[0]['changed'] + print(mock_request.mock_calls) + print(exc.value.args[0]) + expected_json = {'name': 'san_appli', 'svm': {'name': 'ansible'}, 'smart_container': True, + 'san': {'application_components': + [{'name': 'lun_name'}]}} + expected_call = call( + 'POST', 'application/applications', {'return_timeout': 30, 'return_records': 'true'}, json=expected_json, headers=None, files=None) + assert expected_call in mock_request.mock_calls + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_negative_convert_to_appli(self, mock_request): + ''' Test successful convert to application + Appli does not exist, but the volume does. 
+ ''' + mock_request.side_effect = [ + SRR['is_rest_97'], + SRR['get_apps_empty'], # GET application/applications + SRR['get_volumes_found'], # GET volumes + SRR['end_of_sequence'] + ] + data = dict(self.mock_args()) + data['size'] = 5 + data.pop('flexvol_name') + tiering = dict(control='required') + data['san_application_template'] = dict(name='san_appli', tiering=tiering, scope='application') + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_lun_mock_object().apply() + msg = "Error: converting a LUN volume to a SAN application container requires ONTAP 9.8 or better." + assert msg in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_copy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_copy.py new file mode 100644 index 000000000..93d446809 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_copy.py @@ -0,0 +1,113 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible,\ + create_module, create_and_apply, expect_and_capture_ansible_exception +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +from 
ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_copy \ + import NetAppOntapLUNCopy as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + +DEFAULT_ARGS = { + 'source_vserver': 'ansible', + 'destination_path': '/vol/test/test_copy_dest_dest_new_reviewd_new', + 'source_path': '/vol/test/test_copy_1', + 'destination_vserver': 'ansible', + 'state': 'present', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'never' +} + + +ZRR = zapi_responses({ + 'lun_info': build_zapi_response({'num-records': 1}) +}) + + +SRR = rest_responses({ + 'lun_info': (200, {"records": [{ + "name": "/vol/vol0/lun1_10" + }], "num_records": 1}, None) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "destination_vserver", "destination_path", "source_path"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_create_error_missing_param(): + ''' Test if create throws an error if required param 'destination_vserver' is not specified''' + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['destination_vserver'] + msg = 'missing required arguments: destination_vserver' + assert msg in create_module(my_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + + +def test_successful_copy(): + ''' Test successful create and idempotent check ''' + register_responses([ + ('lun-get-iter', ZRR['empty']), + ('lun-copy-start', ZRR['success']), + ('lun-get-iter', ZRR['lun_info']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + assert not 
create_and_apply(my_module, DEFAULT_ARGS)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('lun-get-iter', ZRR['error']), + ('lun-copy-start', ZRR['error']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/luns', SRR['generic_error']), + ('POST', 'storage/luns', SRR['generic_error']), + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/luns', SRR['empty_records']) + ]) + lun_obj = create_module(my_module, DEFAULT_ARGS) + assert 'Error getting lun info' in expect_and_capture_ansible_exception(lun_obj.get_lun, 'fail')['msg'] + assert 'Error copying lun from' in expect_and_capture_ansible_exception(lun_obj.copy_lun, 'fail')['msg'] + lun_obj = create_module(my_module, DEFAULT_ARGS, {'use_rest': 'always'}) + assert 'Error getting lun info' in expect_and_capture_ansible_exception(lun_obj.get_lun_rest, 'fail')['msg'] + assert 'Error copying lun from' in expect_and_capture_ansible_exception(lun_obj.copy_lun_rest, 'fail')['msg'] + assert 'REST requires ONTAP 9.10.1 or later' in create_module(my_module, DEFAULT_ARGS, {'use_rest': 'always'}, fail=True)['msg'] + args = {'use_rest': 'always', 'destination_vserver': 'some_vserver'} + assert 'REST does not supports inter-Vserver lun copy' in create_and_apply(my_module, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_successful_copy_rest(): + ''' Test successful create and idempotent check in REST ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/luns', SRR['empty_records']), + ('POST', 'storage/luns', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/luns', SRR['lun_info']), + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map.py 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map.py new file mode 100644 index 000000000..120e5f7b3 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map.py @@ -0,0 +1,159 @@ +''' unit tests ONTAP Ansible module: na_ontap_lun_map ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_map \ + import NetAppOntapLUNMap as my_module + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'lun_map': + xml = self.build_lun_info() + elif self.type == 'lun_map_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_lun_info(): + ''' build xml data for lun-map-entry ''' + xml = netapp_utils.zapi.NaElement('xml') + data = {'initiator-groups': [{'initiator-group-info': {'initiator-group-name': 'ansible', 'lun-id': 2}}]} + xml.translate_struct(data) + return xml + + +class 
TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + initiator_group_name = 'ansible' + vserver = 'ansible' + path = '/vol/ansible/test' + lun_id = 2 + else: + hostname = 'hostname' + username = 'username' + password = 'password' + initiator_group_name = 'ansible' + vserver = 'ansible' + path = '/vol/ansible/test' + lun_id = 2 + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'initiator_group_name': initiator_group_name, + 'vserver': vserver, + 'path': path, + 'lun_id': lun_id, + 'use_rest': 'false' + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_get_called(self): + ''' test get_lun_map for non-existent lun''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + assert my_obj.get_lun_map is not None + + def test_ensure_get_called_existing(self): + ''' test get_lun_map for existing lun''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = MockONTAPConnection(kind='lun_map') + assert my_obj.get_lun_map() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_map.NetAppOntapLUNMap.create_lun_map') + def test_successful_create(self, create_lun_map): + ''' mapping lun and testing idempotency ''' + data = self.set_default_args() + set_module_args(data) + my_obj = my_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + create_lun_map.assert_called_with() + # to reset na_helper from remembering the 
previous 'changed' value + set_module_args(self.set_default_args()) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('lun_map') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_map.NetAppOntapLUNMap.delete_lun_map') + def test_successful_delete(self, delete_lun_map): + ''' unmapping lun and testing idempotency ''' + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('lun_map') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + delete_lun_map.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('lun_map_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_lun_map() + assert 'Error mapping lun' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_lun_map() + assert 'Error unmapping lun' in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map_reporting_nodes.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map_reporting_nodes.py new file mode 100644 index 000000000..e09016eda --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map_reporting_nodes.py @@ -0,0 +1,170 @@ +# (c) 2022, NetApp, Inc +# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP lun reporting nodes Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible,\ + create_module, create_and_apply, expect_and_capture_ansible_exception +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_map_reporting_nodes \ + import NetAppOntapLUNMapReportingNodes as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +DEFAULT_ARGS = { + 'initiator_group_name': 'igroup1', + "path": "/vol/lun1/lun1_1", + "vserver": "svm1", + 'nodes': 'ontap910-01', + 'state': 'present', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'never' +} + + +node_info = { + 'num-records': "1", + 'attributes-list': { + 'lun-map-info': { + 'reporting-nodes': [{"node-name": "ontap910-01"}] + } + } +} + + +nodes_info = { + 'num-records': "1", + 'attributes-list': { + 'lun-map-info': { + 'reporting-nodes': [{"node-name": "ontap910-01"}, {"node-name": "ontap910-02"}] + } + } +} + + +ZRR = zapi_responses({ + 'node_info': 
build_zapi_response(node_info), + 'nodes_info': build_zapi_response(nodes_info) +}) + + +SRR = rest_responses({ + 'node_info': (200, {"records": [{ + "svm": {"name": "svm1"}, + "lun": {"uuid": "ea78ec41", "name": "/vol/ansibleLUN/ansibleLUN"}, + "igroup": {"uuid": "8b8aa177", "name": "testme_igroup"}, + "reporting_nodes": [{"uuid": "20f6b3d5", "name": "ontap910-01"}] + }], "num_records": 1}, None), + 'nodes_info': (200, {"records": [{ + "svm": {"name": "svm1"}, + "lun": {"uuid": "ea78ec41", "name": "/vol/ansibleLUN/ansibleLUN"}, + "igroup": {"uuid": "8b8aa177", "name": "testme_igroup"}, + "reporting_nodes": [{"uuid": "20f6b3d5", "name": "ontap910-01"}, {"uuid": "20f6b3d6", "name": "ontap910-02"}] + }], "num_records": 1}, None) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "initiator_group_name", "vserver", "path", "nodes"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_successful_add_node(): + ''' Test successful add and idempotent check ''' + register_responses([ + ('lun-map-get-iter', ZRR['node_info']), + ('lun-map-add-reporting-nodes', ZRR['success']), + ('lun-map-get-iter', ZRR['nodes_info']), + ]) + args = {'nodes': ['ontap910-01', 'ontap910-02']} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_remove_node(): + ''' Test successful remove and idempotent check ''' + register_responses([ + ('lun-map-get-iter', ZRR['nodes_info']), + ('lun-map-remove-reporting-nodes', ZRR['success']), + ('lun-map-get-iter', ZRR['node_info']), + ]) + args = {'nodes': 'ontap910-02', 'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, 
DEFAULT_ARGS, args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('lun-map-get-iter', ZRR['no_records']), + ('lun-map-get-iter', ZRR['error']), + ('lun-map-add-reporting-nodes', ZRR['error']), + ('lun-map-remove-reporting-nodes', ZRR['error']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/san/lun-maps', SRR['generic_error']), + ('POST', 'protocols/san/lun-maps/3edf6t/3edf62/reporting-nodes', SRR['generic_error']), + ('DELETE', 'protocols/san/lun-maps/3edf6t/3edf62/reporting-nodes/3dr567', SRR['generic_error']), + ('GET', 'cluster', SRR['is_rest_9_9_1']) + ]) + node_obj = create_module(my_module, DEFAULT_ARGS) + assert 'Error: LUN map not found' in expect_and_capture_ansible_exception(node_obj.apply, 'fail')['msg'] + assert 'Error getting LUN' in expect_and_capture_ansible_exception(node_obj.get_lun_map_reporting_nodes, 'fail')['msg'] + assert 'Error creating LUN map reporting nodes' in expect_and_capture_ansible_exception(node_obj.add_lun_map_reporting_nodes, 'fail', 'node1')['msg'] + assert 'Error deleting LUN map reporting node' in expect_and_capture_ansible_exception(node_obj.remove_lun_map_reporting_nodes, 'fail', 'node1')['msg'] + + node_obj = create_module(my_module, DEFAULT_ARGS, {'use_rest': 'always'}) + node_obj.lun_uuid, node_obj.igroup_uuid = '3edf6t', '3edf62' + node_obj.nodes_uuids = {'node1': '3dr567'} + assert 'Error getting LUN' in expect_and_capture_ansible_exception(node_obj.get_lun_map_reporting_nodes, 'fail')['msg'] + assert 'Error creating LUN map reporting node' in expect_and_capture_ansible_exception(node_obj.add_lun_map_reporting_nodes_rest, 'fail', 'node1')['msg'] + assert 'Error deleting LUN map reporting node' in expect_and_capture_ansible_exception(node_obj.remove_lun_map_reporting_nodes_rest, 'fail', 'node1')['msg'] + assert 'REST requires ONTAP 9.10.1 or later' in create_module(my_module, DEFAULT_ARGS, {'use_rest': 'always'}, fail=True)['msg'] + + +def 
test_successful_add_node_rest(): + ''' Test successful add and idempotent check ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/san/lun-maps', SRR['node_info']), + ('POST', 'protocols/san/lun-maps/ea78ec41/8b8aa177/reporting-nodes', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/san/lun-maps', SRR['nodes_info']) + ]) + args = {'nodes': ['ontap910-01', 'ontap910-02'], 'use_rest': 'always'} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_remove_node_rest(): + ''' Test successful remove and idempotent check ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/san/lun-maps', SRR['nodes_info']), + ('DELETE', 'protocols/san/lun-maps/ea78ec41/8b8aa177/reporting-nodes/20f6b3d6', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/san/lun-maps', SRR['node_info']) + ]) + args = {'nodes': 'ontap910-02', 'state': 'absent', 'use_rest': 'always'} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map_rest.py new file mode 100644 index 000000000..1881ee37a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_map_rest.py @@ -0,0 +1,200 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from 
ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception, AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun_map \ + import NetAppOntapLUNMap as my_module, main as my_main # module under test + +# needed for get and modify/delete as they still use ZAPI +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +# REST API canned responses when mocking send_request + +SRR = rest_responses({ + 'lun': (200, {"records": [ + { + "uuid": "2f030603-3daa-4e19-9888-f9c3ac9a9117", + "name": "/vol/ansibleLUN_vol1/ansibleLUN", + "os_type": "linux", + "serial_number": "wOpku+Rjd-YL", + "space": { + "size": 5242880 + }, + "status": { + "state": "online" + } + }]}, None), + 'lun_map': (200, {"records": [ + { + "igroup": { + "uuid": "1ad8544d-8cd1-91e0-9e1c-723478563412", + "name": "igroup1", + }, + "logical_unit_number": 1, + "lun": { + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", + "name": "this/is/a/path", + }, + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + } + } + ]}, None) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'path': 'this/is/a/path', + 'initiator_group_name': 'igroup1', + 'vserver': 'svm1', + 'use_rest': 'always', +} + + +def test_get_lun_map_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/san/lun-maps', 
SRR['empty_records']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_lun_map_rest() is None + + +def test_get_lun_map_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/san/lun-maps', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error getting lun_map this/is/a/path: calling: protocols/san/lun-maps: got Expected error.' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_lun_map_rest, 'fail')['msg'] + + +def test_get_lun_map_one_record(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/san/lun-maps', SRR['lun_map']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_lun_map_rest() is not None + + +def test_get_lun_one_record(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['lun']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_lun_rest() is not None + + +def test_get_lun_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error getting lun this/is/a/path: calling: storage/luns: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_lun_rest, 'fail')['msg'] + + +def test_create_lun_map(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']), + ('GET', 'protocols/san/lun-maps', SRR['empty_records']), + ('POST', 'protocols/san/lun-maps', SRR['empty_good']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {})['changed'] + + +def test_create_lun_map_with_lun_id(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']), + ('GET', 'protocols/san/lun-maps', SRR['empty_records']), + ('POST', 'protocols/san/lun-maps', SRR['empty_good']) + ]) + module_args = {'lun_id': '1'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_lun_map_with_lun_id_idempotent(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['lun']), + ('GET', 'protocols/san/lun-maps', SRR['lun_map']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'lun_id': '1'})['changed'] is False + + +def test_create_lun_map_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'protocols/san/lun-maps', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error creating lun_map this/is/a/path: calling: protocols/san/lun-maps: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.create_lun_map_rest, 'fail')['msg'] + + +def test_delete_lun_map(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']), + ('GET', 'protocols/san/lun-maps', SRR['lun_map']), + ('DELETE', 'protocols/san/lun-maps/1cd8a442-86d1-11e0-ae1c-123478563412/1ad8544d-8cd1-91e0-9e1c-723478563412', + SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_lun_map_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']), + ('GET', 'protocols/san/lun-maps', SRR['lun_map']), + ]) + module_args = {'initiator_group_name': 'new name'} + msg = 'Modification of lun_map not allowed' + assert msg in create_and_apply(my_module, DEFAULT_ARGS, module_args, 'fail')['msg'] + + +def test_delete_lun_map_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('DELETE', 'protocols/san/lun-maps/1cd8a442-86d1-11e0-ae1c-123478563412/1ad8544d-8cd1-91e0-9e1c-723478563412', + SRR['generic_error']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + my_obj.parameters['state'] = 'absent' + my_obj.igroup_uuid = '1ad8544d-8cd1-91e0-9e1c-723478563412' + my_obj.lun_uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + msg = 'Error deleting lun_map this/is/a/path: calling: ' \ + 'protocols/san/lun-maps/1cd8a442-86d1-11e0-ae1c-123478563412/1ad8544d-8cd1-91e0-9e1c-723478563412: got Expected error.' 
+ assert msg == expect_and_capture_ansible_exception(my_obj.delete_lun_map_rest, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_rest.py new file mode 100644 index 000000000..fd65062d0 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_lun_rest.py @@ -0,0 +1,558 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_lun \ + import NetAppOntapLUN as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'one_lun': (200, { + "records": [ + { + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", + "qos_policy": { + "name": "qos1", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }, + "os_type": "aix", + "enabled": True, + "location": { + "volume": { + "name": "volume1", + "uuid": "028baa66-41bd-11e9-81d5-00a0986138f7" + }, + }, + "name": "/vol/volume1/qtree1/lun1", + "space": { + 
"scsi_thin_provisioning_support_enabled": True, + "guarantee": { + "requested": True, + }, + "size": 1073741824 + }, + "lun_maps": [ + { + "igroup": { + "name": "igroup1", + "uuid": "4ea7a442-86d1-11e0-ae1c-123478563412" + }, + "logical_unit_number": 0, + } + ], + "comment": "string", + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + } + ], + }, None), + 'two_luns': (200, { + "records": [ + { + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", + "qos_policy": { + "name": "qos1", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }, + "os_type": "aix", + "enabled": True, + "location": { + "volume": { + "name": "volume1", + "uuid": "028baa66-41bd-11e9-81d5-00a0986138f7" + }, + }, + "name": "/vol/volume1/qtree1/lun1", + "space": { + "scsi_thin_provisioning_support_enabled": True, + "guarantee": { + "requested": True, + }, + "size": 1073741824 + }, + "lun_maps": [ + { + "igroup": { + "name": "igroup1", + "uuid": "4ea7a442-86d1-11e0-ae1c-123478563412" + }, + "logical_unit_number": 0, + } + ], + "comment": "string", + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + }, + { + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563413", + "qos_policy": { + "name": "qos2", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563413" + }, + "os_type": "aix", + "enabled": True, + "location": { + "volume": { + "name": "volume2", + "uuid": "028baa66-41bd-11e9-81d5-00a0986138f3" + }, + }, + "name": "/vol/volume1/qtree1/lun2", + "space": { + "scsi_thin_provisioning_support_enabled": True, + "guarantee": { + "requested": True, + }, + "size": 1073741824 + }, + "lun_maps": [ + { + "igroup": { + "name": "igroup2", + "uuid": "4ea7a442-86d1-11e0-ae1c-123478563413" + }, + "logical_unit_number": 0, + } + ], + "comment": "string", + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f3" + }, + } + ], + }, None), + 'error_same_size': (400, None, 'New LUN size is the same as the old LUN size - this mau happen ...') +}) + 
+DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': '/vol/volume1/qtree1/lun1', + 'flexvol_name': 'volume1', + 'vserver': 'svm1', + 'use_rest': 'always', +} + +DEFAULT_ARGS_NO_VOL = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': '/vol/volume1/qtree1/lun1', + 'vserver': 'svm1', + 'use_rest': 'always', +} + +DEFAULT_ARGS_MIN = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'svm1', + 'use_rest': 'always', +} + + +def test_get_lun_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + assert my_obj.get_luns_rest() is None + + +def test_get_lun_one(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['one_lun']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + get_results = my_obj.get_luns_rest() + assert len(get_results) == 1 + assert get_results[0]['name'] == '/vol/volume1/qtree1/lun1' + + +def test_get_lun_one_no_path(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['one_lun']) + ]) + module_args = { + 'name': 'lun1', + 'flexvol_name': 'volume1', + } + my_obj = create_module(my_module, DEFAULT_ARGS_MIN, module_args) + get_results = my_obj.get_luns_rest() + assert len(get_results) == 1 + assert get_results[0]['name'] == '/vol/volume1/qtree1/lun1' + + +def test_get_lun_more(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['two_luns']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + get_results = my_obj.get_luns_rest() + assert len(get_results) == 2 + assert get_results[0]['name'] == '/vol/volume1/qtree1/lun1' + assert get_results[1]['name'] == '/vol/volume1/qtree1/lun2' + + +def test_error_get_lun_with_flexvol(): + register_responses([ + ('GET', 'cluster', 
SRR['is_rest']), + ('GET', 'storage/luns', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.get_luns_rest, 'fail')['msg'] + print('Info: %s' % error) + assert "Error getting LUN's for flexvol volume1: calling: storage/luns: got Expected error." == error + + +def test_error_get_lun_with_lun_path(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['lun_path'] = '/vol/volume1/qtree1/lun1' + my_obj.parameters.pop('flexvol_name') + + error = expect_and_capture_ansible_exception(my_obj.get_luns_rest, 'fail', '/vol/volume1/qtree1/lun1')['msg'] + print('Info: %s' % error) + assert "Error getting lun_path /vol/volume1/qtree1/lun1: calling: storage/luns: got Expected error." == error + + +def test_successfully_create_lun(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']), + ('POST', 'storage/luns', SRR['one_lun']), + ]) + module_args = { + 'size': 1073741824, + 'size_unit': 'bytes', + 'os_type': 'linux', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_create_lun_without_path(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']), + ('POST', 'storage/luns', SRR['one_lun']), + ]) + module_args = { + 'size': 1073741824, + 'size_unit': 'bytes', + 'os_type': 'linux', + 'flexvol_name': 'volume1', + 'name': 'lun' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_create_lun_missing_os_type(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['size'] = 1073741824 + my_obj.parameters['size_unit'] = 'bytes' + error = 
expect_and_capture_ansible_exception(my_obj.apply, 'fail')['msg'] + print('Info: %s' % error) + assert "The os_type parameter is required for creating a LUN with REST." == error + + +def test_error_create_lun_missing_size(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['os_type'] = 'linux' + error = expect_and_capture_ansible_exception(my_obj.apply, 'fail')['msg'] + print('Info: %s' % error) + assert "size is a required parameter for create." == error + + +def test_error_create_lun_missing_name(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + # Not sure why test_error_create_lun_missing_os_type requires this... but this test doesn't. They should follow the + # same path (unless we don't do a get when flexvol_name isn't set) + # ('GET', 'storage/luns', SRR['empty_records']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters.pop('flexvol_name') + my_obj.parameters['os_type'] = 'linux' + my_obj.parameters['size'] = 1073741824 + my_obj.parameters['size_unit'] = 'bytes' + error = expect_and_capture_ansible_exception(my_obj.apply, 'fail')['msg'] + print('Info: %s' % error) + assert "The flexvol_name parameter is required for creating a LUN."
== error + + +def test_successfully_create_lun_all_options(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/luns', SRR['empty_records']), + ('POST', 'storage/luns', SRR['one_lun']), + ]) + module_args = { + 'size': '1073741824', + 'os_type': 'linux', + 'space_reserve': True, + 'space_allocation': True, + 'comment': 'carchi8py was here', + 'qos_policy_group': 'qos_policy_group_1', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_create_lun(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'storage/luns', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['size'] = 1073741824 + my_obj.parameters['size_unit'] = 'bytes' + my_obj.parameters['os_type'] = 'linux' + + error = expect_and_capture_ansible_exception(my_obj.create_lun_rest, 'fail')['msg'] + print('Info: %s' % error) + assert "Error creating LUN /vol/volume1/qtree1/lun1: calling: storage/luns: got Expected error." 
== error + + +def test_successfully_delete_lun(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['one_lun']), + ('DELETE', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_records']), + ]) + module_args = { + 'size': 1073741824, + 'size_unit': 'bytes', + 'os_type': 'linux', + 'state': 'absent', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_delete_lun(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('DELETE', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['size'] = 1073741824 + my_obj.parameters['size_unit'] = 'bytes' + my_obj.parameters['os_type'] = 'linux' + my_obj.parameters['os_type'] = 'absent' + my_obj.uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + + error = expect_and_capture_ansible_exception(my_obj.delete_lun_rest, 'fail')['msg'] + print('Info: %s' % error) + assert "Error deleting LUN /vol/volume1/qtree1/lun1: calling: storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412: got Expected error." 
== error + + +def test_error_delete_lun_missing_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['size'] = 1073741824 + my_obj.parameters['size_unit'] = 'bytes' + my_obj.parameters['os_type'] = 'linux' + my_obj.parameters['os_type'] = 'absent' + + error = expect_and_capture_ansible_exception(my_obj.delete_lun_rest, 'fail')['msg'] + print('Info: %s' % error) + assert "Error deleting LUN /vol/volume1/qtree1/lun1: UUID not found" == error + + +def test_successfully_rename_lun(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['empty_records']), + ('GET', 'storage/luns', SRR['one_lun']), + ('PATCH', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_records']), + ]) + module_args = { + 'name': '/vol/volume1/qtree12/lun1', + 'from_name': '/vol/volume1/qtree1/lun1', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_rename_lun(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['name'] = '/vol/volume1/qtree12/lun1' + my_obj.parameters['from_name'] = '/vol/volume1/qtree1/lun1' + my_obj.uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + error = expect_and_capture_ansible_exception(my_obj.rename_lun_rest, 'fail', '/vol/volume1/qtree12/lun1')['msg'] + print('Info: %s' % error) + assert "Error renaming LUN /vol/volume1/qtree12/lun1: calling: storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412: got Expected error." 
== error + + +def test_error_rename_lun_missing_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['name'] = '/vol/volume1/qtree12/lun1' + my_obj.parameters['from_name'] = '/vol/volume1/qtree1/lun1' + error = expect_and_capture_ansible_exception(my_obj.rename_lun_rest, 'fail', '/vol/volume1/qtree12/lun1')['msg'] + print('Info: %s' % error) + assert "Error renaming LUN /vol/volume1/qtree12/lun1: UUID not found" == error + + +def test_successfully_resize_lun(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['one_lun']), + ('PATCH', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_records']), + ]) + module_args = { + 'size': 2147483648, + 'size_unit': 'bytes', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_resize_lun(): + ''' assert that + resize fails on error, except for a same size issue because of rounding errors + resize correctly return True/False to indicate that the size was changed or not + ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['generic_error']), + ('PATCH', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['error_same_size']), + ('PATCH', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['size'] = 2147483648 + my_obj.parameters['size_unit'] = 'bytes' + my_obj.uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + error = expect_and_capture_ansible_exception(my_obj.resize_lun_rest, 'fail')['msg'] + print('Info: %s' % error) + assert "Error resizing LUN /vol/volume1/qtree1/lun1: calling: storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412: got Expected error." 
== error + assert not my_obj.resize_lun_rest() + assert my_obj.resize_lun_rest() + + +def test_error_resize_lun_missing_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['size'] = 2147483648 + my_obj.parameters['size_unit'] = 'bytes' + error = expect_and_capture_ansible_exception(my_obj.resize_lun_rest, 'fail')['msg'] + print('Info: %s' % error) + assert "Error resizing LUN /vol/volume1/qtree1/lun1: UUID not found" == error + + +def test_successfully_modify_lun(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/luns', SRR['one_lun']), + ('PATCH', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_records']), + ]) + module_args = { + 'comment': 'carchi8py was here', + 'qos_policy_group': 'qos_policy_group_12', + 'space_reserve': False, + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_modify_lun_9_10(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/luns', SRR['one_lun']), + ('PATCH', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_records']), + ]) + module_args = { + 'comment': 'carchi8py was here', + 'qos_policy_group': 'qos_policy_group_12', + 'space_allocation': False, + 'space_reserve': False, + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_modify_lun(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'carchi8py was here' + my_obj.parameters['qos_policy_group'] = 'qos_policy_group_12' + my_obj.parameters['space_allocation'] = False + my_obj.parameters['space_reserve'] = False + my_obj.uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + modify = {'comment': 'carchi8py was here', 
'qos_policy_group': 'qos_policy_group_12', 'space_reserve': False, 'space_allocation': False} + error = expect_and_capture_ansible_exception(my_obj.modify_lun_rest, 'fail', modify)['msg'] + print('Info: %s' % error) + assert "Error modifying LUN /vol/volume1/qtree1/lun1: calling: storage/luns/1cd8a442-86d1-11e0-ae1c-123478563412: got Expected error." == error + + +def test_error_modify_lun_missing_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'carchi8py was here' + my_obj.parameters['qos_policy_group'] = 'qos_policy_group_12' + my_obj.parameters['space_allocation'] = False + my_obj.parameters['space_reserve'] = False + modify = {'comment': 'carchi8py was here', 'qos_policy_group': 'qos_policy_group_12', 'space_reserve': False, 'space_allocation': False} + error = expect_and_capture_ansible_exception(my_obj.modify_lun_rest, 'fail', modify)['msg'] + print('Info: %s' % error) + assert "Error modifying LUN /vol/volume1/qtree1/lun1: UUID not found" == error + + +def test_error_modify_lun_extra_option(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'carchi8py was here' + my_obj.parameters['qos_policy_group'] = 'qos_policy_group_12' + my_obj.parameters['space_allocation'] = False + my_obj.parameters['space_reserve'] = False + my_obj.uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + modify = {'comment': 'carchi8py was here', 'qos_policy_group': 'qos_policy_group_12', 'space_reserve': False, 'space_allocation': False, 'fake': 'fake'} + error = expect_and_capture_ansible_exception(my_obj.modify_lun_rest, 'fail', modify)['msg'] + print('Info: %s' % error) + assert "Error modifying LUN /vol/volume1/qtree1/lun1: Unknown parameters: {'fake': 'fake'}" == error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py new file mode 100644 index 000000000..0259edf03 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_mcc_mediator.py @@ -0,0 +1,124 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_metrocluster ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_mcc_mediator \ + import NetAppOntapMccipMediator as mediator_module # module under test + +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'get_mediator_with_no_results': (200, {'num_records': 0}, None), + 'get_mediator_with_results': (200, { + 'num_records': 1, + 'records': [{ + 'ip_address': '10.10.10.10', + 'uuid': 'ebe27c49-1adf-4496-8335-ab862aebebf2' + }] + }, None) +} + + +class TestMyModule(unittest.TestCase): + """ Unit tests for na_ontap_metrocluster """ + + def setUp(self): + self.mock_mediator = { + 'mediator_address': '10.10.10.10', + 'mediator_user': 'carchi', + 'mediator_password': 'netapp1!' 
+ } + + def mock_args(self): + return { + 'mediator_address': self.mock_mediator['mediator_address'], + 'mediator_user': self.mock_mediator['mediator_user'], + 'mediator_password': self.mock_mediator['mediator_password'], + 'hostname': 'test_host', + 'username': 'test_user', + 'password': 'test_pass!' + } + + def get_alias_mock_object(self): + alias_obj = mediator_module() + return alias_obj + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_create(self, mock_request): + """Test successful rest create""" + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_mediator_with_no_results'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_create_idempotency(self, mock_request): + """Test successful rest create""" + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_mediator_with_results'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_delete(self, mock_request): + """Test successful rest create""" + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_mediator_with_results'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['changed'] + + 
@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_delete_idempotency(self, mock_request): + """Test rest delete idempotency""" + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_mediator_with_no_results'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster.py new file mode 100644 index 000000000..5ccc3eb95 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster.py @@ -0,0 +1,117 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_metrocluster ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import
 +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_metrocluster \ + import NetAppONTAPMetroCluster as metrocluster_module # module under test + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected 
error"), + # module specific responses + 'get_metrocluster_with_results': (200, {"local": { + "cluster": { + 'name': 'cluster1' + }, + "configuration_state": "configuration_error", # TODO: put correct state + "partner_cluster_reachable": "true", + }}, None), + 'get_metrocluster_with_no_results': (200, None, None), + 'metrocluster_post': (200, {'job': { + 'uuid': 'fde79888-692a-11ea-80c2-005056b39fe7', + '_links': { + 'self': { + 'href': '/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7'}}} + }, None), + 'job': (200, { + "uuid": "cca3d070-58c6-11ea-8c0c-005056826c14", + "description": "POST /api/cluster/metrocluster", + "state": "success", + "message": "There are not enough disks in Pool1.", + "code": 2432836, + "start_time": "2020-02-26T10:35:44-08:00", + "end_time": "2020-02-26T10:47:38-08:00", + "_links": { + "self": { + "href": "/api/cluster/jobs/cca3d070-58c6-11ea-8c0c-005056826c14" + } + } + }, None) +} + + +class TestMyModule(unittest.TestCase): + """ Unit tests for na_ontap_metrocluster """ + + def setUp(self): + self.mock_metrocluster = { + 'partner_cluster_name': 'cluster1', + 'node_name': 'carchi_vsim1', + 'partner_node_name': 'carchi_vsim3' + } + + def mock_args(self): + return { + 'dr_pairs': [{ + 'node_name': self.mock_metrocluster['node_name'], + 'partner_node_name': self.mock_metrocluster['partner_node_name'], + }], + 'partner_cluster_name': self.mock_metrocluster['partner_cluster_name'], + 'hostname': 'test_host', + 'username': 'test_user', + 'password': 'test_pass!' 
+ } + + def get_alias_mock_object(self): + alias_obj = metrocluster_module() + return alias_obj + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_create(self, mock_request): + """Test successful rest create""" + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_metrocluster_with_no_results'], + SRR['metrocluster_post'], + SRR['job'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_create_idempotency(self, mock_request): + """Test rest create idempotency""" + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_metrocluster_with_results'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py new file mode 100644 index 000000000..2bcc558aa --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_metrocluster_dr_group.py @@ -0,0 +1,164 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_metrocluster ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# 
pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_metrocluster_dr_group \ + import NetAppONTAPMetroClusterDRGroup as mcc_dr_pairs_module # module under test + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'get_mcc_dr_pair_with_no_results': (200, {'records': [], 'num_records': 0}, None), + 'get_mcc_dr_pair_with_results': (200, {'records': [{'partner_cluster': {'name': 'rha2-b2b1_siteB'}, + 'dr_pairs': [{'node': {'name': 'rha17-a2'}, + 'partner': {'name': 'rha17-b2'}}, + {'node': {'name': 'rha17-b2'}, + 'partner': {'name': 'rha17-b1'}}], + 'id': '2'}], + 'num_records': 1}, None), + 'mcc_dr_pair_post': (200, {'job': { + 'uuid': 'fde79888-692a-11ea-80c2-005056b39fe7', + '_links': { + 'self': { + 'href': '/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7'}}} + }, None), + 'get_mcc_dr_node': (200, {'records': [{'dr_group_id': '1'}], 'num_records': 1}, None), + 'get_mcc_dr_node_none': (200, {'records': [], 'num_records': 0}, None), + 'job': (200, { + "uuid": "cca3d070-58c6-11ea-8c0c-005056826c14", + "description": "POST /api/cluster/metrocluster", + "state": "success", + "message": "There are not enough disks in Pool1.", + "code": 2432836, + "start_time": "2020-02-26T10:35:44-08:00", + "end_time": "2020-02-26T10:47:38-08:00", + "_links": { + "self": { + "href": "/api/cluster/jobs/cca3d070-58c6-11ea-8c0c-005056826c14" + } + } + }, None) +} + + +class TestMyModule(unittest.TestCase): + """ Unit tests for na_ontap_metrocluster """ + + def setUp(self): + self.mock_mcc_dr_pair = { + 
'partner_cluster_name': 'rha2-b2b1_siteB', + 'node_name': 'rha17-a2', + 'partner_node_name': 'rha17-b2', + 'node_name2': 'rha17-b2', + 'partner_node_name2': 'rha17-b1' + + } + + def mock_args(self): + return { + 'dr_pairs': [{ + 'node_name': self.mock_mcc_dr_pair['node_name'], + 'partner_node_name': self.mock_mcc_dr_pair['partner_node_name'], + }, { + 'node_name': self.mock_mcc_dr_pair['node_name2'], + 'partner_node_name': self.mock_mcc_dr_pair['partner_node_name2'], + }], + 'partner_cluster_name': self.mock_mcc_dr_pair['partner_cluster_name'], + 'hostname': 'test_host', + 'username': 'test_user', + 'password': 'test_pass!' + } + + def get_alias_mock_object(self): + alias_obj = mcc_dr_pairs_module() + return alias_obj + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_create(self, mock_request): + """Test successful rest create""" + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_mcc_dr_pair_with_no_results'], + SRR['get_mcc_dr_pair_with_no_results'], + SRR['mcc_dr_pair_post'], + SRR['job'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_create_idempotency(self, mock_request): + """Test rest create idempotency""" + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_mcc_dr_pair_with_results'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_delete(self, mock_request): + """Test 
successful rest delete""" + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_mcc_dr_pair_with_results'], + SRR['mcc_dr_pair_post'], + SRR['job'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_delete_idempotency(self, mock_request): + """Test rest delete idempotency""" + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_mcc_dr_pair_with_no_results'], + SRR['get_mcc_dr_pair_with_no_results'], + SRR['get_mcc_dr_node_none'], + SRR['get_mcc_dr_node_none'], + SRR['job'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_motd.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_motd.py new file mode 100644 index 000000000..64626e5ec --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_motd.py @@ -0,0 +1,164 @@ +# (c) 2019-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_motd """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + 
patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_warning_was_raised, expect_and_capture_ansible_exception, call_main, create_module, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_motd import NetAppONTAPMotd as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +def motd_info(msg): + return { + 'num-records': 1, + 'attributes-list': { + 'vserver-motd-info': { + 'message': msg, + 'vserver': 'ansible', + 'is-cluster-message-enabled': 'true'}} + } + + +ZRR = zapi_responses({ + 'motd_info': build_zapi_response(motd_info('motd_message')), + 'motd_none': build_zapi_response(motd_info('None')), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'use_rest', + 'motd_message': 'motd_message', + 'vserver': 'ansible', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + } + print('Info: %s' % call_main(my_main, module_args, fail=True)['msg']) + + +def test_ensure_motd_get_called(): + ''' fetching details of motd ''' + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.motd_get() is None + + +def test_ensure_get_called_existing(): + ''' test for existing motd''' + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.motd_get() + + 
+def test_motd_create(): + ''' test for creating motd''' + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['no_records']), + ('ZAPI', 'vserver-motd-modify-iter', ZRR['success']), + # idempotency + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info']), + # modify + ('ZAPI', 'vserver-motd-get-iter', ZRR['no_records']), + ('ZAPI', 'vserver-motd-modify-iter', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['message'] = 'new_message' + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_motd_delete(): + ''' test for deleting motd''' + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_info']), + ('ZAPI', 'vserver-motd-modify-iter', ZRR['motd_info']), + ('ZAPI', 'vserver-motd-get-iter', ZRR['no_records']), + ('ZAPI', 'vserver-motd-get-iter', ZRR['motd_none']), + ]) + module_args = { + 'state': 'absent', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['error']), + ('ZAPI', 'vserver-motd-modify-iter', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert expect_and_capture_ansible_exception(my_obj.motd_get, 'fail')['msg'] == zapi_error_message('Error fetching motd info') + assert expect_and_capture_ansible_exception(my_obj.modify_motd, 'fail')['msg'] == zapi_error_message('Error creating motd') + + +def test_rest_required(): + module_args = { + 'use_rest': 'always', + } + error_msg = 'netapp.ontap.na_ontap_motd is deprecated and only supports ZAPI. Please use netapp.ontap.na_ontap_login_messages.' 
+ assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == 'Error: %s' % error_msg + register_responses([ + ('ZAPI', 'vserver-motd-get-iter', ZRR['no_records']), + ('ZAPI', 'vserver-motd-modify-iter', ZRR['success']), + ('ZAPI', 'vserver-motd-get-iter', ZRR['no_records']), + ('ZAPI', 'vserver-motd-modify-iter', ZRR['success']), + ]) + module_args = { + 'use_rest': 'auto', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_warning_was_raised('Falling back to ZAPI: %s' % error_msg) + module_args = { + 'use_rest': 'NevEr', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_warning_was_raised(error_msg) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_NETAPP_LIB', False) +def test_module_fail_when_netapp_lib_missing(): + ''' required lib missing ''' + module_args = { + 'use_rest': 'never', + } + assert 'Error: the python NetApp-Lib module is required. Import error: None' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_mappings.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_mappings.py new file mode 100644 index 000000000..5294a9537 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_mappings.py @@ -0,0 +1,282 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_name_mappings """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, create_module, create_and_apply, 
AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_name_mappings \ + import NetAppOntapNameMappings as my_module # module under test + + +# REST API canned responses when mocking send_request +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'mapping_record': ( + 200, + { + "records": [ + { + "client_match": "10.254.101.111/28", + "direction": "win_unix", + "index": 1, + "pattern": "ENGCIFS_AD_USER", + "replacement": "unix_user1", + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + } + } + ], + "num_records": 1 + }, None + ), + 'mapping_record1': ( + 200, + { + "records": [ + { + "direction": "win_unix", + "index": 2, + "pattern": "ENGCIFS_AD_USERS", + "replacement": "unix_user", + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + } + } + ], + "num_records": 1 + }, None + ), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'vserver': 'svm1', + 'direction': 'win_unix', + 'index': '1' +} + + +def test_get_name_mappings_rest(): + ''' Test retrieving name mapping record ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['mapping_record']), + ]) + name_obj = create_module(my_module, DEFAULT_ARGS) + result = name_obj.get_name_mappings_rest() + assert result + + +def test_error_get_name_mappings_rest(): + ''' Test error retrieving name mapping record ''' + register_responses([ + ('GET', 'cluster', 
SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['generic_error']), + ]) + error = create_and_apply(my_module, DEFAULT_ARGS, fail=True)['msg'] + msg = "calling: name-services/name-mappings: got Expected error." + assert msg in error + + +def test_error_direction_s3_choices(): + ''' Test error when set s3 choices in older version ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']) + ]) + error = create_module(my_module, DEFAULT_ARGS, {'direction': 's3_unix'}, fail=True)['msg'] + msg = "Error: direction s3_unix requires ONTAP 9.12.1 or later" + assert msg in error + + +def test_create_name_mappings_rest(): + ''' Test create name mapping record ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['empty_records']), + ('POST', 'name-services/name-mappings', SRR['empty_good']), + ]) + module_args = { + "pattern": "ENGCIFS_AD_USER", + "replacement": "unix_user1", + "client_match": "10.254.101.111/28", + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_create_name_mappings_rest(): + ''' Test error create name mapping record ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['empty_records']), + ('POST', 'name-services/name-mappings', SRR['generic_error']), + ]) + module_args = { + "pattern": "ENGCIFS_AD_USER", + "replacement": "unix_user1", + "client_match": "10.254.101.111/28", + } + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error on creating name mappings rest:" + assert msg in error + + +def test_delete_name_mappings_rest(): + ''' Test delete name mapping record ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['mapping_record']), + ('DELETE', 'name-services/name-mappings/02c9e252-41be-11e9-81d5-00a0986138f7/win_unix/1', SRR['empty_good']), + ]) + module_args = { 
+ 'state': 'absent' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_name_mappings_rest_error(): + ''' Test error delete name mapping record ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['mapping_record']), + ('DELETE', 'name-services/name-mappings/02c9e252-41be-11e9-81d5-00a0986138f7/win_unix/1', SRR['generic_error']), + ]) + module_args = { + 'state': 'absent' + } + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error on deleting name mappings rest:" + assert msg in error + + +def test_create_idempotent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['mapping_record']) + ]) + module_args = { + "pattern": "ENGCIFS_AD_USER", + "replacement": "unix_user1", + "client_match": "10.254.101.111/28", + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_idempotent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['empty_records']) + ]) + module_args = { + 'state': 'absent' + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_name_mappings_pattern_rest(): + ''' Test modify name mapping pattern ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['mapping_record']), + ('PATCH', 'name-services/name-mappings/02c9e252-41be-11e9-81d5-00a0986138f7/win_unix/1', SRR['empty_good']), + ]) + module_args = { + "pattern": "ENGCIFS_AD_USERS", + "replacement": "unix_user2", + "client_match": "10.254.101.112/28", + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_name_mappings_replacement_rest(): + ''' Test modify name mapping replacement ''' + register_responses([ + ('GET', 'cluster', 
SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['mapping_record1']), + ('PATCH', 'name-services/name-mappings/02c9e252-41be-11e9-81d5-00a0986138f7/win_unix/1', SRR['empty_good']), + ]) + module_args = { + "replacement": "unix_user2" + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_name_mappings_client_match_rest(): + ''' Test modify name mapping client match ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['mapping_record']), + ('PATCH', 'name-services/name-mappings/02c9e252-41be-11e9-81d5-00a0986138f7/win_unix/1', SRR['empty_good']), + ]) + module_args = { + "client_match": "10.254.101.112/28", + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_modify_name_mappings_rest(): + ''' Test error modify name mapping ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['mapping_record']), + ('PATCH', 'name-services/name-mappings/02c9e252-41be-11e9-81d5-00a0986138f7/win_unix/1', SRR['generic_error']), + ]) + module_args = { + "pattern": "ENGCIFS_AD_USERS", + "replacement": "unix_user2", + "client_match": "10.254.101.112/28", + } + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error on modifying name mappings rest:" + assert msg in error + + +def test_swap_name_mappings_new_index_rest(): + ''' Test swap name mapping positions ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'name-services/name-mappings', SRR['empty_records']), + ('GET', 'name-services/name-mappings', SRR['mapping_record1']), + ('PATCH', 'name-services/name-mappings/02c9e252-41be-11e9-81d5-00a0986138f7/win_unix/2', SRR['empty_good']), + ]) + module_args = { + "index": "1", + "from_index": "2" + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def 
test_parameters_for_create_name_mappings_rest(): + ''' Validate parameters for create name mapping record ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'name-services/name-mappings', SRR['empty_records']), + ]) + module_args = { + "client_match": "10.254.101.111/28", + } + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error creating name mappings for an SVM, pattern and replacement are required in create." + assert msg in error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_service_switch.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_service_switch.py new file mode 100644 index 000000000..3b91e9be7 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_name_service_switch.py @@ -0,0 +1,181 @@ +# (c) 2019-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_name_service_switch ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import create_module,\ + patch_ansible, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_name_service_switch \ + import NetAppONTAPNsswitch as nss_module # 
module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'https': 'True', + 'use_rest': 'never', + 'state': 'present', + 'vserver': 'test_vserver', + 'database_type': 'namemap', + 'sources': 'files,ldap', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!' +} + + +nss_info = { + 'num-records': 1, + 'attributes-list': { + 'namservice-nsswitch-config-info': { + 'nameservice-database': 'namemap', + 'nameservice-sources': {'nss-source-type': 'files,ldap'} + } + } +} + + +ZRR = zapi_responses({ + 'nss_info': build_zapi_response(nss_info) +}) + + +def test_module_fail_when_required_args_missing(): + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "vserver", "database_type"] + error = create_module(nss_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_get_nonexistent_nss(): + register_responses([ + ('nameservice-nsswitch-get-iter', ZRR['no_records']) + ]) + nss_obj = create_module(nss_module, DEFAULT_ARGS) + assert nss_obj.get_name_service_switch() is None + + +def test_get_existing_nss(): + register_responses([ + ('nameservice-nsswitch-get-iter', ZRR['nss_info']) + ]) + nss_obj = create_module(nss_module, DEFAULT_ARGS) + assert nss_obj.get_name_service_switch() + + +def test_successfully_create(): + register_responses([ + ('nameservice-nsswitch-get-iter', ZRR['no_records']), + ('nameservice-nsswitch-create', ZRR['success']) + ]) + assert create_and_apply(nss_module, DEFAULT_ARGS)['changed'] + + +def test_successfully_modify(): + register_responses([ + ('nameservice-nsswitch-get-iter', ZRR['nss_info']), + ('nameservice-nsswitch-modify', ZRR['success']) + ]) + assert 
create_and_apply(nss_module, DEFAULT_ARGS, {'sources': 'files'})['changed'] + + +def test_successfully_delete(): + register_responses([ + ('nameservice-nsswitch-get-iter', ZRR['nss_info']), + ('nameservice-nsswitch-destroy', ZRR['success']) + ]) + assert create_and_apply(nss_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_if_all_methods_catch_exception_zapi(): + ''' test error zapi - get/create/modify/delete''' + register_responses([ + ('nameservice-nsswitch-get-iter', ZRR['error']), + ('nameservice-nsswitch-create', ZRR['error']), + ('nameservice-nsswitch-modify', ZRR['error']), + ('nameservice-nsswitch-destroy', ZRR['error']) + ]) + nss_obj = create_module(nss_module, DEFAULT_ARGS) + + assert 'Error fetching name service switch' in expect_and_capture_ansible_exception(nss_obj.get_name_service_switch, 'fail')['msg'] + assert 'Error on creating name service switch' in expect_and_capture_ansible_exception(nss_obj.create_name_service_switch, 'fail')['msg'] + assert 'Error on modifying name service switch' in expect_and_capture_ansible_exception(nss_obj.modify_name_service_switch, 'fail', {})['msg'] + assert 'Error on deleting name service switch' in expect_and_capture_ansible_exception(nss_obj.delete_name_service_switch, 'fail')['msg'] + + +SRR = rest_responses({ + 'nss_info': (200, {"records": [ + { + 'nsswitch': { + 'group': ['files'], + 'hosts': ['files', 'dns'], + 'namemap': ['files'], + 'netgroup': ['files'], + 'passwd': ['files'] + }, + 'uuid': '6647fa13'} + ], 'num_records': 1}, None), + 'nss_info_no_record': (200, {"records": [ + {'uuid': '6647fa13'} + ], 'num_records': 1}, None), + 'svm_uuid': (200, {"records": [ + {'uuid': '6647fa13'} + ], "num_records": 1}, None) +}) + + +def test_successfully_modify_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['nss_info_no_record']), + ('PATCH', 'svm/svms/6647fa13', SRR['success']), + ]) + args = {'sources': 'files', 'use_rest': 'always'} + assert 
create_and_apply(nss_module, DEFAULT_ARGS, args)['changed'] + + +def test_error_get_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['zero_records']) + ]) + error = "Error: Specified vserver test_vserver not found" + assert error in create_and_apply(nss_module, DEFAULT_ARGS, {'use_rest': 'always'}, fail=True)['msg'] + + +def test_error_delete_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['nss_info']) + ]) + args = {'state': 'absent', 'use_rest': 'always'} + error = "Error: deleting name service switch not supported in REST." + assert error in create_and_apply(nss_module, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_if_all_methods_catch_exception_rest(): + ''' test error rest - get/modify''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['generic_error']), + ('PATCH', 'svm/svms/6647fa13', SRR['generic_error']), + ]) + nss_obj = create_module(nss_module, DEFAULT_ARGS, {'use_rest': 'always'}) + nss_obj.svm_uuid = '6647fa13' + assert 'Error fetching name service switch' in expect_and_capture_ansible_exception(nss_obj.get_name_service_switch, 'fail')['msg'] + assert 'Error on modifying name service switch' in expect_and_capture_ansible_exception(nss_obj.modify_name_service_switch_rest, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ndmp.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ndmp.py new file mode 100644 index 000000000..78278bc7b --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ndmp.py @@ -0,0 +1,196 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import 
pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ndmp \ + import NetAppONTAPNdmp as ndmp_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'get_uuid': (200, {'records': [{'uuid': 'testuuid'}]}, None), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, 'Error fetching ndmp from ansible: NetApp API failed. 
Reason - Unexpected error:', + "REST API currently does not support 'backup_log_enable, ignore_ctime_enabled'"), + 'get_ndmp_uuid': (200, {"records": [{"svm": {"name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"}}]}, None), + 'get_ndmp': (200, {"enabled": True, "authentication_types": ["test"], + "records": [{"svm": {"name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"}}]}, None) +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.type = kind + self.data = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'ndmp': + xml = self.build_ndmp_info(self.data) + if self.type == 'error': + error = netapp_utils.zapi.NaApiError('test', 'error') + raise error + self.xml_out = xml + return xml + + @staticmethod + def build_ndmp_info(ndmp_details): + ''' build xml data for ndmp ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'num-records': 1, + 'attributes-list': { + 'ndmp-vserver-attributes-info': { + 'ignore_ctime_enabled': ndmp_details['ignore_ctime_enabled'], + 'backup_log_enable': ndmp_details['backup_log_enable'], + + 'authtype': [ + {'ndmpd-authtypes': 'plaintext'}, + {'ndmpd-authtypes': 'challenge'} + ] + } + } + } + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_ndmp = { + 'ignore_ctime_enabled': True, + 'backup_log_enable': 'false', + 'authtype': 'plaintext', + 'enable': True + } + + def mock_args(self, rest=False): + if rest: + return { + 'authtype': self.mock_ndmp['authtype'], + 'enable': True, + 'vserver': 'ansible', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'https': 'False' + } + 
else: + return { + 'vserver': 'ansible', + 'authtype': self.mock_ndmp['authtype'], + 'ignore_ctime_enabled': self.mock_ndmp['ignore_ctime_enabled'], + 'backup_log_enable': self.mock_ndmp['backup_log_enable'], + 'enable': self.mock_ndmp['enable'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never' + } + + def get_ndmp_mock_object(self, kind=None, cx_type='zapi'): + """ + Helper method to return an na_ontap_ndmp object + :param kind: passes this param to MockONTAPConnection() + :return: na_ontap_ndmp object + """ + obj = ndmp_module() + if cx_type == 'zapi': + obj.asup_log_for_cserver = Mock(return_value=None) + obj.server = Mock() + obj.server.invoke_successfully = Mock() + if kind is None: + obj.server = MockONTAPConnection() + else: + obj.server = MockONTAPConnection(kind=kind, data=self.mock_ndmp) + return obj + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ndmp.NetAppONTAPNdmp.ndmp_get_iter') + def test_successful_modify(self, ger_ndmp): + ''' Test successful modify ndmp''' + data = self.mock_args() + set_module_args(data) + current = { + 'ignore_ctime_enabled': False, + 'backup_log_enable': True + } + ger_ndmp.side_effect = [ + current + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_ndmp_mock_object('ndmp').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ndmp.NetAppONTAPNdmp.ndmp_get_iter') + def test_modify_error(self, ger_ndmp): + ''' Test modify error ''' + data = self.mock_args() + set_module_args(data) + current = { + 'ignore_ctime_enabled': False, + 'backup_log_enable': True + } + ger_ndmp.side_effect = [ + current + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_ndmp_mock_object('error').apply() + assert exc.value.args[0]['msg'] == 'Error modifying ndmp on ansible: NetApp API failed. 
Reason - test:error' + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error(self, mock_request): + data = self.mock_args() + data['use_rest'] = 'Always' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_ndmp_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['msg'] == SRR['generic_error'][3] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successfully_modify(self, mock_request): + data = self.mock_args(rest=True) + data['use_rest'] = 'Always' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], # Was not called because of Always, but we now call it :) + SRR['get_ndmp_uuid'], # for get svm uuid: protocols/ndmp/svms + SRR['get_ndmp'], # for get ndmp details: '/protocols/ndmp/svms/' + uuid + SRR['get_ndmp_uuid'], # for get svm uuid: protocols/ndmp/svms (before modify) + SRR['empty_good'], # modify (patch) + SRR['end_of_sequence'], + ] + my_obj = ndmp_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py new file mode 100644 index 000000000..7e3e58783 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_ifgrp.py @@ -0,0 +1,737 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat import 
unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_no_warnings, set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp \ + import NetAppOntapIfGrp as ifgrp_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.kind == 'ifgrp': + xml = self.build_ifgrp_info(self.params) + elif self.kind == 'ifgrp-ports': + xml = self.build_ifgrp_ports_info(self.params) + elif self.kind == 'ifgrp-fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_ifgrp_info(ifgrp_details): + ''' build xml data for ifgrp-info ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'num-records': 1, + 'attributes-list': { + 'net-port-info': { + 'port': ifgrp_details['name'], + 'ifgrp-distribution-function': 'mac', + 'ifgrp-mode': ifgrp_details['mode'], + 'node': ifgrp_details['node'] + } + } + } + xml.translate_struct(attributes) + return xml + + @staticmethod + def 
build_ifgrp_ports_info(data): + ''' build xml data for ifgrp-ports ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'attributes': { + 'net-ifgrp-info': { + 'ports': [ + {'lif-bindable': data['ports'][0]}, + {'lif-bindable': data['ports'][1]}, + {'lif-bindable': data['ports'][2]} + ] + } + } + } + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_ifgrp = { + 'name': 'test', + 'port': 'a1', + 'node': 'test_vserver', + 'mode': 'something' + } + + def mock_args(self): + return { + 'name': self.mock_ifgrp['name'], + 'distribution_function': 'mac', + 'ports': [self.mock_ifgrp['port']], + 'node': self.mock_ifgrp['node'], + 'mode': self.mock_ifgrp['mode'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'feature_flags': {'no_cserver_ems': True}, + 'use_rest': 'never' + } + + def get_ifgrp_mock_object(self, kind=None, data=None): + """ + Helper method to return an na_ontap_net_ifgrp object + :param kind: passes this param to MockONTAPConnection() + :return: na_ontap_net_ifgrp object + """ + obj = ifgrp_module() + obj.autosupport_log = Mock(return_value=None) + if data is None: + data = self.mock_ifgrp + obj.server = MockONTAPConnection(kind=kind, data=data) + return obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + ifgrp_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_get_nonexistent_ifgrp(self): + ''' Test if get_ifgrp returns None for non-existent ifgrp ''' + set_module_args(self.mock_args()) + result = self.get_ifgrp_mock_object().get_if_grp() + assert result is None + + def test_get_existing_ifgrp(self): + ''' Test if get_ifgrp returns details for existing ifgrp ''' + set_module_args(self.mock_args()) + result = 
self.get_ifgrp_mock_object('ifgrp').get_if_grp() + assert result['name'] == self.mock_ifgrp['name'] + + def test_successful_create(self): + ''' Test successful create ''' + data = self.mock_args() + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_ifgrp_mock_object().apply() + assert exc.value.args[0]['changed'] + + def test_successful_delete(self): + ''' Test delete existing volume ''' + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_ifgrp_mock_object('ifgrp').apply() + assert exc.value.args[0]['changed'] + + def test_successful_modify(self): + ''' Test delete existing volume ''' + data = self.mock_args() + data['ports'] = ['1', '2', '3'] + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_ifgrp_mock_object('ifgrp').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.get_if_grp') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.create_if_grp') + def test_create_called(self, create_ifgrp, get_ifgrp): + data = self.mock_args() + set_module_args(data) + get_ifgrp.return_value = None + with pytest.raises(AnsibleExitJson) as exc: + self.get_ifgrp_mock_object().apply() + get_ifgrp.assert_called_with() + create_ifgrp.assert_called_with() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.add_port_to_if_grp') + def test_if_ports_are_added_after_create(self, add_ports): + ''' Test successful create ''' + data = self.mock_args() + set_module_args(data) + self.get_ifgrp_mock_object().create_if_grp() + add_ports.assert_called_with('a1') + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.get_if_grp') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.delete_if_grp') + 
def test_delete_called(self, delete_ifgrp, get_ifgrp): + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + get_ifgrp.return_value = Mock() + with pytest.raises(AnsibleExitJson) as exc: + self.get_ifgrp_mock_object().apply() + get_ifgrp.assert_called_with() + delete_ifgrp.assert_called_with(None) + + def test_get_return_value(self): + data = self.mock_args() + set_module_args(data) + result = self.get_ifgrp_mock_object('ifgrp').get_if_grp() + assert result['name'] == data['name'] + assert result['mode'] == data['mode'] + assert result['node'] == data['node'] + + def test_get_ports_list(self): + data = self.mock_args() + data['ports'] = ['e0a', 'e0b', 'e0c'] + set_module_args(data) + result = self.get_ifgrp_mock_object('ifgrp-ports', data).get_if_grp_ports() + assert result['ports'] == data['ports'] + + def test_add_port_packet(self): + data = self.mock_args() + set_module_args(data) + obj = self.get_ifgrp_mock_object('ifgrp') + obj.add_port_to_if_grp('addme') + assert obj.server.xml_in['port'] == 'addme' + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.remove_port_to_if_grp') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.add_port_to_if_grp') + def test_modify_ports_calls_remove_existing_ports(self, add_port, remove_port): + ''' Test if already existing ports are not being added again ''' + data = self.mock_args() + data['ports'] = ['1', '2'] + set_module_args(data) + self.get_ifgrp_mock_object('ifgrp').modify_ports(current_ports=['1', '2', '3']) + assert remove_port.call_count == 1 + assert add_port.call_count == 0 + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.remove_port_to_if_grp') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_ifgrp.NetAppOntapIfGrp.add_port_to_if_grp') + def test_modify_ports_calls_add_new_ports(self, add_port, remove_port): + ''' Test new ports are 
added ''' + data = self.mock_args() + data['ports'] = ['1', '2', '3', '4'] + set_module_args(data) + self.get_ifgrp_mock_object('ifgrp').modify_ports(current_ports=['1', '2']) + assert remove_port.call_count == 0 + assert add_port.call_count == 2 + + def test_get_ports_returns_none(self): + set_module_args(self.mock_args()) + result = self.get_ifgrp_mock_object().get_if_grp_ports() + assert result['ports'] == [] + result = self.get_ifgrp_mock_object().get_if_grp() + assert result is None + + def test_if_all_methods_catch_exception(self): + set_module_args(self.mock_args()) + with pytest.raises(AnsibleFailJson) as exc: + self.get_ifgrp_mock_object('ifgrp-fail').get_if_grp() + assert 'Error getting if_group test' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + self.get_ifgrp_mock_object('ifgrp-fail').create_if_grp() + assert 'Error creating if_group test' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + self.get_ifgrp_mock_object('ifgrp-fail').get_if_grp_ports() + assert 'Error getting if_group ports test' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + self.get_ifgrp_mock_object('ifgrp-fail').add_port_to_if_grp('test-port') + assert 'Error adding port test-port to if_group test' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + self.get_ifgrp_mock_object('ifgrp-fail').remove_port_to_if_grp('test-port') + assert 'Error removing port test-port to if_group test' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + self.get_ifgrp_mock_object('ifgrp-fail').delete_if_grp() + assert 'Error deleting if_group test' in exc.value.args[0]['msg'] + + +def default_args(): + args = { + 'state': 'present', + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common 
responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_6': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy')), None), + 'is_rest_9_7': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'ifgrp_record': (200, { + "num_records": 2, + "records": [ + { + 'lag': { + 'distribution_policy': 'ip', + 'mode': 'multimode_lacp' + }, + 'name': 'a0b', + 'node': {'name': 'mohan9cluster2-01'}, + 'type': 'lag', + 'uuid': '1b830a46-47cd-11ec-90df-005056b3dfc8' + }, + { + 'broadcast_domain': { + 'ipspace': {'name': 'ip1'}, + 'name': 'test1' + }, + 'lag': { + 'distribution_policy': 'ip', + 'member_ports': [ + { + 'name': 'e0d', + 'node': {'name': 'mohan9cluster2-01'}, + }], + 'mode': 'multimode_lacp'}, + 'name': 'a0d', + 'node': {'name': 'mohan9cluster2-01'}, + 'type': 'lag', + 'uuid': '5aeebc96-47d7-11ec-90df-005056b3dfc8' + }, + { + 'broadcast_domain': { + 'ipspace': {'name': 'ip1'}, + 'name': 'test1' + }, + 'lag': { + 'distribution_policy': 'ip', + 'member_ports': [ + { + 'name': 'e0c', + 'node': {'name': 'mohan9cluster2-01'}, + }, + { + 'name': 'e0a', + 'node': {'name': 'mohan9cluster2-01'}, + }], + 'mode': 'multimode_lacp' + }, + 'name': 'a0d', + 'node': {'name': 'mohan9cluster2-01'}, + 'type': 'lag', + 'uuid': '5aeebc96-47d7-11ec-90df-005056b3dsd4' + }] + }, None), + 'ifgrp_record_create': (200, { + "num_records": 1, + "records": [ + { + 'lag': { + 'distribution_policy': 'ip', + 'mode': 'multimode_lacp' + }, + 'name': 'a0b', + 'node': {'name': 
'mohan9cluster2-01'}, + 'type': 'lag', + 'uuid': '1b830a46-47cd-11ec-90df-005056b3dfc8' + }] + }, None), + 'ifgrp_record_modify': (200, { + "num_records": 1, + "records": [ + { + 'broadcast_domain': { + 'ipspace': {'name': 'ip1'}, + 'name': 'test1' + }, + 'lag': { + 'distribution_policy': 'ip', + 'member_ports': [ + { + 'name': 'e0c', + 'node': {'name': 'mohan9cluster2-01'}, + }, + { + 'name': 'e0d', + 'node': {'name': 'mohan9cluster2-01'}, + }], + 'mode': 'multimode_lacp' + }, + 'name': 'a0d', + 'node': {'name': 'mohan9cluster2-01'}, + 'type': 'lag', + 'uuid': '5aeebc96-47d7-11ec-90df-005056b3dsd4' + }] + }, None) +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(dict(hostname='')) + ifgrp_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'missing required arguments:' + assert msg in exc.value.args[0]['msg'] + + +def test_module_fail_when_broadcast_domain_ipspace(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(dict(hostname='')) + ifgrp_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'missing required arguments:' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_broadcast_domain_ipspace_rest_ontap96(mock_request, patch_ansible): + '''throw error if broadcast_domain and ipspace are not set''' + args = dict(default_args()) + args['ports'] = "e0c" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + args['node'] = "mohan9cluster2-01" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_6'], # get version + ] + with pytest.raises(AnsibleFailJson) as exc: + ifgrp_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'are mandatory fields with ONTAP 9.6 and 
9.7' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_broadcast_domain_ipspace_rest_required_together(mock_request, patch_ansible): + '''throw error if one of broadcast_domain or ipspace only set''' + args = dict(default_args()) + args['ports'] = "e0c" + args['distribution_function'] = "ip" + args['ipspace'] = "Default" + args['mode'] = "multimode_lacp" + args['node'] = "mohan9cluster2-01" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_6'], # get version + ] + with pytest.raises(AnsibleFailJson) as exc: + ifgrp_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'parameters are required together: broadcast_domain, ipspace' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_ifgrp_not_found_from_lag_ports(mock_request, patch_ansible): + ''' throw error if lag not found with both ports and from_lag_ports ''' + args = dict(default_args()) + args['node'] = "mohan9-vsim1" + args['ports'] = "e0f" + args['from_lag_ports'] = "e0l" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record'] # get for ports + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = "Error: cannot find LAG matching from_lag_ports: '['e0l']'." 
+ assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_from_lag_ports_1_or_more_ports_not_in_current(mock_request, patch_ansible): + ''' throw error if 1 or more from_lag_ports not found in current ''' + args = dict(default_args()) + args['node'] = "mohan9-vsim1" + args['ports'] = "e0f" + args['from_lag_ports'] = "e0d,e0h" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + ] + my_obj = ifgrp_module() + my_obj.current_records = SRR['ifgrp_record'][1]['records'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = "Error: cannot find LAG matching from_lag_ports: '['e0d', 'e0h']'." + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_from_lag_ports_are_in_different_LAG(mock_request, patch_ansible): + ''' throw error if ports in from_lag_ports are in different LAG ''' + args = dict(default_args()) + args['node'] = "mohan9-vsim1" + args['ports'] = "e0f" + args['from_lag_ports'] = "e0d,e0c" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record'] # get + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = "'e0d, e0c' are in different LAG" + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_try_to_delete_only_partial_match_found(mock_request, patch_ansible): + ''' delete only with exact match of ports''' + args = dict(default_args()) + args['node'] = 
"mohan9cluster2-01" + args['ports'] = "e0c" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + args['broadcast_domain'] = "test1" + args['ipspace'] = "ip1" + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record'], # get + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_try_to_delete_ports_in_different_LAG(mock_request, patch_ansible): + ''' if ports are in different LAG, not to delete and returk ok''' + args = dict(default_args()) + args['node'] = "mohan9cluster2-01" + args['ports'] = "e0c,e0d" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + args['broadcast_domain'] = "test1" + args['ipspace'] = "ip1" + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record'], # get + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_partial_match(mock_request, patch_ansible): + '''fail if partial match only found in from_lag_ports''' + args = dict(default_args()) + args['node'] = "mohan9cluster2-01" + args['from_lag_ports'] = "e0c,e0a,e0v" + args['ports'] = "e0n" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + args['state'] = 'present' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record'], # get + ] + my_obj = ifgrp_module() + with 
pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = "Error: cannot find LAG matching from_lag_ports: '['e0c', 'e0a', 'e0v']'." + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_partial_match_ports_empty_record_from_lag_ports(mock_request, patch_ansible): + ''' remove port e0a from ifgrp a0d with ports e0d,e0c''' + args = dict(default_args()) + args['node'] = "mohan9cluster2-01" + args['ports'] = "e0c" + args['from_lag_ports'] = "e0k" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record_modify'] # get + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = "Error: cannot find LAG matching from_lag_ports: '['e0k']'." 
+ assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_create_ifgrp_port(mock_request, patch_ansible): + ''' test create ifgrp ''' + args = dict(default_args()) + args['node'] = "mohan9-vsim1" + args['ports'] = "e0c,e0a" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record_create'], # get + SRR['empty_good'], # create + SRR['end_of_sequence'] + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_create_ifgrp_port_idempotent(mock_request, patch_ansible): + ''' test create ifgrp idempotent ''' + args = dict(default_args()) + args['node'] = "mohan9cluster2-01" + args['ports'] = "e0c,e0a" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record'], # get + SRR['end_of_sequence'] + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_modify_ifgrp_port(mock_request, patch_ansible): + ''' remove port e0a from ifgrp a0d with ports e0d,e0c''' + args = dict(default_args()) + args['node'] = "mohan9cluster2-01" + args['ports'] = "e0c" + args['from_lag_ports'] = "e0c,e0d" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + set_module_args(args) + mock_request.side_effect = [ 
+ SRR['is_rest_9_8'], # get version + SRR['ifgrp_record_modify'], # get + SRR['empty_good'], # modify + SRR['end_of_sequence'] + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_modify_ifgrp_broadcast_domain(mock_request, patch_ansible): + ''' modify broadcast domain and ipspace''' + args = dict(default_args()) + args['node'] = "mohan9cluster2-01" + args['ports'] = "e0c,e0a" + args['from_lag_ports'] = 'e0c' + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + args['broadcast_domain'] = "test1" + args['ipspace'] = "Default" + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record'], # get + SRR['empty_good'], # modify + SRR['end_of_sequence'] + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_delete_ifgrp(mock_request, patch_ansible): + ''' test delete LAG''' + args = dict(default_args()) + args['node'] = "mohan9cluster2-01" + args['ports'] = "e0c,e0a" + args['distribution_function'] = "ip" + args['mode'] = "multimode_lacp" + args['broadcast_domain'] = "test1" + args['ipspace'] = "ip1" + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['ifgrp_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = ifgrp_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + 
assert_no_warnings() diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_port.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_port.py new file mode 100644 index 000000000..b58e02d1b --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_port.py @@ -0,0 +1,331 @@ +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_no_warnings, set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_port \ + import NetAppOntapNetPort as port_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.type = kind + self.data = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + if self.type == 'raise': + raise 
netapp_utils.zapi.NaApiError(code='1111', message='forcing an error') + self.xml_in = xml + if self.type == 'port': + xml = self.build_port_info(self.data) + self.xml_out = xml + return xml + + @staticmethod + def build_port_info(port_details): + ''' build xml data for net-port-info ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'num-records': 1, + 'attributes-list': { + 'net-port-info': { + # 'port': port_details['port'], + 'mtu': str(port_details['mtu']), + 'is-administrative-auto-negotiate': 'true', + 'is-administrative-up': str(port_details['up_admin']).lower(), # ZAPI uses 'true', 'false' + 'ipspace': 'default', + 'administrative-flowcontrol': port_details['flowcontrol_admin'], + 'node': port_details['node'] + } + } + } + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.mock_port = { + 'node': 'test', + 'ports': 'a1', + 'up_admin': True, + 'flowcontrol_admin': 'something', + 'mtu': 1000 + } + + def mock_args(self): + return { + 'node': self.mock_port['node'], + 'flowcontrol_admin': self.mock_port['flowcontrol_admin'], + 'ports': [self.mock_port['ports']], + 'mtu': self.mock_port['mtu'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'feature_flags': {'no_cserver_ems': True}, + 'use_rest': 'never' + } + + def get_port_mock_object(self, kind=None, data=None): + """ + Helper method to return an na_ontap_net_port object + :param kind: passes this param to MockONTAPConnection() + :return: na_ontap_net_port object + """ + obj = port_module() + obj.autosupport_log = Mock(return_value=None) + if data is None: + data = self.mock_port + obj.server = MockONTAPConnection(kind=kind, data=data) + return obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + 
set_module_args({}) + port_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_get_nonexistent_port(self): + ''' Test if get_net_port returns None for non-existent port ''' + set_module_args(self.mock_args()) + result = self.get_port_mock_object().get_net_port('test') + assert result is None + + def test_get_existing_port(self): + ''' Test if get_net_port returns details for existing port ''' + set_module_args(self.mock_args()) + result = self.get_port_mock_object('port').get_net_port('test') + assert result['mtu'] == self.mock_port['mtu'] + assert result['flowcontrol_admin'] == self.mock_port['flowcontrol_admin'] + assert result['up_admin'] == self.mock_port['up_admin'] + + def test_successful_modify(self): + ''' Test modify_net_port ''' + data = self.mock_args() + data['mtu'] = '2000' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object('port').apply() + assert exc.value.args[0]['changed'] + + def test_successful_modify_int(self): + ''' Test modify_net_port ''' + data = self.mock_args() + data['mtu'] = 2000 + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object('port').apply() + assert exc.value.args[0]['changed'] + print(exc.value.args[0]['modify']) + + def test_successful_modify_bool(self): + ''' Test modify_net_port ''' + data = self.mock_args() + data['up_admin'] = False + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object('port').apply() + assert exc.value.args[0]['changed'] + print(exc.value.args[0]['modify']) + + def test_successful_modify_str(self): + ''' Test modify_net_port ''' + data = self.mock_args() + data['flowcontrol_admin'] = 'anything' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object('port').apply() + assert exc.value.args[0]['changed'] + print(exc.value.args[0]['modify']) + + def test_successful_modify_multiple_ports(self): + ''' Test 
modify_net_port ''' + data = self.mock_args() + data['ports'] = ['a1', 'a2'] + data['mtu'] = '2000' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object('port').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_port.NetAppOntapNetPort.get_net_port') + def test_get_called(self, get_port): + ''' Test get_net_port ''' + data = self.mock_args() + data['ports'] = ['a1', 'a2'] + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object('port').apply() + assert get_port.call_count == 2 + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_port.NetAppOntapNetPort.get_net_port') + def test_negative_not_found_1(self, get_port): + ''' Test get_net_port ''' + data = self.mock_args() + data['ports'] = ['a1'] + set_module_args(data) + get_port.return_value = None + with pytest.raises(AnsibleFailJson) as exc: + self.get_port_mock_object('port').apply() + msg = 'Error: port: a1 not found on node: test - check node name.' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_port.NetAppOntapNetPort.get_net_port') + def test_negative_not_found_2(self, get_port): + ''' Test get_net_port ''' + data = self.mock_args() + data['ports'] = ['a1', 'a2'] + set_module_args(data) + get_port.return_value = None + with pytest.raises(AnsibleFailJson) as exc: + self.get_port_mock_object('port').apply() + msg = 'Error: ports: a1, a2 not found on node: test - check node name.' + assert msg in exc.value.args[0]['msg'] + + def test_negative_zapi_exception_in_get(self): + ''' Test get_net_port ''' + data = self.mock_args() + data['ports'] = ['a1', 'a2'] + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_port_mock_object('raise').get_net_port('a1') + msg = 'Error getting net ports for test: NetApp API failed. 
Reason - 1111:forcing an error' + assert msg in exc.value.args[0]['msg'] + + def test_negative_zapi_exception_in_modify(self): + ''' Test get_net_port ''' + data = self.mock_args() + data['ports'] = ['a1', 'a2'] + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_port_mock_object('raise').modify_net_port('a1', dict()) + msg = 'Error modifying net ports for test: NetApp API failed. Reason - 1111:forcing an error' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') + def test_negative_no_netapp_lib(self, get_port): + ''' Test get_net_port ''' + data = self.mock_args() + set_module_args(data) + get_port.return_value = False + with pytest.raises(AnsibleFailJson) as exc: + self.get_port_mock_object('port').apply() + msg = 'the python NetApp-Lib module is required' + assert msg in exc.value.args[0]['msg'] + + +def default_args(): + return { + 'state': 'present', + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'use_rest': 'always' + } + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_6': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy')), None), + 'is_rest_9_7': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'vlan_record': (200, { + "num_records": 1, + "records": [{ 
+ 'broadcast_domain': { + 'ipspace': {'name': 'Default'}, + 'name': 'test1' + }, + 'enabled': False, + 'name': 'e0c-15', + 'node': {'name': 'mohan9-vsim1'}, + 'uuid': '97936a14-30de-11ec-ac4d-005056b3d8c8' + }] + }, None) +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(dict(hostname='')) + port_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'missing required arguments:' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_unsupported_rest_properties(mock_request, patch_ansible): + '''throw error if unsupported rest properties are set''' + args = dict(default_args()) + args['node'] = "mohan9-vsim1" + args['ports'] = "e0d,e0d-15" + args['mtu'] = 1500 + args['duplex_admin'] = 'admin' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(args) + port_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'REST API currently does not support' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_enable_port(mock_request, patch_ansible): + ''' test enable vlan''' + args = dict(default_args()) + args['node'] = "mohan9-vsim1" + args['ports'] = "e0c-15" + args['up_admin'] = True + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['vlan_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_routes.py 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_routes.py new file mode 100644 index 000000000..a886e87a3 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_routes.py @@ -0,0 +1,359 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import create_module,\ + patch_ansible, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, build_zapi_error, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_routes \ + import NetAppOntapNetRoutes as net_route_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +DEFAULT_ARGS = { + 'https': 'True', + 'use_rest': 'never', + 'state': 'present', + 'destination': '176.0.0.0/24', + 'gateway': '10.193.72.1', + 'vserver': 'test_vserver', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'metric': 70 +} + + +def route_info_zapi(destination='176.0.0.0/24', gateway='10.193.72.1', metric=70): + return { + 'attributes': { + 'net-vs-routes-info': { + 
'address-family': 'ipv4', + 'destination': destination, + 'gateway': gateway, + 'metric': metric, + 'vserver': 'test_vserver' + } + } + } + + +ZRR = zapi_responses({ + 'net_route_info': build_zapi_response(route_info_zapi()), + 'net_route_info_gateway': build_zapi_response(route_info_zapi(gateway='10.193.0.1', metric=40)), + 'net_route_info_destination': build_zapi_response(route_info_zapi(destination='178.0.0.1/24', metric=40)), + 'error_15661': build_zapi_error(15661, 'not_exists_error'), + 'error_13001': build_zapi_error(13001, 'already exists') +}) + + +SRR = rest_responses({ + 'net_routes_record': (200, { + 'records': [ + { + "destination": {"address": "176.0.0.0", "netmask": "24", "family": "ipv4"}, + "gateway": '10.193.72.1', + "uuid": '1cd8a442-86d1-11e0-ae1c-123478563412', + "metric": 70, + "svm": {"name": "test_vserver"} + } + ] + }, None), + 'net_routes_cluster': (200, { + 'records': [ + { + "destination": {"address": "176.0.0.0", "netmask": "24", "family": "ipv4"}, + "gateway": '10.193.72.1', + "uuid": '1cd8a442-86d1-11e0-ae1c-123478563412', + "metric": 70, + "scope": "cluster" + } + ] + }, None), + 'modified_record': (200, { + 'records': [ + { + "destination": {"address": "0.0.0.0", "netmask": "0", "family": "ipv4"}, + "gateway": '10.193.72.1', + "uuid": '1cd8a442-86d1-11e0-ae1c-123478563412', + "scope": "cluster", + "metric": 90 + } + ] + }, None) +}) + + +def test_module_fail_when_required_args_missing(): + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "destination", "gateway"] + error = create_module(net_route_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_get_nonexistent_net_route(): + ''' Test if get_net_route returns None for non-existent net_route ''' + register_responses([ + ('net-routes-get', ZRR['no_records']) + ]) + assert create_module(net_route_module, DEFAULT_ARGS).get_net_route() is None + + +def 
test_get_nonexistent_net_route_15661(): + ''' Test if get_net_route returns None for non-existent net_route + when ZAPI returns an exception for a route not found + ''' + register_responses([ + ('net-routes-get', ZRR['error_15661']) + ]) + assert create_module(net_route_module, DEFAULT_ARGS).get_net_route() is None + + +def test_get_existing_route(): + ''' Test if get_net_route returns details for existing net_route ''' + register_responses([ + ('net-routes-get', ZRR['net_route_info']) + ]) + result = create_module(net_route_module, DEFAULT_ARGS).get_net_route() + assert result['destination'] == DEFAULT_ARGS['destination'] + assert result['gateway'] == DEFAULT_ARGS['gateway'] + + +def test_create_error_missing_param(): + ''' Test if create throws an error if destination is not specified''' + error = 'missing required arguments: destination' + assert error in create_module(net_route_module, {'hostname': 'host', 'gateway': 'gate'}, fail=True)['msg'] + + +def test_successful_create(): + ''' Test successful create ''' + register_responses([ + ('net-routes-get', ZRR['empty']), + ('net-routes-create', ZRR['success']), + ('net-routes-get', ZRR['net_route_info']), + ]) + assert create_and_apply(net_route_module, DEFAULT_ARGS)['changed'] + assert not create_and_apply(net_route_module, DEFAULT_ARGS)['changed'] + + +def test_create_zapi_ignore_route_exist(): + ''' Test NaApiError on create ''' + register_responses([ + ('net-routes-get', ZRR['empty']), + ('net-routes-create', ZRR['error_13001']) + ]) + assert create_and_apply(net_route_module, DEFAULT_ARGS)['changed'] + + +def test_successful_create_zapi_no_metric(): + ''' Test successful create ''' + register_responses([ + ('net-routes-get', ZRR['empty']), + ('net-routes-create', ZRR['success']) + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['metric'] + assert create_and_apply(net_route_module, DEFAULT_ARGS)['changed'] + + +def test_successful_delete(): + ''' Test successful delete ''' + 
register_responses([ + ('net-routes-get', ZRR['net_route_info']), + ('net-routes-destroy', ZRR['success']), + ('net-routes-get', ZRR['empty']), + ]) + assert create_and_apply(net_route_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + assert not create_and_apply(net_route_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_successful_modify_metric(): + ''' Test successful modify metric ''' + register_responses([ + ('net-routes-get', ZRR['net_route_info']), + ('net-routes-destroy', ZRR['success']), + ('net-routes-create', ZRR['success']) + ]) + assert create_and_apply(net_route_module, DEFAULT_ARGS, {'metric': '40'})['changed'] + + +def test_successful_modify_gateway(): + ''' Test successful modify gateway ''' + register_responses([ + ('net-routes-get', ZRR['empty']), + ('net-routes-get', ZRR['net_route_info']), + ('net-routes-destroy', ZRR['success']), + ('net-routes-create', ZRR['success']), + ('net-routes-get', ZRR['net_route_info_gateway']) + ]) + args = {'from_gateway': '10.193.72.1', 'gateway': '10.193.0.1', 'metric': 40} + assert create_and_apply(net_route_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(net_route_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_modify_destination(): + ''' Test successful modify destination ''' + register_responses([ + ('net-routes-get', ZRR['empty']), + ('net-routes-get', ZRR['net_route_info']), + ('net-routes-destroy', ZRR['success']), + ('net-routes-create', ZRR['success']), + ('net-routes-get', ZRR['net_route_info_gateway']) + ]) + args = {'from_destination': '176.0.0.0/24', 'destination': '178.0.0.1/24', 'metric': 40} + assert create_and_apply(net_route_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(net_route_module, DEFAULT_ARGS, args)['changed'] + + +def test_if_all_methods_catch_exception_zapi(): + ''' test error zapi - get/create/modify/delete''' + register_responses([ + # ZAPI get/create/delete error. 
+ ('net-routes-get', ZRR['error']), + ('net-routes-create', ZRR['error']), + ('net-routes-destroy', ZRR['error']), + # ZAPI modify error. + ('net-routes-get', ZRR['net_route_info']), + ('net-routes-destroy', ZRR['success']), + ('net-routes-create', ZRR['error']), + ('net-routes-create', ZRR['success']), + # REST get/create/delete error. + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['generic_error']), + ('POST', 'network/ip/routes', SRR['generic_error']), + ('DELETE', 'network/ip/routes/12345', SRR['generic_error']), + # REST modify error. + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['net_routes_record']), + ('DELETE', 'network/ip/routes/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']), + ('POST', 'network/ip/routes', SRR['generic_error']), + ('POST', 'network/ip/routes', SRR['success']), + ]) + net_route_obj = create_module(net_route_module, DEFAULT_ARGS) + assert 'Error fetching net route' in expect_and_capture_ansible_exception(net_route_obj.get_net_route, 'fail')['msg'] + assert 'Error creating net route' in expect_and_capture_ansible_exception(net_route_obj.create_net_route, 'fail')['msg'] + current = {'destination': '', 'gateway': ''} + assert 'Error deleting net route' in expect_and_capture_ansible_exception(net_route_obj.delete_net_route, 'fail', current)['msg'] + error = 'Error modifying net route' + assert error in create_and_apply(net_route_module, DEFAULT_ARGS, {'metric': 80}, fail=True)['msg'] + + net_route_obj = create_module(net_route_module, DEFAULT_ARGS, {'use_rest': 'always'}) + assert 'Error fetching net route' in expect_and_capture_ansible_exception(net_route_obj.get_net_route, 'fail')['msg'] + assert 'Error creating net route' in expect_and_capture_ansible_exception(net_route_obj.create_net_route, 'fail')['msg'] + current = {'uuid': '12345'} + assert 'Error deleting net route' in expect_and_capture_ansible_exception(net_route_obj.delete_net_route, 'fail', current)['msg'] 
+ assert error in create_and_apply(net_route_module, DEFAULT_ARGS, {'metric': 80, 'use_rest': 'always'}, fail=True)['msg'] + + +def test_rest_successfully_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['empty_records']), + ('POST', 'network/ip/routes', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['net_routes_record']) + ]) + assert create_and_apply(net_route_module, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + assert not create_and_apply(net_route_module, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + + +def test_rest_successfully_create_cluster_scope(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['empty_records']), + ('POST', 'network/ip/routes', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['net_routes_cluster']), + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['vserver'] + assert create_and_apply(net_route_module, DEFAULT_ARGS_COPY, {'use_rest': 'always'})['changed'] + assert not create_and_apply(net_route_module, DEFAULT_ARGS_COPY, {'use_rest': 'always'})['changed'] + + +def test_rest_successfully_destroy(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['net_routes_record']), + ('DELETE', 'network/ip/routes/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['empty_records']), + ]) + args = {'use_rest': 'always', 'state': 'absent'} + assert create_and_apply(net_route_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(net_route_module, DEFAULT_ARGS, args)['changed'] + + +def test_rest_successfully_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['empty_records']), + ('GET', 
'network/ip/routes', SRR['net_routes_record']), + ('DELETE', 'network/ip/routes/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['success']), + ('POST', 'network/ip/routes', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['modified_record']) + ]) + args = {'use_rest': 'always', 'metric': '90', 'from_destination': '176.0.0.0/24', 'destination': '0.0.0.0/24'} + assert create_and_apply(net_route_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(net_route_module, DEFAULT_ARGS, args)['changed'] + + +def test_rest_negative_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'network/ip/routes', SRR['empty_records']), + ('GET', 'network/ip/routes', SRR['empty_records']) + ]) + error = 'Error modifying: route 176.0.0.0/24 does not exist' + args = {'use_rest': 'auto', 'from_destination': '176.0.0.0/24'} + assert error in create_and_apply(net_route_module, DEFAULT_ARGS, args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_negative_zapi_no_netapp_lib(mock_has_lib): + mock_has_lib.return_value = False + msg = 'Error: the python NetApp-Lib module is required.' + assert msg in create_module(net_route_module, DEFAULT_ARGS, fail=True)['msg'] + + +def test_negative_non_supported_option(): + error = "REST API currently does not support 'from_metric'" + args = {'use_rest': 'always', 'from_metric': 23} + assert error in create_module(net_route_module, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_negative_zapi_requires_vserver(): + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['vserver'] + error = "Error: vserver is a required parameter when using ZAPI" + assert error in create_module(net_route_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + + +def test_negative_dest_format(): + error = "Error: Expecting '/' in '1.2.3.4'." 
+ assert error in create_module(net_route_module, DEFAULT_ARGS, {'destination': '1.2.3.4'}, fail=True)['msg'] + + +def test_negative_from_dest_format(): + args = {'destination': '1.2.3.4', 'from_destination': '5.6.7.8'} + error_msg = create_module(net_route_module, DEFAULT_ARGS, args, fail=True)['msg'] + msg = "Error: Expecting '/' in '1.2.3.4'." + assert msg in error_msg + msg = "Expecting '/' in '5.6.7.8'." + assert msg in error_msg diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_subnet.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_subnet.py new file mode 100644 index 000000000..ac284c8d7 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_subnet.py @@ -0,0 +1,275 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + call_main, create_module, expect_and_capture_ansible_exception, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_error_message, zapi_responses +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_subnet \ + import NetAppOntapSubnet as my_module, main as my_main # module under test + +if not 
netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +DEFAULT_ARGS = { + 'name': 'test_subnet', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'broadcast_domain': 'Default', + 'gateway': '10.0.0.1', + 'ipspace': 'Default', + 'subnet': '10.0.0.0/24', + 'ip_ranges': ['10.0.0.10-10.0.0.20', '10.0.0.30'], + 'use_rest': 'never' +} + + +def subnet_info(name): + return { + 'num-records': 1, + 'attributes-list': { + 'net-subnet-info': { + 'broadcast-domain': DEFAULT_ARGS['broadcast_domain'], + 'gateway': DEFAULT_ARGS['gateway'], + 'ip-ranges': [{'ip-range': elem} for elem in DEFAULT_ARGS['ip_ranges']], + 'ipspace': DEFAULT_ARGS['ipspace'], + 'subnet': DEFAULT_ARGS['subnet'], + 'subnet-name': name, + } + } + } + + +ZRR = zapi_responses({ + 'subnet_info': build_zapi_response(subnet_info(DEFAULT_ARGS['name'])), + 'subnet_info_renamed': build_zapi_response(subnet_info('new_test_subnet')), +}) + + +SRR = rest_responses({ + 'subnet_info': (200, {"records": [{ + "uuid": "82610703", + "name": "test_subnet", + "ipspace": {"name": "Default"}, + "gateway": "10.0.0.1", + "broadcast_domain": {"name": "Default"}, + "subnet": {"address": "10.0.0.0", "netmask": "24", "family": "ipv4"}, + "available_ip_ranges": [ + {"start": "10.0.0.10", "end": "10.0.0.20", "family": "ipv4"}, + {"start": "10.0.0.30", "end": "10.0.0.30", "family": "ipv4"} + ] + }], "num_records": 1}, None), + 'subnet_info_renamed': (200, {"records": [{ + "uuid": "82610703", + "name": "new_test_subnet", + "ipspace": {"name": "Default"}, + "gateway": "10.0.0.1", + "broadcast_domain": {"name": "Default"}, + "subnet": {"address": "10.0.0.0", "netmask": "24", "family": "ipv4"}, + "available_ip_ranges": [ + {"start": "10.0.0.10", "end": "10.0.0.20", "family": "ipv4"}, + {"start": "10.0.0.30", "end": "10.0.0.30", "family": "ipv4"} + ] + }], "num_records": 1}, None) +}) + + 
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_NETAPP_LIB', False) +def test_module_fail_when_netapp_lib_missing(): + ''' required lib missing ''' + assert 'Error: the python NetApp-Lib module is required. Import error: None' in call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] + + +def test_successful_create(): + register_responses([ + + ('ZAPI', 'net-subnet-get-iter', ZRR['no_records']), + ('ZAPI', 'net-subnet-create', ZRR['success']), + # idempotency + + ('ZAPI', 'net-subnet-get-iter', ZRR['subnet_info']), + ]) + assert call_main(my_main, DEFAULT_ARGS)['changed'] + # idempotency + assert not call_main(my_main, DEFAULT_ARGS)['changed'] + + +def test_successful_delete(): + register_responses([ + ('ZAPI', 'net-subnet-get-iter', ZRR['subnet_info']), + ('ZAPI', 'net-subnet-destroy', ZRR['success']), + # idempotency + ('ZAPI', 'net-subnet-get-iter', ZRR['no_records']), + ]) + assert call_main(my_main, DEFAULT_ARGS, {'state': 'absent'})['changed'] + # idempotency + assert not call_main(my_main, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_successful_modify(): + register_responses([ + ('ZAPI', 'net-subnet-get-iter', ZRR['subnet_info']), + ('ZAPI', 'net-subnet-modify', ZRR['success']), + # idempotency + ('ZAPI', 'net-subnet-get-iter', ZRR['subnet_info']), + ]) + module_args = {'ip_ranges': ['10.0.0.10-10.0.0.25', '10.0.0.30']} + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # idempotency + module_args.pop('ip_ranges') + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_rename(): + register_responses([ + ('ZAPI', 'net-subnet-get-iter', ZRR['no_records']), + ('ZAPI', 'net-subnet-get-iter', ZRR['subnet_info']), + ('ZAPI', 'net-subnet-rename', ZRR['success']), + # idempotency + ('ZAPI', 'net-subnet-get-iter', ZRR['subnet_info']), + ]) + module_args = {'from_name': DEFAULT_ARGS['name'], 'name': 'new_test_subnet'} + assert call_main(my_main, DEFAULT_ARGS, 
module_args)['changed'] + # idempotency + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_modify_broadcast_domain(): + register_responses([ + ('ZAPI', 'net-subnet-get-iter', ZRR['subnet_info']), + ]) + module_args = {'broadcast_domain': 'cannot change'} + error = 'Error modifying subnet test_subnet: cannot modify broadcast_domain parameter, desired "cannot change", currrent "Default"' + assert error == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_rename(): + register_responses([ + ('ZAPI', 'net-subnet-get-iter', ZRR['no_records']), + ('ZAPI', 'net-subnet-get-iter', ZRR['no_records']), + ]) + module_args = {'from_name': DEFAULT_ARGS['name'], 'name': 'new_test_subnet'} + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == 'Error renaming: subnet test_subnet does not exist' + + +def test_negative_create(): + register_responses([ + ('ZAPI', 'net-subnet-get-iter', ZRR['no_records']), + # second test + ('ZAPI', 'net-subnet-get-iter', ZRR['no_records']), + # third test + ('ZAPI', 'net-subnet-get-iter', ZRR['no_records']), + ]) + args = dict(DEFAULT_ARGS) + args.pop('subnet') + assert call_main(my_main, args, fail=True)['msg'] == 'Error - missing required arguments: subnet.' + args = dict(DEFAULT_ARGS) + args.pop('broadcast_domain') + assert call_main(my_main, args, fail=True)['msg'] == 'Error - missing required arguments: broadcast_domain.' + args.pop('subnet') + assert call_main(my_main, args, fail=True)['msg'] == 'Error - missing required arguments: subnet.' 
+ + +def test_if_all_methods_catch_exception(): + register_responses([ + ('ZAPI', 'net-subnet-get-iter', ZRR['error']), + ('ZAPI', 'net-subnet-create', ZRR['error']), + ('ZAPI', 'net-subnet-destroy', ZRR['error']), + ('ZAPI', 'net-subnet-modify', ZRR['error']), + ('ZAPI', 'net-subnet-rename', ZRR['error']), + # REST exception + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'network/ip/subnets', SRR['generic_error']), + ('POST', 'network/ip/subnets', SRR['generic_error']), + ('PATCH', 'network/ip/subnets/82610703', SRR['generic_error']), + ('DELETE', 'network/ip/subnets/82610703', SRR['generic_error']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + assert zapi_error_message('Error fetching subnet test_subnet') == expect_and_capture_ansible_exception(my_obj.get_subnet, 'fail')['msg'] + assert zapi_error_message('Error creating subnet test_subnet') == expect_and_capture_ansible_exception(my_obj.create_subnet, 'fail')['msg'] + assert zapi_error_message('Error deleting subnet test_subnet') == expect_and_capture_ansible_exception(my_obj.delete_subnet, 'fail')['msg'] + assert zapi_error_message('Error modifying subnet test_subnet') == expect_and_capture_ansible_exception(my_obj.modify_subnet, 'fail', {})['msg'] + assert zapi_error_message('Error renaming subnet test_subnet') == expect_and_capture_ansible_exception(my_obj.rename_subnet, 'fail')['msg'] + my_obj = create_module(my_module, DEFAULT_ARGS, {'use_rest': 'always'}) + my_obj.uuid = '82610703' + assert 'Error fetching subnet test_subnet' in expect_and_capture_ansible_exception(my_obj.get_subnet, 'fail')['msg'] + assert 'Error creating subnet test_subnet' in expect_and_capture_ansible_exception(my_obj.create_subnet, 'fail')['msg'] + assert 'Error modifying subnet test_subnet' in expect_and_capture_ansible_exception(my_obj.modify_subnet, 'fail', {})['msg'] + assert 'Error deleting subnet test_subnet' in expect_and_capture_ansible_exception(my_obj.delete_subnet, 'fail')['msg'] + modify = {'subnet': 
'192.168.1.2'} + assert 'Error: Invalid value specified for subnet' in expect_and_capture_ansible_exception(my_obj.form_create_modify_body_rest, 'fail', modify)['msg'] + + +def test_successful_create_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'network/ip/subnets', SRR['empty_records']), + ('POST', 'network/ip/subnets', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'network/ip/subnets', SRR['subnet_info']), + ]) + assert call_main(my_main, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + # idempotency + assert not call_main(my_main, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + + +def test_successful_modify_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'network/ip/subnets', SRR['subnet_info']), + ('PATCH', 'network/ip/subnets/82610703', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'network/ip/subnets', SRR['subnet_info']) + ]) + module_args = {'ip_ranges': ['10.0.0.10-10.0.0.25', '10.0.0.30'], 'use_rest': 'always'} + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # idempotency + module_args.pop('ip_ranges') + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_rename_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'network/ip/subnets', SRR['empty_records']), + ('GET', 'network/ip/subnets', SRR['subnet_info']), + ('PATCH', 'network/ip/subnets/82610703', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'network/ip/subnets', SRR['subnet_info_renamed']), + ]) + module_args = {'from_name': DEFAULT_ARGS['name'], 'name': 'new_test_subnet', 'use_rest': 'always'} + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # idempotency + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_delete_rest(): + 
register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'network/ip/subnets', SRR['subnet_info']), + ('DELETE', 'network/ip/subnets/82610703', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'network/ip/subnets', SRR['empty_records']), + ]) + assert call_main(my_main, DEFAULT_ARGS, {'state': 'absent', 'use_rest': 'always'})['changed'] + # idempotency + assert not call_main(my_main, DEFAULT_ARGS, {'state': 'absent', 'use_rest': 'always'})['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_vlan.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_vlan.py new file mode 100644 index 000000000..bbcb9e7fe --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_net_vlan.py @@ -0,0 +1,252 @@ +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP net vlan Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import sys +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_no_warnings, set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_net_vlan \ + import NetAppOntapVlan as my_module # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +def default_args(): + args = { + 'state': 'present', + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 
'false', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_6': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy')), None), + 'is_rest_9_7': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'vlan_record': (200, { + "num_records": 1, + "records": [{ + 'broadcast_domain': { + 'ipspace': {'name': 'Default'}, + 'name': 'test1' + }, + 'enabled': True, + 'name': 'e0c-15', + 'node': {'name': 'mohan9cluster2-01'}, + 'uuid': '97936a14-30de-11ec-ac4d-005056b3d8c8' + }] + }, None), + 'vlan_record_create': (200, { + "num_records": 1, + "records": [{ + 'broadcast_domain': { + 'ipspace': {'name': 'Default'}, + 'name': 'test2' + }, + 'enabled': True, + 'name': 'e0c-16', + 'node': {'name': 'mohan9cluster2-01'}, + 'uuid': '97936a14-30de-11ec-ac4d-005056b3d8c8' + }] + }, None), + 'vlan_record_modify': (200, { + "num_records": 1, + "records": [{ + 'broadcast_domain': { + 'ipspace': {'name': 'Default'}, + 'name': 'test1' + }, + 'enabled': False, + 'name': 'e0c-16', + 'node': {'name': 'mohan9cluster2-01'}, + 'uuid': '97936a14-30de-11ec-ac4d-005056b3d8c8' + }] + }, None) +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(dict(hostname='')) + my_module() + print('Info: 
%s' % exc.value.args[0]['msg']) + msg = 'missing required arguments:' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_when_required_args_missing_ONTAP96(mock_request, patch_ansible): + ''' required arguments are reported as errors for ONTAP 9.6''' + args = dict(default_args()) + args['node'] = 'mohan9cluster2-01' + args['vlanid'] = 154 + args['parent_interface'] = 'e0c' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_6'] # get version + ] + with pytest.raises(AnsibleFailJson) as exc: + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'broadcast_domain and ipspace are required fields with ONTAP 9.6 and 9.7' + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_fail_when_required_args_missing_ONTAP97(mock_request, patch_ansible): + ''' required arguments are reported as errors for ONTAP 9.7''' + args = dict(default_args()) + args['node'] = 'mohan9cluster2-01' + args['vlanid'] = 154 + args['parent_interface'] = 'e0c' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_7'] # get version + ] + with pytest.raises(AnsibleFailJson) as exc: + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'broadcast_domain and ipspace are required fields with ONTAP 9.6 and 9.7' + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_get_vlan_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['node'] = 'mohan9cluster2-01' + args['vlanid'] = 15 + args['parent_interface'] = 'e0c' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['vlan_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + 
with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_create_vlan_called(mock_request, patch_ansible): + ''' test create''' + args = dict(default_args()) + args['node'] = 'mohan9cluster2-01' + args['vlanid'] = 16 + args['parent_interface'] = 'e0c' + args['broadcast_domain'] = 'test2' + args['ipspace'] = 'Default' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['zero_record'], # get + SRR['empty_good'], # create + SRR['vlan_record_create'], # get created vlan record to check PATCH call required + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_modify_vlan_called(mock_request, patch_ansible): + ''' test modify''' + args = dict(default_args()) + args['node'] = 'mohan9cluster2-01' + args['vlanid'] = 16 + args['parent_interface'] = 'e0c' + args['broadcast_domain'] = 'test1' + args['ipspace'] = 'Default' + args['enabled'] = 'no' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['vlan_record_create'], # get + SRR['empty_good'], # patch call + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_vlan_called(mock_request, patch_ansible): + ''' test delete''' + args = 
dict(default_args()) + args['node'] = 'mohan9cluster2-01' + args['vlanid'] = 15 + args['parent_interface'] = 'e0c' + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['vlan_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_vlan_idempotent(mock_request, patch_ansible): + ''' test delete idempotent''' + args = dict(default_args()) + args['node'] = 'mohan9cluster2-01' + args['vlanid'] = 15 + args['parent_interface'] = 'e0c' + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['zero_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs.py new file mode 100644 index 000000000..116f25f06 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs.py @@ -0,0 +1,338 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import copy +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import 
patch, Mock +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_no_warnings, assert_no_warnings_except_zapi, assert_warning_was_raised, call_main, create_and_apply, print_warnings, set_module_args,\ + AnsibleExitJson, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nfs \ + import NetAppONTAPNFS as nfs_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +nfs_info = { + "attributes-list": { + "nfs-info": { + "auth-sys-extended-groups": "false", + "cached-cred-harvest-timeout": "86400000", + "cached-cred-negative-ttl": "7200000", + "cached-cred-positive-ttl": "86400000", + "cached-transient-err-ttl": "30000", + "chown-mode": "use_export_policy", + "enable-ejukebox": "true", + "extended-groups-limit": "32", + "file-session-io-grouping-count": "5000", + "file-session-io-grouping-duration": "120", + "ignore-nt-acl-for-root": "false", + "is-checksum-enabled-for-replay-cache": "true", + "is-mount-rootonly-enabled": "true", + "is-netgroup-dns-domain-search": "true", + "is-nfs-access-enabled": "false", + "is-nfs-rootonly-enabled": "false", + "is-nfsv2-enabled": "false", + "is-nfsv3-64bit-identifiers-enabled": "false", + "is-nfsv3-connection-drop-enabled": "true", + "is-nfsv3-enabled": "true", + "is-nfsv3-fsid-change-enabled": "true", + "is-nfsv4-fsid-change-enabled": "true", + "is-nfsv4-numeric-ids-enabled": "true", + "is-nfsv40-acl-enabled": "false", + "is-nfsv40-enabled": "true", + "is-nfsv40-migration-enabled": 
"false", + "is-nfsv40-read-delegation-enabled": "false", + "is-nfsv40-referrals-enabled": "false", + "is-nfsv40-req-open-confirm-enabled": "false", + "is-nfsv40-write-delegation-enabled": "false", + "is-nfsv41-acl-enabled": "false", + "is-nfsv41-acl-preserve-enabled": "true", + "is-nfsv41-enabled": "true", + "is-nfsv41-migration-enabled": "false", + "is-nfsv41-pnfs-enabled": "true", + "is-nfsv41-read-delegation-enabled": "false", + "is-nfsv41-referrals-enabled": "false", + "is-nfsv41-state-protection-enabled": "true", + "is-nfsv41-write-delegation-enabled": "false", + "is-qtree-export-enabled": "false", + "is-rquota-enabled": "false", + "is-tcp-enabled": "false", + "is-udp-enabled": "false", + "is-v3-ms-dos-client-enabled": "false", + "is-validate-qtree-export-enabled": "true", + "is-vstorage-enabled": "false", + "map-unknown-uid-to-default-windows-user": "true", + "mountd-port": "635", + "name-service-lookup-protocol": "udp", + "netgroup-trust-any-ns-switch-no-match": "false", + "nfsv4-acl-max-aces": "400", + "nfsv4-grace-seconds": "45", + "nfsv4-id-domain": "defaultv4iddomain.com", + "nfsv4-lease-seconds": "30", + "nfsv41-implementation-id-domain": "netapp.com", + "nfsv41-implementation-id-name": "NetApp Release Kalyaniblack__9.4.0", + "nfsv41-implementation-id-time": "1541070767", + "nfsv4x-session-num-slots": "180", + "nfsv4x-session-slot-reply-cache-size": "640", + "nlm-port": "4045", + "nsm-port": "4046", + "ntacl-display-permissive-perms": "false", + "ntfs-unix-security-ops": "use_export_policy", + "permitted-enc-types": { + "string": ["des", "des3", "aes_128", "aes_256"] + }, + "rpcsec-ctx-high": "0", + "rpcsec-ctx-idle": "0", + "rquotad-port": "4049", + "showmount": "true", + "showmount-timestamp": "1548372452", + "skip-root-owner-write-perm-check": "false", + "tcp-max-xfer-size": "1048576", + "udp-max-xfer-size": "32768", + "v3-search-unconverted-filename": "false", + "v4-inherited-acl-preserve": "false", + "vserver": "ansible" + } + }, + "num-records": 
"1" +} + +nfs_info_no_tcp_max_xfer_size = copy.deepcopy(nfs_info) +del nfs_info_no_tcp_max_xfer_size['attributes-list']['nfs-info']['tcp-max-xfer-size'] + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None, job_error=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.kind == 'nfs': + xml = self.build_nfs_info(self.params) + self.xml_out = xml + if self.kind == 'nfs_status': + xml = self.build_nfs_status_info(self.params) + return xml + + @staticmethod + def build_nfs_info(nfs_details): + ''' build xml data for volume-attributes ''' + xml = netapp_utils.zapi.NaElement('xml') + xml.translate_struct(nfs_info) + return xml + + @staticmethod + def build_nfs_status_info(nfs_status_details): + ''' build xml data for volume-attributes ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'is-enabled': "true" + } + xml.translate_struct(attributes) + return xml + + +DEFAULT_ARGS = { + 'vserver': 'nfs_vserver', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'https': 'false', + 'use_rest': 'never' +} + + +SRR = zapi_responses({ + 'nfs_info': build_zapi_response(nfs_info), + 'nfs_info_no_tcp_max_xfer_size': build_zapi_response(nfs_info_no_tcp_max_xfer_size) +}) + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_nfs_group = { + 'vserver': DEFAULT_ARGS['vserver'], + } + + def mock_args(self): + return dict(DEFAULT_ARGS) + + def get_nfs_mock_object(self, kind=None): + """ + Helper method to return an na_ontap_volume object + :param kind: passes this param to MockONTAPConnection() + :return: na_ontap_volume object + """ + nfsy_obj = nfs_module() + 
nfsy_obj.asup_log_for_cserver = Mock(return_value=None) + nfsy_obj.cluster = Mock() + nfsy_obj.cluster.invoke_successfully = Mock() + if kind is None: + nfsy_obj.server = MockONTAPConnection() + else: + nfsy_obj.server = MockONTAPConnection(kind=kind, data=self.mock_nfs_group) + return nfsy_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + error = 'missing required arguments' + assert error in call_main(my_main, {}, fail=True)['msg'] + + def test_get_nonexistent_nfs(self): + ''' Test if get_nfs_service returns None for non-existent nfs ''' + set_module_args(self.mock_args()) + result = self.get_nfs_mock_object().get_nfs_service() + assert result is None + + def test_get_existing_nfs(self): + ''' Test if get_policy_group returns details for existing nfs ''' + set_module_args(self.mock_args()) + result = self.get_nfs_mock_object('nfs').get_nfs_service() + assert result['nfsv3'] + + def test_get_nonexistent_nfs_status(self): + ''' Test if get__nfs_status returns None for non-existent nfs ''' + set_module_args(self.mock_args()) + result = self.get_nfs_mock_object().get_nfs_status() + assert result is None + + def test_get_existing_nfs_status(self): + ''' Test if get__nfs_status returns details for nfs ''' + set_module_args(self.mock_args()) + result = self.get_nfs_mock_object('nfs_status').get_nfs_status() + assert result + + def test_modify_nfs(self): + ''' Test if modify_nfs runs for existing nfs ''' + data = self.mock_args() + current = { + 'nfsv3': 'enabled', + 'nfsv3_fsid_change': 'enabled', + 'nfsv4': 'enabled', + 'nfsv41': 'enabled', + 'vstorage_state': 'enabled', + 'tcp': 'enabled', + 'udp': 'enabled', + 'nfsv4_id_domain': 'nfsv4_id_domain', + 'nfsv40_acl': 'enabled', + 'nfsv40_read_delegation': 'enabled', + 'nfsv40_write_delegation': 'enabled', + 'nfsv41_acl': 'enabled', + 'nfsv41_read_delegation': 'enabled', + 'nfsv41_write_delegation': 'enabled', + 'showmount': 'enabled', + 
'tcp_max_xfer_size': '1048576', + } + + data.update(current) + set_module_args(data) + self.get_nfs_mock_object('nfs_status').modify_nfs_service(current) + + def test_successfully_modify_nfs(self): + ''' Test modify nfs successful for modifying tcp max xfer size. ''' + data = self.mock_args() + data['tcp_max_xfer_size'] = 8192 + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_nfs_mock_object('nfs').apply() + assert exc.value.args[0]['changed'] + + def test_modify_nfs_idempotency(self): + ''' Test modify nfs idempotency ''' + data = self.mock_args() + data['tcp_max_xfer_size'] = '1048576' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_nfs_mock_object('nfs').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nfs.NetAppONTAPNFS.delete_nfs_service') + def test_successfully_delete_nfs(self, delete_nfs_service): + ''' Test successfully delete nfs ''' + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + obj = self.get_nfs_mock_object('nfs') + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] + delete_nfs_service.assert_called_with() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nfs.NetAppONTAPNFS.get_nfs_service') + def test_successfully_enable_nfs(self, get_nfs_service): + ''' Test successfully enable nfs on non-existent nfs ''' + data = self.mock_args() + data['state'] = 'present' + set_module_args(data) + get_nfs_service.side_effect = [ + None, + {} + ] + obj = self.get_nfs_mock_object('nfs') + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert exc.value.args[0]['changed'] + + +def test_modify_tcp_max_xfer_size(): + ''' if ZAPI returned a None value, a modify is attempted ''' + register_responses([ + # ONTAP 9.4 and later, tcp_max_xfer_size is an INT + ('ZAPI', 'nfs-service-get-iter', SRR['nfs_info']), + ('ZAPI', 'nfs-status', 
SRR['success']), + ('ZAPI', 'nfs-service-modify', SRR['success']), + # ONTAP 9.4 and later, tcp_max_xfer_size is an INT, idempotency + ('ZAPI', 'nfs-service-get-iter', SRR['nfs_info']), + # ONTAP 9.3 and earlier, tcp_max_xfer_size is not set + ('ZAPI', 'nfs-service-get-iter', SRR['nfs_info_no_tcp_max_xfer_size']), + ]) + module_args = { + 'tcp_max_xfer_size': 4500 + } + assert create_and_apply(nfs_module, DEFAULT_ARGS, module_args)['changed'] + module_args = { + 'tcp_max_xfer_size': 1048576 + } + assert not create_and_apply(nfs_module, DEFAULT_ARGS, module_args)['changed'] + error = 'Error: tcp_max_xfer_size is not supported on ONTAP 9.3 or earlier.' + assert create_and_apply(nfs_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + assert_no_warnings_except_zapi() + + +def test_warning_on_nfsv41_alias(): + ''' if ZAPI returned a None value, a modify is attempted ''' + register_responses([ + # ONTAP 9.4 and later, tcp_max_xfer_size is an INT + ('ZAPI', 'nfs-service-get-iter', SRR['nfs_info']), + ('ZAPI', 'nfs-status', SRR['success']), + ('ZAPI', 'nfs-service-modify', SRR['success']), + ]) + module_args = { + 'nfsv4.1': 'disabled' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + print_warnings() + assert_warning_was_raised('Error: "nfsv4.1" option conflicts with Ansible naming conventions - please use "nfsv41".') diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs_rest.py new file mode 100644 index 000000000..995dbeb6f --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nfs_rest.py @@ -0,0 +1,324 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import 
ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + AnsibleFailJson, AnsibleExitJson, patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nfs \ + import NetAppONTAPNFS as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +# REST API canned responses when mocking send_request. +# The rest_factory provides default responses shared across testcases. 
+SRR = rest_responses({ + # module specific responses + 'one_record': (200, {"records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "transport": { + "udp_enabled": True, + "tcp_enabled": True + }, + "protocol": { + "v3_enabled": True, + "v4_id_domain": "carchi8py.com", + "v40_enabled": False, + "v41_enabled": False, + "v40_features": { + "acl_enabled": False, + "read_delegation_enabled": False, + "write_delegation_enabled": False + }, + "v41_features": { + "acl_enabled": False, + "read_delegation_enabled": False, + "write_delegation_enabled": False, + "pnfs_enabled": False + } + }, + "vstorage_enabled": False, + "showmount_enabled": True, + "root": { + "ignore_nt_acl": False, + "skip_write_permission_check": False + }, + "security": { + "chown_mode": "restricted", + "nt_acl_display_permission": False, + "ntfs_unix_security": "fail", + "permitted_encryption_types": ["des3"], + "rpcsec_context_idle": 5 + }, + "windows":{ + "v3_ms_dos_client_enabled": False, + "map_unknown_uid_to_default_user": True, + "default_user": "test_user" + }, + "tcp_max_xfer_size": "16384" + } + ]}, None), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'ansibleSVM', + 'use_rest': 'always', +} + + +def set_default_args(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'ansibleSVM', + 'use_rest': 'always', + }) + + +def test_get_nfs_rest_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/nfs/services', SRR['empty_records']) + ]) + set_module_args(set_default_args()) + my_obj = my_module() + assert my_obj.get_nfs_service_rest() is None + + +def test_partially_supported_rest(): + register_responses([('GET', 'cluster', SRR['is_rest_96'])]) + module_args = set_default_args() + module_args['showmount'] = 'enabled' + set_module_args(module_args) + with pytest.raises(AnsibleFailJson) 
as exc: + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = "Error: Minimum version of ONTAP for showmount is (9, 8)." + assert msg in exc.value.args[0]['msg'] + + +def test_get_nfs_rest_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/nfs/services', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error getting nfs services for SVM ansibleSVM: calling: protocols/nfs/services: got Expected error.' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_nfs_service_rest, 'fail')['msg'] + + +def test_get_nfs_rest_one_record(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/nfs/services', SRR['one_record']) + ]) + set_module_args(set_default_args()) + my_obj = my_module() + assert my_obj.get_nfs_service_rest() is not None + + +def test_create_nfs(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/nfs/services', SRR['empty_records']), + ('POST', 'protocols/nfs/services', SRR['empty_good']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {})['changed'] + + +def test_create_nfs_all_options(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'protocols/nfs/services', SRR['empty_good']) + ]) + set_module_args(set_default_args()) + my_obj = my_module() + my_obj.parameters['nfsv3'] = True + my_obj.parameters['nfsv4'] = False + my_obj.parameters['nfsv41'] = False + my_obj.parameters['nfsv41_pnfs'] = False + my_obj.parameters['vstorage_state'] = False + my_obj.parameters['nfsv4_id_domain'] = 'carchi8py.com' + my_obj.parameters['tcp'] = True + my_obj.parameters['udp'] = True + my_obj.parameters['nfsv40_acl'] = False + my_obj.parameters['nfsv40_read_delegation'] = False + my_obj.parameters['nfsv40_write_delegation'] = False + my_obj.parameters['nfsv41_acl'] = False + my_obj.parameters['nfsv41_read_delegation'] = False + 
my_obj.parameters['nfsv41_write_delegation'] = False + my_obj.parameters['showmount'] = True + my_obj.parameters['service_state'] = 'stopped' + my_obj.create_nfs_service_rest() + assert get_mock_record().is_record_in_json({'svm.name': 'ansibleSVM'}, 'POST', 'protocols/nfs/services') + + +def test_create_nfs_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'protocols/nfs/services', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error creating nfs service for SVM ansibleSVM: calling: protocols/nfs/services: got Expected error.' + assert msg in expect_and_capture_ansible_exception(my_module_object.create_nfs_service_rest, 'fail')['msg'] + + +def test_delete_nfs(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/nfs/services', SRR['one_record']), + ('DELETE', 'protocols/nfs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']) + ]) + module_args = { + 'state': 'absent' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_nfs_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('DELETE', 'protocols/nfs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']) + ]) + set_module_args(set_default_args()) + my_obj = my_module() + my_obj.parameters['state'] = 'absent' + my_obj.svm_uuid = '671aa46e-11ad-11ec-a267-005056b30cfa' + with pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_nfs_service_rest() + print('Info: %s' % exc.value.args[0]['msg']) + msg = "Error deleting nfs service for SVM ansibleSVM" + assert msg == exc.value.args[0]['msg'] + + +def test_delete_nfs_no_uuid_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + module_args = { + 'state': 'absent' + } + my_module_object = create_module(my_module, DEFAULT_ARGS, module_args) + msg = "Error deleting nfs service for SVM ansibleSVM: svm.uuid is None" + assert msg in 
expect_and_capture_ansible_exception(my_module_object.delete_nfs_service_rest, 'fail')['msg'] + + +def test_modify_nfs(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/nfs/services', SRR['one_record']), + ('PATCH', 'protocols/nfs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']) + ]) + set_module_args(set_default_args()) + my_obj = my_module() + my_obj.parameters['nfsv3'] = 'disabled' + my_obj.svm_uuid = '671aa46e-11ad-11ec-a267-005056b30cfa' + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + +def test_modify_nfs_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 'protocols/nfs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']) + ]) + set_module_args(set_default_args()) + my_obj = my_module() + my_obj.parameters['nfsv3'] = 'disabled' + my_obj.svm_uuid = '671aa46e-11ad-11ec-a267-005056b30cfa' + modify = {'nfsv3': False} + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_nfs_service_rest(modify) + print('Info: %s' % exc.value.args[0]['msg']) + msg = "Error modifying nfs service for SVM ansibleSVM: calling: protocols/nfs/services/671aa46e-11ad-11ec-a267-005056b30cfa: got Expected error." 
+ assert msg == exc.value.args[0]['msg'] + + +def test_modify_nfs_no_uuid_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + set_module_args(set_default_args()) + my_obj = my_module() + my_obj.parameters['nfsv3'] = 'disabled' + modify = {'nfsv3': False} + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_nfs_service_rest(modify) + print('Info: %s' % exc.value.args[0]['msg']) + msg = "Error modifying nfs service for SVM ansibleSVM: svm.uuid is None" + assert msg == exc.value.args[0]['msg'] + + +def test_modify_nfs_root(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/nfs/services', SRR['one_record']), + ('PATCH', 'protocols/nfs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['success']) + ]) + module_args = { + "root": + { + "ignore_nt_acl": True, + "skip_write_permission_check": True + } + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_nfs_security(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/nfs/services', SRR['one_record']), + ('PATCH', 'protocols/nfs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['success']) + ]) + module_args = { + "security": + { + "chown_mode": "restricted", + "nt_acl_display_permission": "true", + "ntfs_unix_security": "fail", + "permitted_encryption_types": ["des3"], + "rpcsec_context_idle": 5 + } + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_nfs_windows(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_0']), + ('GET', 'protocols/nfs/services', SRR['one_record']), + ('PATCH', 'protocols/nfs/services/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['success']) + ]) + module_args = { + "windows": + { + "v3_ms_dos_client_enabled": True, + "map_unknown_uid_to_default_user": False, + "default_user": "test_user" + }, + "tcp_max_xfer_size": "16384" + } + assert create_and_apply(my_module, DEFAULT_ARGS, 
module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_node.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_node.py new file mode 100644 index 000000000..d29c5c64f --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_node.py @@ -0,0 +1,222 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_node \ + import NetAppOntapNode as node_module # module under test + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'node_record': (200, {"num_records": 1, "records": [ + { + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7", + "name": 'node1', + "location": 'myloc'} + ]}, None) +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.type = kind + self.params = 
data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'node': + xml = self.build_node_info() + elif self.type == 'node_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_node_info(): + ''' build xml data for node-details-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'attributes': { + 'node-details-info': { + "node": "node1", + "node-location": "myloc", + "node-asset-tag": "mytag" + } + } + } + xml.translate_struct(data) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def set_default_args(self, use_rest=None): + hostname = '10.10.10.10' + username = 'username' + password = 'password' + name = 'node1' + + args = dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'name': name, + 'location': 'myloc' + }) + + if use_rest is not None: + args['use_rest'] = use_rest + + return args + + @staticmethod + def get_node_mock_object(cx_type='zapi', kind=None): + node_obj = node_module() + if cx_type == 'zapi': + if kind is None: + node_obj.server = MockONTAPConnection() + else: + node_obj.server = MockONTAPConnection(kind=kind) + return node_obj + + def test_ensure_get_called(self): + ''' test get_node for non-existent entry''' + set_module_args(self.set_default_args(use_rest='Never')) + print('starting') + my_obj = node_module() + print('use_rest:', my_obj.use_rest) + my_obj.cluster = MockONTAPConnection('node') + assert my_obj.get_node is not None + + def test_successful_rename(self): + ''' renaming node and testing idempotency ''' + data = self.set_default_args(use_rest='Never') + data['from_name'] = 'node1' + data['name'] = 'node2' + set_module_args(data) + my_obj = node_module() + 
my_obj.cluster = MockONTAPConnection('node') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # to reset na_helper from remembering the previous 'changed' value + data['name'] = 'node1' + set_module_args(data) + my_obj = node_module() + my_obj.cluster = MockONTAPConnection('node') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_successful_modify(self): + ''' modifying node and testing idempotency ''' + data = self.set_default_args(use_rest='Never') + data['location'] = 'myloc1' + set_module_args(data) + my_obj = node_module() + my_obj.cluster = MockONTAPConnection('node') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # to reset na_helper from remembering the previous 'changed' value + data['location'] = 'myloc' + set_module_args(data) + my_obj = node_module() + my_obj.cluster = MockONTAPConnection('node') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + data = self.set_default_args(use_rest='Never') + data['from_name'] = 'node1' + data['name'] = 'node2' + set_module_args(data) + my_obj = node_module() + my_obj.cluster = MockONTAPConnection('node_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.rename_node() + assert 'Error renaming node: ' in exc.value.args[0]['msg'] + data = self.set_default_args(use_rest='Never') + data['location'] = 'myloc1' + set_module_args(data) + my_obj1 = node_module() + my_obj1.cluster = MockONTAPConnection('node_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj1.modify_node() + assert 'Error modifying node: ' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_modify_rest(self, mock_request): + data = self.set_default_args() + 
data['from_name'] = 'node2' + data['location'] = 'mylocnew' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['node_record'], # get + SRR['empty_good'], # no response for modify + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_node_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_rename_rest(self, mock_request): + data = self.set_default_args() + data['from_name'] = 'node' + data['name'] = 'node2' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['node_record'], # get + SRR['empty_good'], # no response for modify + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_node_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_modify_location_rest(self, mock_request): + data = self.set_default_args() + data['location'] = 'mylocnew' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['node_record'], # get + SRR['empty_good'], # no response for modify + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_node_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py new file mode 100644 index 000000000..da8e15ffc --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_dacl.py @@ -0,0 +1,232 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: 
na_ontap_ntfs_dacl''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_dacl \ + import NetAppOntapNtfsDacl as dacl_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') +HAS_NETAPP_ZAPI_MSG = "pip install netapp_lib is required" + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + request = xml.to_string().decode('utf-8') + if self.kind == 'error': + raise netapp_utils.zapi.NaApiError('test', 'expect error') + elif request.startswith(""): + xml = None # or something that may make the logger happy, and you don't need @patch anymore + # or + # xml = build_ems_log_response() + elif request.startswith(""): + if self.kind == 'create': + xml = self.build_dacl_info() + else: + xml = self.build_dacl_info(self.params) + elif request.startswith(""): + xml = self.build_dacl_info(self.params) + self.xml_out = xml + return xml + + @staticmethod + def build_dacl_info(data=None): + xml = netapp_utils.zapi.NaElement('xml') + vserver = 'vserver' + attributes = {'num-records': '0', + 
'attributes-list': {'file-directory-security-ntfs-dacl': {'vserver': vserver}}} + + if data is not None: + attributes['num-records'] = '1' + if data.get('access_type'): + attributes['attributes-list']['file-directory-security-ntfs-dacl']['access-type'] = data['access_type'] + if data.get('account'): + attributes['attributes-list']['file-directory-security-ntfs-dacl']['account'] = data['account'] + if data.get('rights'): + attributes['attributes-list']['file-directory-security-ntfs-dacl']['rights'] = data['rights'] + if data.get('advanced_rights'): + attributes['attributes-list']['file-directory-security-ntfs-dacl']['advanced-rights'] = data['advanced_rights'] + if data.get('apply_to'): + tmp = [] + for target in data['apply_to']: + tmp.append({'inheritance-level': target}) + attributes['attributes-list']['file-directory-security-ntfs-dacl']['apply-to'] = tmp + if data.get('security_descriptor'): + attributes['attributes-list']['file-directory-security-ntfs-dacl']['ntfs-sd'] = data['security_descriptor'] + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_ntfs_dacl ''' + + def mock_args(self): + return { + 'vserver': 'vserver', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!' 
+ } + + def get_dacl_mock_object(self, type='zapi', kind=None, status=None): + dacl_obj = dacl_module() + dacl_obj.autosupport_log = Mock(return_value=None) + if type == 'zapi': + if kind is None: + dacl_obj.server = MockONTAPConnection() + else: + dacl_obj.server = MockONTAPConnection(kind=kind, data=status) + return dacl_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + dacl_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_get_dacl_error(self): + data = self.mock_args() + data['access_type'] = 'allow' + data['account'] = 'acc_test' + data['rights'] = 'full_control' + data['security_descriptor'] = 'sd_test' + data['apply_to'] = 'this_folder,files' + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_dacl_mock_object('zapi', 'error', data).apply() + msg = 'Error fetching allow DACL for account acc_test for security descriptor sd_test: NetApp API failed. 
Reason - test:expect error' + assert exc.value.args[0]['msg'] == msg + + def test_successfully_create_dacl(self): + data = self.mock_args() + data['access_type'] = 'allow' + data['account'] = 'acc_test' + data['rights'] = 'full_control' + data['security_descriptor'] = 'sd_test' + data['apply_to'] = 'this_folder,files' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_dacl_mock_object('zapi', 'create', data).apply() + assert exc.value.args[0]['changed'] + + def test_create_dacl_idempotency(self): + data = self.mock_args() + data['access_type'] = 'allow' + data['account'] = 'acc_test' + data['rights'] = 'full_control' + data['security_descriptor'] = 'sd_test' + data['apply_to'] = ['this_folder', 'files'] + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_dacl_mock_object('zapi', 'create_idempotency', data).apply() + assert not exc.value.args[0]['changed'] + + def test_successfully_modify_dacl(self): + data = self.mock_args() + data['access_type'] = 'allow' + data['account'] = 'acc_test' + data['rights'] = 'full_control' + data['security_descriptor'] = 'sd_test' + data['apply_to'] = ['this_folder', 'files'] + set_module_args(data) + data['advanced_rights'] = 'read_data,write_data' + with pytest.raises(AnsibleExitJson) as exc: + self.get_dacl_mock_object('zapi', 'create', data).apply() + assert exc.value.args[0]['changed'] + + def test_modify_dacl_idempotency(self): + data = self.mock_args() + data['access_type'] = 'allow' + data['account'] = 'acc_test' + data['rights'] = 'full_control' + data['security_descriptor'] = 'sd_test' + data['apply_to'] = ['this_folder', 'files'] + set_module_args(data) + data['rights'] = 'full_control' + with pytest.raises(AnsibleExitJson) as exc: + self.get_dacl_mock_object('zapi', 'modify_idempotency', data).apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_dacl.NetAppOntapNtfsDacl.get_dacl') + def 
test_modify_error(self, get_info): + data = self.mock_args() + data['access_type'] = 'allow' + data['account'] = 'acc_test' + data['rights'] = 'full_control' + data['security_descriptor'] = 'sd_test' + set_module_args(data) + get_info.side_effect = [ + { + 'access_type': 'allow', + 'account': 'acc_test', + 'security_descriptor': 'sd_test', + 'rights': 'modify', + 'apply_to': ['this_folder', 'files'] + } + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_dacl_mock_object('zapi', 'error', data).apply() + msg = 'Error modifying allow DACL for account acc_test for security descriptor sd_test: NetApp API failed. Reason - test:expect error' + assert exc.value.args[0]['msg'] == msg + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_dacl.NetAppOntapNtfsDacl.get_dacl') + def test_create_error(self, get_info): + data = self.mock_args() + data['access_type'] = 'allow' + data['account'] = 'acc_test' + data['rights'] = 'full_control' + data['security_descriptor'] = 'sd_test' + set_module_args(data) + get_info.side_effect = [ + None + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_dacl_mock_object('zapi', 'error', data).apply() + msg = 'Error adding allow DACL for account acc_test for security descriptor sd_test: NetApp API failed. 
Reason - test:expect error' + assert exc.value.args[0]['msg'] == msg + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_dacl.NetAppOntapNtfsDacl.get_dacl') + def test_delete_error(self, get_info): + data = self.mock_args() + data['access_type'] = 'allow' + data['account'] = 'acc_test' + data['rights'] = 'full_control' + data['security_descriptor'] = 'sd_test' + data['state'] = 'absent' + set_module_args(data) + get_info.side_effect = [ + { + 'access_type': 'allow', + 'account': 'acc_test', + 'security_descriptor': 'sd_test', + 'rights': 'modify' + } + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_dacl_mock_object('zapi', 'error', data).apply() + msg = 'Error deleting allow DACL for account acc_test for security descriptor sd_test: NetApp API failed. Reason - test:expect error' + assert exc.value.args[0]['msg'] == msg diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py new file mode 100644 index 000000000..6f1f78b34 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntfs_sd.py @@ -0,0 +1,189 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_ntfs_sd''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from 
ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_sd \ + import NetAppOntapNtfsSd as sd_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + request = xml.to_string().decode('utf-8') + if request.startswith(""): + xml = None # or something that may make the logger happy, and you don't need @patch anymore + # or + # xml = build_ems_log_response() + elif self.kind == 'error': + raise netapp_utils.zapi.NaApiError('test', 'expect error') + elif request.startswith(""): + if self.kind == 'create': + xml = self.build_sd_info() + else: + xml = self.build_sd_info(self.params) + elif request.startswith(""): + xml = self.build_sd_info(self.params) + self.xml_out = xml + return xml + + @staticmethod + def build_sd_info(data=None): + xml = netapp_utils.zapi.NaElement('xml') + vserver = 'vserver' + attributes = {'num-records': 1, + 'attributes-list': {'file-directory-security-ntfs': {'vserver': vserver}}} + if data is not None: + if data.get('name'): + attributes['attributes-list']['file-directory-security-ntfs']['ntfs-sd'] = data['name'] + if data.get('owner'): + attributes['attributes-list']['file-directory-security-ntfs']['owner'] = data['owner'] + if data.get('group'): + attributes['attributes-list']['file-directory-security-ntfs']['group'] = data['group'] + if data.get('control_flags_raw'): + attributes['attributes-list']['file-directory-security-ntfs']['control-flags-raw'] = str(data['control_flags_raw']) + xml.translate_struct(attributes) + return xml + + +class
TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_ntfs_sd ''' + + def mock_args(self): + return { + 'vserver': 'vserver', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!' + } + + def get_sd_mock_object(self, type='zapi', kind=None, status=None): + sd_obj = sd_module() + # netapp_utils.ems_log_event = Mock(return_value=None) + if type == 'zapi': + if kind is None: + sd_obj.server = MockONTAPConnection() + else: + sd_obj.server = MockONTAPConnection(kind=kind, data=status) + return sd_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + sd_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_successfully_create_sd(self): + data = self.mock_args() + data['name'] = 'sd_test' + data['owner'] = 'user_test' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_sd_mock_object('zapi', 'create', data).apply() + assert exc.value.args[0]['changed'] + + def test_create_sd_idempotency(self): + data = self.mock_args() + data['name'] = 'sd_test' + data['owner'] = 'user_test' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_sd_mock_object('zapi', 'create_idempotency', data).apply() + assert not exc.value.args[0]['changed'] + + def test_successfully_modify_sd(self): + data = self.mock_args() + data['name'] = 'sd_test' + data['owner'] = 'user_test' + data['control_flags_raw'] = 1 + set_module_args(data) + data['control_flags_raw'] = 2 + with pytest.raises(AnsibleExitJson) as exc: + self.get_sd_mock_object('zapi', 'create', data).apply() + assert exc.value.args[0]['changed'] + + def test_modify_sd_idempotency(self): + data = self.mock_args() + data['name'] = 'sd_test' + data['owner'] = 'user_test' + data['control_flags_raw'] = 2 + set_module_args(data) + data['control_flags_raw'] = 2 + with pytest.raises(AnsibleExitJson) as exc: + 
self.get_sd_mock_object('zapi', 'modify_idempotency', data).apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_sd.NetAppOntapNtfsSd.get_ntfs_sd') + def test_modify_error(self, get_info): + data = self.mock_args() + data['name'] = 'sd_test' + data['owner'] = 'user_test' + data['control_flags_raw'] = 2 + set_module_args(data) + get_info.side_effect = [ + { + 'name': 'sd_test', + 'control_flags_raw': 1 + } + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_sd_mock_object('zapi', 'error', data).apply() + print(exc) + assert exc.value.args[0]['msg'] == 'Error modifying NTFS security descriptor sd_test: NetApp API failed. Reason - test:expect error' + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_sd.NetAppOntapNtfsSd.get_ntfs_sd') + def test_create_error(self, get_info): + data = self.mock_args() + data['name'] = 'sd_test' + data['owner'] = 'user_test' + set_module_args(data) + get_info.side_effect = [ + None + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_sd_mock_object('zapi', 'error', data).apply() + print(exc) + assert exc.value.args[0]['msg'] == 'Error creating NTFS security descriptor sd_test: NetApp API failed. Reason - test:expect error' + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntfs_sd.NetAppOntapNtfsSd.get_ntfs_sd') + def test_delete_error(self, get_info): + data = self.mock_args() + data['name'] = 'sd_test' + data['owner'] = 'user_test' + data['state'] = 'absent' + set_module_args(data) + get_info.side_effect = [ + { + 'name': 'sd_test', + 'owner': 'user_test' + } + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_sd_mock_object('zapi', 'error', data).apply() + print(exc) + assert exc.value.args[0]['msg'] == 'Error deleting NTFS security descriptor sd_test: NetApp API failed. 
Reason - test:expect error' diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntp.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntp.py new file mode 100644 index 000000000..0632cff98 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntp.py @@ -0,0 +1,143 @@ +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP snmp Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_no_warnings, set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntp \ + import NetAppOntapNTPServer as my_module, main as uut_main # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def default_args(): + return { + 'state': 'present', + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'use_rest': 'always' + } + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_6': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': 
(200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'server_record': (200, { + "records": [{ + "server": "0.0.0.0", + "version": "auto", + }], + 'num_records': 1 + }, None), + 'create_server': (200, { + 'job': { + 'uuid': 'fde79888-692a-11ea-80c2-005056b39fe7', + '_links': { + 'self': { + 'href': '/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7'}}} + }, None), + 'job': (200, { + "uuid": "fde79888-692a-11ea-80c2-005056b39fe7", + "state": "success", + "start_time": "2020-02-26T10:35:44-08:00", + "end_time": "2020-02-26T10:47:38-08:00", + "_links": { + "self": { + "href": "/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7" + } + } + }, None) +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(dict(hostname='')) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'missing required arguments: server_name' + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_get_server_called(mock_request, patch_ansible): + args = dict(default_args()) + args['server_name'] = '0.0.0.0' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['server_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_create_server_called(mock_request, patch_ansible): 
+ args = dict(default_args()) + args['server_name'] = '0.0.0.0' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['zero_record'], # get + SRR['create_server'], # create + SRR['job'], # Job + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_server_called(mock_request, patch_ansible): + args = dict(default_args()) + args['server_name'] = '0.0.0.0' + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['server_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntp_key.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntp_key.py new file mode 100644 index 000000000..9e4ed661e --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ntp_key.py @@ -0,0 +1,141 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, 
expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ntp_key \ + import NetAppOntapNTPKey as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'ntp_key': (200, { + "records": [ + { + "id": 1, + "digest_type": "sha1", + "value": "addf120b430021c36c232c99ef8d926aea2acd6b" + }], + "num_records": 1 + }, None), + 'svm_uuid': (200, {"records": [ + { + 'uuid': 'e3cb5c7f-cd20' + }], "num_records": 1}, None) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always' +} + + +def test_get_ntp_key_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster/ntp/keys', SRR['empty_records']) + ]) + module_args = {'id': 1, 'digest_type': 'sha1', 'value': 'test'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_ntp_key() is None + + +def test_get_ntp_key_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster/ntp/keys', SRR['generic_error']) + ]) + module_args = {'id': 1, 'digest_type': 'sha1', 'value': 'test'} + my_module_object = create_module(my_module, DEFAULT_ARGS, module_args) + msg = 'Error fetching key with id 1: calling: cluster/ntp/keys: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_ntp_key, 'fail')['msg'] + + +def test_create_ntp_key(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster/ntp/keys', SRR['empty_records']), + ('POST', 'cluster/ntp/keys', SRR['empty_good']) + ]) + module_args = {'id': 1, 'digest_type': 'sha1', 'value': 'test'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_ntp_key_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('POST', 'cluster/ntp/keys', SRR['generic_error']) + ]) + module_args = {'id': 1, 'digest_type': 'sha1', 'value': 'test'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = expect_and_capture_ansible_exception(my_obj.create_ntp_key, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error creating key with id 1: calling: cluster/ntp/keys: got Expected error.' 
== error + + +def test_delete_ntp_key(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster/ntp/keys', SRR['ntp_key']), + ('DELETE', 'cluster/ntp/keys/1', SRR['empty_good']) + ]) + module_args = {'state': 'absent', 'id': 1, 'digest_type': 'sha1', 'value': 'test'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_ntp_key_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('DELETE', 'cluster/ntp/keys/1', SRR['generic_error']) + ]) + module_args = {'id': 1, 'digest_type': 'sha1', 'value': 'test', 'state': 'absent'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = expect_and_capture_ansible_exception(my_obj.delete_ntp_key, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error deleting key with id 1: calling: cluster/ntp/keys/1: got Expected error.' == error + + +def test_modify_ntp_key(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster/ntp/keys', SRR['ntp_key']), + ('PATCH', 'cluster/ntp/keys/1', SRR['empty_good']) + ]) + module_args = {'id': 1, 'digest_type': 'sha1', 'value': 'test2'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_ntp_key_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/ntp/keys', SRR['ntp_key']), + ('PATCH', 'cluster/ntp/keys/1', SRR['generic_error']) + ]) + module_args = {'id': 1, 'digest_type': 'sha1', 'value': 'test2'} + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + assert 'Error modifying key with id 1: calling: cluster/ntp/keys/1: got Expected error.' 
== error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme.py new file mode 100644 index 000000000..b24c0e289 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme.py @@ -0,0 +1,185 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_nvme''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme \ + import NetAppONTAPNVMe as my_module + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'nvme': + xml = self.build_nvme_info() + elif self.type == 'nvme_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_nvme_info(): + ''' build xml data for nvme-info ''' + xml = 
netapp_utils.zapi.NaElement('xml') + data = {'num-records': 1, + 'attributes-list': [{'nvme-target-service-info': {'is-available': 'true'}}]} + xml.translate_struct(data) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.193.75.3' + username = 'admin' + password = 'netapp1!' + vserver = 'ansible' + status_admin = True + else: + hostname = 'hostname' + username = 'username' + password = 'password' + vserver = 'vserver' + status_admin = True + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'vserver': vserver, + 'status_admin': status_admin, + 'use_rest': 'never', + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_get_called(self): + ''' test get_nvme() for non-existent nvme''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + assert my_obj.get_nvme() is None + + def test_ensure_get_called_existing(self): + ''' test get_nvme() for existing nvme''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = MockONTAPConnection(kind='nvme') + assert my_obj.get_nvme() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme.NetAppONTAPNVMe.create_nvme') + def test_successful_create(self, create_nvme): + ''' creating nvme and testing idempotency ''' + set_module_args(self.set_default_args()) + my_obj = my_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + create_nvme.assert_called_with() + # to reset na_helper from remembering the 
previous 'changed' value + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('nvme') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme.NetAppONTAPNVMe.delete_nvme') + def test_successful_delete(self, delete_nvme): + ''' deleting nvme and testing idempotency ''' + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('nvme') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + delete_nvme.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme.NetAppONTAPNVMe.modify_nvme') + def test_successful_modify(self, modify_nvme): + ''' modifying nvme and testing idempotency ''' + data = self.set_default_args() + data['status_admin'] = False + set_module_args(data) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('nvme') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + modify_nvme.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + data = self.set_default_args() + set_module_args(data) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('nvme') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not 
self.onbox: + my_obj.server = MockONTAPConnection('nvme_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_nvme() + assert 'Error fetching nvme info:' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_nvme() + assert 'Error creating nvme' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_nvme() + assert 'Error deleting nvme' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_nvme() + assert 'Error modifying nvme' in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py new file mode 100644 index 000000000..b70b9fd10 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace.py @@ -0,0 +1,168 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_nvme_namespace''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_namespace \ + import NetAppONTAPNVMENamespace as my_module + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to 
ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'namespace': + xml = self.build_namespace_info() + elif self.type == 'quota_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_namespace_info(): + ''' build xml data for namespace-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = {'num-records': 2, + 'attributes-list': [{'nvme-namespace-info': {'path': 'abcd/vol'}}, + {'nvme-namespace-info': {'path': 'xyz/vol'}}]} + xml.translate_struct(data) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.193.75.3' + username = 'admin' + password = 'netapp1!' 
+ vserver = 'ansible' + ostype = 'linux' + path = 'abcd/vol' + size = 20 + size_unit = 'mb' + else: + hostname = 'hostname' + username = 'username' + password = 'password' + vserver = 'vserver' + ostype = 'linux' + path = 'abcd/vol' + size = 20 + size_unit = 'mb' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'ostype': ostype, + 'vserver': vserver, + 'path': path, + 'size': size, + 'size_unit': size_unit, + 'use_rest': 'never', + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_get_called(self): + ''' test get_namespace() for non-existent namespace''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + assert my_obj.get_namespace() is None + + def test_ensure_get_called_existing(self): + ''' test get_namespace() for existing namespace''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = MockONTAPConnection(kind='namespace') + assert my_obj.get_namespace() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_namespace.NetAppONTAPNVMENamespace.create_namespace') + def test_successful_create(self, create_namespace): + ''' creating namespace and testing idempotency ''' + set_module_args(self.set_default_args()) + my_obj = my_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + create_namespace.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('namespace') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + 
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_namespace.NetAppONTAPNVMENamespace.delete_namespace') + def test_successful_delete(self, delete_namespace): + ''' deleting namespace and testing idempotency ''' + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('namespace') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + delete_namespace.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('quota_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_namespace() + assert 'Error fetching namespace info:' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_namespace() + assert 'Error creating namespace for path' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_namespace() + assert 'Error deleting namespace for path' in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace_rest.py new file mode 100644 index 000000000..648caaf87 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_namespace_rest.py @@ -0,0 +1,121 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" 
unit tests for Ansible module: na_ontap_aggregate when using REST """ + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_namespace \ + import NetAppONTAPNVMENamespace as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'nvme_namespace': (200, { + "name": "/vol/test/disk1", + "uuid": "81068ae6-4674-4d78-a8b7-dadb23f67edf", + "svm": { + "name": "ansibleSVM" + }, + "enabled": True + }, None) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always', + 'vserver': 'ansibleSVM', + 'ostype': 'linux', + 'path': '/vol/test/disk1', + 'size': 10, + 'size_unit': 'mb', + 'block_size': 4096 +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "path", "vserver"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_get_namespace_none(): + register_responses([ + ('GET', 'cluster', 
SRR['is_rest_96']), + ('GET', 'storage/namespaces', SRR['empty_records']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_namespace_rest() is None + + +def test_get_namespace_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'storage/namespaces', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching namespace info for vserver: ansibleSVM' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_namespace_rest, 'fail')['msg'] + + +def test_create_namespace(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'storage/namespaces', SRR['empty_records']), + ('POST', 'storage/namespaces', SRR['empty_good']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + + +def test_create_namespace_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('POST', 'storage/namespaces', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.create_namespace_rest, 'fail')['msg'] + msg = 'Error creating namespace for vserver ansibleSVM: calling: storage/namespaces: got Expected error.' 
+ assert msg == error + + +def test_delete_namespace(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'storage/namespaces', SRR['nvme_namespace']), + ('DELETE', 'storage/namespaces/81068ae6-4674-4d78-a8b7-dadb23f67edf', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_namespace_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('DELETE', 'storage/namespaces/81068ae6-4674-4d78-a8b7-dadb23f67edf', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['state'] = 'absent' + my_obj.namespace_uuid = '81068ae6-4674-4d78-a8b7-dadb23f67edf' + error = expect_and_capture_ansible_exception(my_obj.delete_namespace_rest, 'fail')['msg'] + msg = 'Error deleting namespace for vserver ansibleSVM: calling: storage/namespaces/81068ae6-4674-4d78-a8b7-dadb23f67edf: got Expected error.' + assert msg == error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_rest.py new file mode 100644 index 000000000..db23a8e72 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_rest.py @@ -0,0 +1,131 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_aggregate when using REST """ + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible, \ + create_and_apply, create_module, expect_and_capture_ansible_exception +from 
ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme \ + import NetAppONTAPNVMe as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'nvme_service': (200, { + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + "enabled": True + }, None) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always', + 'vserver': 'svm1' +} + + +def test_get_nvme_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/services', SRR['empty_records']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + assert my_obj.get_nvme_rest() is None + + +def test_get_nvme_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/services', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching nvme info for vserver: svm1' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_nvme_rest, 'fail')['msg'] + + +def test_create_nvme(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/services', SRR['empty_records']), + ('POST', 'protocols/nvme/services', SRR['empty_good']) + ]) + module_args = {'status_admin': True} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_nvme_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('POST', 'protocols/nvme/services', SRR['generic_error']) + ]) + my_obj = 
create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['status_admin'] = True + error = expect_and_capture_ansible_exception(my_obj.create_nvme_rest, 'fail')['msg'] + msg = 'Error creating nvme for vserver svm1: calling: protocols/nvme/services: got Expected error.' + assert msg == error + + +def test_delete_nvme(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/services', SRR['nvme_service']), + ('PATCH', 'protocols/nvme/services/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['empty_good']), + ('DELETE', 'protocols/nvme/services/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_nvme_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('DELETE', 'protocols/nvme/services/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['state'] = 'absent' + my_obj.svm_uuid = '02c9e252-41be-11e9-81d5-00a0986138f7' + error = expect_and_capture_ansible_exception(my_obj.delete_nvme_rest, 'fail')['msg'] + msg = 'Error deleting nvme for vserver svm1: calling: protocols/nvme/services/02c9e252-41be-11e9-81d5-00a0986138f7: got Expected error.' 
+ assert msg == error + + +def test_modify_nvme(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/services', SRR['nvme_service']), + ('PATCH', 'protocols/nvme/services/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['empty_good']) + ]) + module_args = {'status_admin': False} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_nvme_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('PATCH', 'protocols/nvme/services/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['status_admin'] = False + my_obj.svm_uuid = '02c9e252-41be-11e9-81d5-00a0986138f7' + error = expect_and_capture_ansible_exception(my_obj.modify_nvme_rest, 'fail', {'status': False})['msg'] + msg = 'Error modifying nvme for vserver: svm1' + assert msg == error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py new file mode 100644 index 000000000..0e6acdbec --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem.py @@ -0,0 +1,225 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_nvme_subsystem ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + expect_and_capture_ansible_exception, call_main, create_module, patch_ansible +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from 
ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_error_message, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_subsystem import NetAppONTAPNVMESubsystem as my_module, main as my_main + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +subsystem_info = { + 'attributes-list': [{'nvme-target-subsystem-map-info': {'path': 'abcd/vol'}}, + {'nvme-target-subsystem-map-info': {'path': 'xyz/vol'}}]} + +subsystem_info_one_path = { + 'attributes-list': [{'nvme-target-subsystem-map-info': {'path': 'abcd/vol'}}]} + +subsystem_info_one_host = { + 'attributes-list': [{'nvme-target-subsystem-map-info': {'host-nqn': 'host-nqn'}}]} + +ZRR = zapi_responses({ + 'subsystem_info': build_zapi_response(subsystem_info, 2), + 'subsystem_info_one_path': build_zapi_response(subsystem_info_one_path, 1), + 'subsystem_info_one_host': build_zapi_response(subsystem_info_one_host, 1), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'never', + 'subsystem': 'subsystem', + 'vserver': 'vserver', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + args = dict(DEFAULT_ARGS) + args.pop('subsystem') + error = 'missing required arguments: subsystem' + assert error in call_main(my_main, args, fail=True)['msg'] + + +def test_ensure_get_called(): + ''' test get_subsystem() for non-existent subsystem''' + register_responses([ + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_subsystem() is None + + +def 
test_ensure_get_called_existing(): + ''' test get_subsystem() for existing subsystem''' + register_responses([ + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['subsystem_info']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_subsystem() + + +def test_successful_create(): + ''' creating subsystem and testing idempotency ''' + register_responses([ + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['no_records']), + ('ZAPI', 'nvme-subsystem-create', ZRR['success']), + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['subsystem_info']), + # idempotency + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['subsystem_info']), + ]) + module_args = { + 'use_rest': 'never', + 'ostype': 'windows' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_delete(): + ''' deleting subsystem and testing idempotency ''' + register_responses([ + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['subsystem_info']), + ('ZAPI', 'nvme-subsystem-delete', ZRR['success']), + # idempotency + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + 'state': 'absent', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_get_host_map_called(): + ''' test get_subsystem_host_map() for non-existent subsystem''' + register_responses([ + ('ZAPI', 'nvme-subsystem-map-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_subsystem_host_map('paths') is None + + +def test_ensure_get_host_map_called_existing(): + ''' test get_subsystem_host_map() for existing subsystem''' + register_responses([ + ('ZAPI', 'nvme-subsystem-map-get-iter', ZRR['subsystem_info']), + ]) + module_args = { + 'use_rest': 
'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_subsystem_host_map('paths') + + +def test_successful_add(): + ''' adding subsystem host/map and testing idempotency ''' + register_responses([ + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['subsystem_info']), + ('ZAPI', 'nvme-subsystem-host-get-iter', ZRR['no_records']), + ('ZAPI', 'nvme-subsystem-map-get-iter', ZRR['no_records']), + ('ZAPI', 'nvme-subsystem-host-add', ZRR['success']), + ('ZAPI', 'nvme-subsystem-map-add', ZRR['success']), + ('ZAPI', 'nvme-subsystem-map-add', ZRR['success']), + # idempotency + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['subsystem_info']), + ('ZAPI', 'nvme-subsystem-host-get-iter', ZRR['subsystem_info_one_host']), + ('ZAPI', 'nvme-subsystem-map-get-iter', ZRR['subsystem_info']), + ]) + module_args = { + 'use_rest': 'never', + 'ostype': 'windows', + 'paths': ['abcd/vol', 'xyz/vol'], + 'hosts': 'host-nqn' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_remove(): + ''' removing subsystem host/map and testing idempotency ''' + register_responses([ + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['subsystem_info']), + ('ZAPI', 'nvme-subsystem-map-get-iter', ZRR['subsystem_info']), + ('ZAPI', 'nvme-subsystem-map-remove', ZRR['success']), + # idempotency + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['subsystem_info_one_path']), + ('ZAPI', 'nvme-subsystem-map-get-iter', ZRR['subsystem_info_one_path']), + ]) + module_args = { + 'use_rest': 'never', + 'paths': ['abcd/vol'], + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + module_args = { + "use_rest": "never" + } + assert 
'Error: the python NetApp-Lib module is required. Import error: None' == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_error_handling(): + ''' test error handling on ZAPI calls ''' + register_responses([ + ('ZAPI', 'nvme-subsystem-get-iter', ZRR['error']), + ('ZAPI', 'nvme-subsystem-create', ZRR['error']), + ('ZAPI', 'nvme-subsystem-delete', ZRR['error']), + ('ZAPI', 'nvme-subsystem-map-get-iter', ZRR['error']), + ('ZAPI', 'nvme-subsystem-host-add', ZRR['error']), + ('ZAPI', 'nvme-subsystem-map-add', ZRR['error']), + ('ZAPI', 'nvme-subsystem-host-remove', ZRR['error']), + ('ZAPI', 'nvme-subsystem-map-remove', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'ostype': 'windows' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = zapi_error_message('Error fetching subsystem info') + assert error in expect_and_capture_ansible_exception(my_obj.get_subsystem, 'fail')['msg'] + error = zapi_error_message('Error creating subsystem for subsystem') + assert error in expect_and_capture_ansible_exception(my_obj.create_subsystem, 'fail')['msg'] + error = zapi_error_message('Error deleting subsystem for subsystem') + assert error in expect_and_capture_ansible_exception(my_obj.delete_subsystem, 'fail')['msg'] + error = zapi_error_message('Error fetching subsystem path info') + assert error in expect_and_capture_ansible_exception(my_obj.get_subsystem_host_map, 'fail', 'paths')['msg'] + error = zapi_error_message('Error adding hostname for subsystem subsystem') + assert error in expect_and_capture_ansible_exception(my_obj.add_subsystem_host_map, 'fail', ['hostname'], 'hosts')['msg'] + error = zapi_error_message('Error adding pathname for subsystem subsystem') + assert error in expect_and_capture_ansible_exception(my_obj.add_subsystem_host_map, 'fail', ['pathname'], 'paths')['msg'] + error = zapi_error_message('Error removing hostname for subsystem subsystem') + assert error in 
expect_and_capture_ansible_exception(my_obj.remove_subsystem_host_map, 'fail', ['hostname'], 'hosts')['msg'] + error = zapi_error_message('Error removing pathname for subsystem subsystem') + assert error in expect_and_capture_ansible_exception(my_obj.remove_subsystem_host_map, 'fail', ['pathname'], 'paths')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem_rest.py new file mode 100644 index 000000000..693816662 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_nvme_subsystem_rest.py @@ -0,0 +1,256 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_nvme_subsystem when using REST """ + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + patch_ansible, call_main, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_nvme_subsystem\ + import NetAppONTAPNVMESubsystem as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'nvme_subsystem': (200, { + "hosts": [{ + 
"nqn": "nqn.1992-08.com.netapp:sn.f2207584d03611eca164005056b3bd39:subsystem.test3" + }], + "name": "subsystem1", + "uuid": "81068ae6-4674-4d78-a8b7-dadb23f67edf", + "comment": "string", + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + "os_type": "hyper_v", + "subsystem_maps": [{ + "namespace": { + "name": "/vol/test3/disk1", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }, + }], + "enabled": True, + }, None), + # 'nvme_host': (200, [{ + # "nqn": "nqn.1992-08.com.netapp:sn.f2207584d03611eca164005056b3bd39:subsystem.test3" + # }], None), + 'nvme_map': (200, { + "records": [{ + "namespace": { + "name": "/vol/test3/disk1", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", + }, + }], "num_records": 1, + }, None), + + 'nvme_host': (200, { + "records": [ + { + "nqn": "nqn.1992-08.com.netapp:sn.f2207584d03611eca164005056b3bd39:subsystem.test3", + "subsystem": { + "uuid": "81068ae6-4674-4d78-a8b7-dadb23f67edf" + } + } + ], + "num_records": 1 + }, None), + + 'error_svm_not_found': (400, None, 'SVM "ansibleSVM" does not exist.') +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always', + 'vserver': 'ansibleSVM', + 'ostype': 'linux', + 'subsystem': 'subsystem1', +} + + +def test_get_subsystem_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['empty_records']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + assert my_module_object.get_subsystem_rest() is None + + +def test_get_subsystem_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['generic_error']), + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching subsystem info for vserver: ansibleSVM' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_subsystem_rest, 'fail')['msg'] + + +def test_create_subsystem(): + 
register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['empty_records']), + ('POST', 'protocols/nvme/subsystems', SRR['empty_good']), + ('GET', 'protocols/nvme/subsystems', SRR['nvme_subsystem']), + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + + +def test_create_subsystem_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('POST', 'protocols/nvme/subsystems', SRR['generic_error']), + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['zero_records']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + error = 'Error creating subsystem for vserver ansibleSVM: calling: protocols/nvme/subsystems: got Expected error.' + assert error in expect_and_capture_ansible_exception(my_obj.create_subsystem_rest, 'fail')['msg'] + args = dict(DEFAULT_ARGS) + del args['ostype'] + error = "Error: Missing required parameter 'ostype' for creating subsystem" + assert error in call_main(my_main, args, fail=True)['msg'] + + +def test_delete_subsystem(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['nvme_subsystem']), + ('DELETE', 'protocols/nvme/subsystems/81068ae6-4674-4d78-a8b7-dadb23f67edf', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_subsystem_no_vserver(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['error_svm_not_found']), + ]) + module_args = {'state': 'absent'} + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_subsystem_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('DELETE', 'protocols/nvme/subsystems/81068ae6-4674-4d78-a8b7-dadb23f67edf', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + 
my_obj.parameters['state'] = 'absent' + my_obj.subsystem_uuid = '81068ae6-4674-4d78-a8b7-dadb23f67edf' + error = expect_and_capture_ansible_exception(my_obj.delete_subsystem_rest, 'fail')['msg'] + msg = 'Error deleting subsystem for vserver ansibleSVM: calling: protocols/nvme/subsystems/81068ae6-4674-4d78-a8b7-dadb23f67edf: got Expected error.' + assert msg == error + + +def test_add_subsystem_host(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['empty_records']), + ('POST', 'protocols/nvme/subsystems', SRR['empty_good']), + ('GET', 'protocols/nvme/subsystems', SRR['nvme_subsystem']), + ('POST', 'protocols/nvme/subsystems/81068ae6-4674-4d78-a8b7-dadb23f67edf/hosts', SRR['empty_good']) + ]) + module_args = {'hosts': ['nqn.1992-08.com.netapp:sn.f2207584d03611eca164005056b3bd39:subsystem.test3']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_add_only_subsystem_host(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['nvme_subsystem']), + ('GET', 'protocols/nvme/subsystems/81068ae6-4674-4d78-a8b7-dadb23f67edf/hosts', SRR['empty_records']), + ('POST', 'protocols/nvme/subsystems/81068ae6-4674-4d78-a8b7-dadb23f67edf/hosts', SRR['empty_good']) + ]) + module_args = {'hosts': ['nqn.1992-08.com.netapp:sn.f2207584d03611eca164005056b3bd39:subsystem.test3']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_add_subsystem_map(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['empty_records']), + ('POST', 'protocols/nvme/subsystems', SRR['empty_good']), + ('GET', 'protocols/nvme/subsystems', SRR['nvme_subsystem']), + ('POST', 'protocols/nvme/subsystem-maps', SRR['empty_good']) + ]) + module_args = {'paths': ['/vol/test3/disk1']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def 
test_add_only_subsystem_map(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['nvme_subsystem']), + ('GET', 'protocols/nvme/subsystem-maps', SRR['empty_records']), + ('POST', 'protocols/nvme/subsystem-maps', SRR['empty_good']) + ]) + module_args = {'paths': ['/vol/test3/disk1']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_remove_only_subsystem_host(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['nvme_subsystem']), + ('GET', 'protocols/nvme/subsystems/81068ae6-4674-4d78-a8b7-dadb23f67edf/hosts', SRR['nvme_host']), + ('POST', 'protocols/nvme/subsystems/81068ae6-4674-4d78-a8b7-dadb23f67edf/hosts', SRR['empty_good']), + ('DELETE', 'protocols/nvme/subsystems/81068ae6-4674-4d78-a8b7-dadb23f67edf/' + 'hosts/nqn.1992-08.com.netapp:sn.f2207584d03611eca164005056b3bd39:subsystem.test3', SRR['empty_good']) + ]) + module_args = {'hosts': ['nqn.1992-08.com.netapp:sn.f2207584d03611eca164005056b3bd39:subsystem.test']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_remove_only_subsystem_map(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems', SRR['nvme_subsystem']), + ('GET', 'protocols/nvme/subsystem-maps', SRR['nvme_map']), + ('POST', 'protocols/nvme/subsystem-maps', SRR['empty_good']), + ('DELETE', 'protocols/nvme/subsystem-maps/81068ae6-4674-4d78-a8b7-dadb23f67edf/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_good']) + ]) + module_args = {'paths': ['/vol/test2/disk1']} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_errors(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'protocols/nvme/subsystems/None/hosts', SRR['generic_error']), + ('GET', 'protocols/nvme/subsystem-maps', SRR['generic_error']), + ('POST', 
'protocols/nvme/subsystems/None/hosts', SRR['generic_error']), + ('POST', 'protocols/nvme/subsystem-maps', SRR['generic_error']), + ('DELETE', 'protocols/nvme/subsystems/None/hosts/host', SRR['generic_error']), + ('DELETE', 'protocols/nvme/subsystem-maps/None/None', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + error = rest_error_message('Error fetching subsystem host info for vserver: ansibleSVM', 'protocols/nvme/subsystems/None/hosts') + assert error in expect_and_capture_ansible_exception(my_module_object.get_subsystem_host_map_rest, 'fail', 'hosts')['msg'] + error = rest_error_message('Error fetching subsystem map info for vserver: ansibleSVM', 'protocols/nvme/subsystem-maps') + assert error in expect_and_capture_ansible_exception(my_module_object.get_subsystem_host_map_rest, 'fail', 'paths')['msg'] + error = rest_error_message('Error adding [] for subsystem subsystem1', 'protocols/nvme/subsystems/None/hosts') + assert error in expect_and_capture_ansible_exception(my_module_object.add_subsystem_host_map_rest, 'fail', [], 'hosts')['msg'] + error = rest_error_message('Error adding path for subsystem subsystem1', 'protocols/nvme/subsystem-maps') + assert error in expect_and_capture_ansible_exception(my_module_object.add_subsystem_host_map_rest, 'fail', ['path'], 'paths')['msg'] + error = rest_error_message('Error removing host for subsystem subsystem1', 'protocols/nvme/subsystems/None/hosts/host') + assert error in expect_and_capture_ansible_exception(my_module_object.remove_subsystem_host_map_rest, 'fail', ['host'], 'hosts')['msg'] + error = rest_error_message('Error removing path for subsystem subsystem1', 'protocols/nvme/subsystem-maps/None/None') + assert error in expect_and_capture_ansible_exception(my_module_object.remove_subsystem_host_map_rest, 'fail', ['path'], 'paths')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_object_store.py 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_object_store.py new file mode 100644 index 000000000..91430883c --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_object_store.py @@ -0,0 +1,538 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_object_store """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_object_store \ + import NetAppOntapObjectStoreConfig as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'get_uuid': (200, {'records': [{'uuid': 'ansible'}]}, None), + 'get_object_store': (200, + {'uuid': 'ansible', + 'name': 'ansible', + 'provider_type': 'abc', + 'access_key': 'abc', + 'owner': 'fabricpool' + }, None) +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def 
__init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + print('IN:', xml.to_string()) + if self.type == 'object_store': + xml = self.build_object_store_info() + elif self.type == 'object_store_not_found': + self.type = 'object_store' + raise netapp_utils.zapi.NaApiError(code='15661', message="This exception is from the unit test - 15661") + elif self.type == 'object_store_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + print('OUT:', xml.to_string()) + return xml + + @staticmethod + def build_object_store_info(): + ''' build xml data for object store ''' + xml = netapp_utils.zapi.NaElement('xml') + data = {'attributes': + {'aggr-object-store-config-info': + {'object-store-name': 'ansible', + 'provider-type': 'abc', + 'access-key': 'abc', + 'server': 'abc', + 's3-name': 'abc', + 'ssl-enabled': 'true', + 'port': '1234', + 'is-certificate-validation-enabled': 'true'} + } + } + xml.translate_struct(data) + print(xml.to_string()) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + # whether to use a mock or a simulator + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + name = 'ansible' + else: + hostname = 'hostname' + username = 'username' + password = 'password' + name = 'ansible' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'name': name, + 'feature_flags': {'no_cserver_ems': True} + }) + + def call_command(self, module_args): + ''' utility function to call apply ''' + module_args.update(self.set_default_args()) + 
set_module_args(module_args) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + # mock the connection + my_obj.server = MockONTAPConnection('object_store') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + return exc.value.args[0]['changed'] + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_negative_netapp_lib(self, mock_request, mock_has_netapp_lib): + ''' fetching details of object store ''' + mock_request.side_effect = [ + SRR['is_zapi'], + SRR['end_of_sequence'] + ] + mock_has_netapp_lib.return_value = False + set_module_args(self.set_default_args()) + with pytest.raises(AnsibleFailJson) as exc: + my_main() + msg = 'Error: the python NetApp-Lib module is required. 
Import error: None' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_ensure_object_store_get_called(self, mock_request): + ''' fetching details of object store ''' + mock_request.side_effect = [ + SRR['is_zapi'], + SRR['end_of_sequence'] + ] + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + assert my_obj.get_aggr_object_store() is None + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_ensure_get_called_existing(self, mock_request): + ''' test for existing object store''' + mock_request.side_effect = [ + SRR['is_zapi'], + SRR['end_of_sequence'] + ] + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = MockONTAPConnection(kind='object_store') + object_store = my_obj.get_aggr_object_store() + assert object_store + assert 'name' in object_store + assert object_store['name'] == 'ansible' + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_object_store_create(self, mock_request): + ''' test for creating object store''' + mock_request.side_effect = [ + SRR['is_zapi'], + SRR['end_of_sequence'] + ] + module_args = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc', + 'access_key': 'abc', + 'secret_password': 'abc', + 'port': 1234, + 'certificate_validation_enabled': True, + 'ssl_enabled': True + } + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not self.onbox: + # mock the connection + my_obj.server = MockONTAPConnection(kind='object_store_not_found') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def 
test_object_store_negative_create_bad_owner(self, mock_request): + ''' test for creating object store''' + mock_request.side_effect = [ + SRR['is_zapi'], + SRR['end_of_sequence'] + ] + module_args = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc', + 'access_key': 'abc', + 'secret_password': 'abc', + 'port': 1234, + 'certificate_validation_enabled': True, + 'ssl_enabled': True, + 'owner': 'snapmirror' + } + module_args.update(self.set_default_args()) + set_module_args(module_args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + print(exc.value.args[0]) + assert exc.value.args[0]['msg'] == 'Error: unsupported value for owner: snapmirror when using ZAPI.' + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_object_store_delete(self, mock_request): + ''' test for deleting object store''' + mock_request.side_effect = [ + SRR['is_zapi'], + SRR['end_of_sequence'] + ] + module_args = { + 'state': 'absent', + } + changed = self.call_command(module_args) + assert changed + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_negative_object_store_modify(self, mock_request): + ''' test for modifying object store''' + mock_request.side_effect = [ + SRR['is_zapi'], + SRR['end_of_sequence'] + ] + module_args = { + 'provider_type': 'abc', + 'server': 'abc2', + 'container': 'abc', + 'access_key': 'abc2', + 'secret_password': 'abc' + } + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + my_obj.server = MockONTAPConnection(kind='object_store') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = 'Error - modify is not supported with ZAPI' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error(self, mock_request): + mock_request.side_effect = [ + SRR['is_zapi'], 
+ SRR['end_of_sequence'] + ] + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + assert exc.value.args[0]['msg'] == 'Error calling: cloud/targets: got %s.' % SRR['generic_error'][2] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_create(self, mock_request): + data = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc', + 'access_key': 'abc', + 'secret_password': 'abc', + 'port': 1234, + 'certificate_validation_enabled': True, + 'ssl_enabled': True + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_negative_create_missing_parameter(self, mock_request): + data = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc', + 'secret_password': 'abc', + 'port': 1234, + 'certificate_validation_enabled': True, + 'ssl_enabled': True + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = 'Error provisioning object store ansible: one of the following parameters are missing' + assert msg in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_negative_create_api_error(self, mock_request): + data = { + 
'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc', + 'access_key': 'abc', + 'secret_password': 'abc', + 'port': 1234, + 'certificate_validation_enabled': True, + 'ssl_enabled': True + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_main() + assert exc.value.args[0]['msg'] == 'Error calling: cloud/targets: got %s.' % SRR['generic_error'][2] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_modify(self, mock_request): + data = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc2', + 'access_key': 'abc2', + 'secret_password': 'abc' + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_object_store'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_negative_modify_rest_error(self, mock_request): + data = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc2', + 'access_key': 'abc2', + 'secret_password': 'abc' + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_object_store'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + assert exc.value.args[0]['msg'] == 'Error calling: cloud/targets/ansible: got %s.' 
% SRR['generic_error'][2] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_negative_modify_owner(self, mock_request): + data = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc2', + 'access_key': 'abc2', + 'secret_password': 'abc', + 'owner': 'snapmirror' + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_object_store'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + assert exc.value.args[0]['msg'] == 'Error modifying object store, owner cannot be changed. Found: snapmirror.' + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_modify_password(self, mock_request): + data = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc', + 'access_key': 'abc', + 'secret_password': 'abc2', + 'change_password': True + + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_object_store'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert exc.value.args[0]['changed'] + assert 'secret_password' in exc.value.args[0]['modify'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_idempotent(self, mock_request): + data = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc', + 'access_key': 'abc', + 'secret_password': 'abc2' + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_object_store'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with 
pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print(mock_request.mock_calls) + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_delete(self, mock_request): + data = { + 'state': 'absent', + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['get_object_store'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_negative_delete(self, mock_request): + data = { + 'state': 'absent', + } + data.update(self.set_default_args()) + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_object_store'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + assert exc.value.args[0]['msg'] == 'Error calling: cloud/targets/ansible: got %s.' 
% SRR['generic_error'][2] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_if_all_methods_catch_exception(self, mock_request): + mock_request.side_effect = [ + SRR['is_zapi'], + SRR['end_of_sequence'] + ] + module_args = { + 'provider_type': 'abc', + 'server': 'abc', + 'container': 'abc', + 'access_key': 'abc', + 'secret_password': 'abc' + } + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('object_store_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_aggr_object_store() + assert '' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_aggr_object_store(None) + assert 'Error provisioning object store config ' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_aggr_object_store() + assert 'Error removing object store config ' in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_partitions.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_partitions.py new file mode 100644 index 000000000..975ffb161 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_partitions.py @@ -0,0 +1,515 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP disks Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, 
AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_partitions \ + import NetAppOntapPartitions as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +def default_args(): + args = { + 'disk_type': 'SAS', + 'partitioning_method': 'root_data', + 'partition_type': 'data', + 'partition_count': 13, + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'node': 'node1', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'owned_partitions_record': (200, { + "records": [ + { + "partition": "1.0.0.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "fas2552-rtp-13-02", + "owner_node_name": "fas2552-rtp-13-02" + }, + { + "partition": "1.0.2.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "fas2552-rtp-13-02", + "owner_node_name": "fas2552-rtp-13-02" + }, + { + "partition": "1.0.4.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "fas2552-rtp-13-02", + "owner_node_name": "fas2552-rtp-13-02" + } + ], + "num_records": 3 + }, None), + + 
'unassigned_partitions_record': (200, { + "records": [ + { + "partition": "1.0.25.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "fas2552-rtp-13-02", + "owner_node_name": "fas2552-rtp-13-02" + }, + { + "partition": "1.0.27.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "fas2552-rtp-13-02", + "owner_node_name": "fas2552-rtp-13-02" + }, + ], + "num_records": 2 + }, None), + + 'unassigned_disks_record': (200, { + "records": [ + { + 'name': '1.0.27', + 'type': 'sas', + 'container_type': 'unassigned', + 'home_node': {'name': 'node1'}}, + { + 'name': '1.0.28', + 'type': 'sas', + 'container_type': 'unassigned', + 'home_node': {'name': 'node1'}} + ], + 'num_records': 2}, None), + + 'home_spare_disk_info_record': (200, { + 'records': [], + 'num_records': 2}, None), + + 'spare_partitions_record': (200, { + "records": [ + { + "partition": "1.0.0.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "fas2552-rtp-13-02", + "owner_node_name": "fas2552-rtp-13-02" + }, + { + "partition": "1.0.1.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "fas2552-rtp-13-02", + "owner_node_name": "fas2552-rtp-13-02" + } + ], 'num_records': 2 + }, None), + + 'partner_spare_partitions_record': (200, { + "records": [ + { + "partition": "1.0.1.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "node2", + "owner_node_name": "node2" + }, + { + "partition": "1.0.3.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "node2", + "owner_node_name": "node2" + }, + { + "partition": "1.0.5.P1", + 
"container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "node2", + "owner_node_name": "node2" + }, + { + "partition": "1.0.23.P1", + "container_type": "spare", + "partitioning_method": "root_data", + "is_root": False, + "disk_type": "sas", + "home_node_name": "node2", + "owner_node_name": "node2" + } + ], "num_records": 4 + }, None), + + 'partner_node_name_record': (200, { + 'records': [ + { + 'uuid': 'c345c182-a6a0-11eb-af7b-00a0984839de', + 'name': 'node2', + 'ha': { + 'partners': [ + {'name': 'node1'} + ] + } + } + ], 'num_records': 1 + }, None), + + 'partner_spare_disks_record': (200, { + 'records': [ + { + 'name': '1.0.22', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node2'} + }, + { + 'name': '1.0.20', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node2'} + }, + { + 'name': '1.0.18', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node2'} + }, + { + 'name': '1.0.16', + 'type': 'sas', + 'container_type': 'spare', + 'home_node': {'name': 'node2'} + } + ], 'num_records': 4 + }, None), + + 'adp2_owned_partitions_record': (200, { + "records": [ + { + "partition": "1.0.0.P1", + "container_type": "spare", + "partitioning_method": "root_data1_data2", + "is_root": False, + "disk_type": "ssd", + "home_node_name": "aff300-rtp-2b", + "owner_node_name": "aff300-rtp-2b" + }, + { + "partition": "1.0.1.P1", + "container_type": "spare", + "partitioning_method": "root_data1_data2", + "is_root": False, + "disk_type": "ssd", + "home_node_name": "aff300-rtp-2b", + "owner_node_name": "aff300-rtp-2b" + }, + { + "partition": "1.0.23.P1", + "container_type": "spare", + "partitioning_method": "root_data1_data2", + "is_root": False, + "disk_type": "ssd", + "home_node_name": "aff300-rtp-2b", + "owner_node_name": "aff300-rtp-2b" + } + ], "num_records": 3 + }, None), +} + + +def test_rest_missing_arguments(patch_ansible): # pylint: 
disable=redefined-outer-name,unused-argument ##WHAT DOES THIS METHOD DO + ''' create scope ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +# get unassigned partitions +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_assign_unassigned_disks(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + args['partition_count'] = 5 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['owned_partitions_record'], + SRR['home_spare_disk_info_record'], + SRR['unassigned_partitions_record'], + SRR['unassigned_disks_record'], + SRR['partner_node_name_record'], + SRR['partner_spare_partitions_record'], + SRR['partner_spare_disks_record'], + SRR['empty_good'], # assign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 6 + + +# assign unassigned partitions + steal 2 partner spare partitions +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_assign_unassigned_and_partner_spare_disks(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + args['partition_count'] = 7 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['owned_partitions_record'], + SRR['home_spare_disk_info_record'], + SRR['unassigned_partitions_record'], + SRR['unassigned_disks_record'], + 
SRR['partner_node_name_record'], + SRR['partner_spare_partitions_record'], + SRR['partner_spare_disks_record'], + SRR['empty_good'], # unassign + SRR['empty_good'], # assign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 9 + + +# assign unassigned partitions + steal 2 partner spare partitions +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_assign_unassigned_and_partner_spare_partitions_and_disks(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + args['partition_count'] = 6 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['owned_partitions_record'], + SRR['home_spare_disk_info_record'], + SRR['unassigned_partitions_record'], + SRR['unassigned_disks_record'], + SRR['partner_node_name_record'], + SRR['partner_spare_partitions_record'], + SRR['partner_spare_disks_record'], + SRR['empty_good'], # unassign + SRR['empty_good'], # assign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 8 + + +# Should unassign partitions +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_unassign(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + args['partition_count'] = 2 # change this number to be less than currently assigned partions to the node + set_module_args(args) + 
mock_request.side_effect = [ + SRR['is_rest'], + SRR['owned_partitions_record'], + SRR['unassigned_partitions_record'], + SRR['spare_partitions_record'], + SRR['empty_good'], # unassign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 5 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' disk_count matches arguments, do nothing ''' + args = dict(default_args()) + args['partition_count'] = 3 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['owned_partitions_record'], + SRR['unassigned_partitions_record'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +# ADP2 +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_assign_unassigned_disks_adp2(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + args['partitioning_method'] = 'root_data1_data2' + args['partition_type'] = 'data1' + args['partition_count'] = 5 # change this dependant on data1 partitions + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['adp2_owned_partitions_record'], + SRR['home_spare_disk_info_record'], + SRR['unassigned_partitions_record'], + SRR['unassigned_disks_record'], + SRR['partner_node_name_record'], + SRR['partner_spare_partitions_record'], + SRR['partner_spare_disks_record'], + SRR['empty_good'], # 
assign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 6 + + +# assign unassigned partitions + steal 2 partner spare partitions +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_assign_unassigned_and_partner_spare_disks_adp2(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + args['partitioning_method'] = 'root_data1_data2' + args['partition_type'] = 'data1' + args['partition_count'] = 7 # data1 partitions + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['adp2_owned_partitions_record'], + SRR['home_spare_disk_info_record'], + SRR['unassigned_partitions_record'], + SRR['unassigned_disks_record'], + SRR['partner_node_name_record'], + SRR['partner_spare_partitions_record'], + SRR['partner_spare_disks_record'], + SRR['empty_good'], # unassign + SRR['empty_good'], # assign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 9 + + +# assign unassigned partitions + steal 2 partner spare partitions +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_assign_unassigned_and_partner_spare_partitions_and_disks_adp2(mock_request, patch_ansible): + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + args['partitioning_method'] = 'root_data1_data2' + args['partition_type'] = 'data1' + args['partition_count'] = 6 # change this dependant on data1 partitions + 
set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['adp2_owned_partitions_record'], + SRR['home_spare_disk_info_record'], + SRR['unassigned_partitions_record'], + SRR['unassigned_disks_record'], + SRR['partner_node_name_record'], + SRR['partner_spare_partitions_record'], + SRR['partner_spare_disks_record'], + SRR['empty_good'], # unassign + SRR['empty_good'], # assign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 8 + + +# Should unassign partitions +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_unassign_adp2(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Steal disks from partner node and assign them to the requested node ''' + args = dict(default_args()) + args['partitioning_method'] = 'root_data1_data2' + args['partition_type'] = 'data1' + args['partition_count'] = 2 # change this number to be less than currently assigned partions to the node + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['adp2_owned_partitions_record'], + SRR['unassigned_partitions_record'], + SRR['spare_partitions_record'], + SRR['empty_good'], # unassign + # SRR['empty_good'], # assign + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 5 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ports.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ports.py new file mode 100644 index 000000000..8256024ae --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ports.py 
@@ -0,0 +1,864 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for ONTAP Ansible module: na_ontap_port''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_no_warnings, set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports \ + import NetAppOntapPorts as port_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.type = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + self.xml_out = xml + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def mock_args(self, choice): + if choice == 'broadcast_domain': + return { + 'names': ['test_port_1', 'test_port_2'], + 'resource_name': 'test_domain', + 'resource_type': 'broadcast_domain', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never' + } + elif choice == 
'portset': + return { + 'names': ['test_lif'], + 'resource_name': 'test_portset', + 'resource_type': 'portset', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'vserver': 'test_vserver', + 'use_rest': 'never' + } + + def get_port_mock_object(self): + """ + Helper method to return an na_ontap_port object + """ + port_obj = port_module() + port_obj.server = MockONTAPConnection() + return port_obj + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_broadcast_domain_ports') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.get_broadcast_domain_ports') + def test_successfully_add_broadcast_domain_ports(self, get_broadcast_domain_ports, add_broadcast_domain_ports): + ''' Test successful add broadcast domain ports ''' + data = self.mock_args('broadcast_domain') + set_module_args(data) + get_broadcast_domain_ports.side_effect = [ + [] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_broadcast_domain_ports') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.get_broadcast_domain_ports') + def test_add_broadcast_domain_ports_idempotency(self, get_broadcast_domain_ports, add_broadcast_domain_ports): + ''' Test add broadcast domain ports idempotency ''' + data = self.mock_args('broadcast_domain') + set_module_args(data) + get_broadcast_domain_ports.side_effect = [ + ['test_port_1', 'test_port_2'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object().apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_portset_ports') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.portset_get') + def 
test_successfully_add_portset_ports(self, portset_get, add_portset_ports): + ''' Test successful add portset ports ''' + data = self.mock_args('portset') + set_module_args(data) + portset_get.side_effect = [ + [] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_portset_ports') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.portset_get') + def test_add_portset_ports_idempotency(self, portset_get, add_portset_ports): + ''' Test add portset ports idempotency ''' + data = self.mock_args('portset') + set_module_args(data) + portset_get.side_effect = [ + ['test_lif'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object().apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_broadcast_domain_ports') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.get_broadcast_domain_ports') + def test_successfully_remove_broadcast_domain_ports(self, get_broadcast_domain_ports, add_broadcast_domain_ports): + ''' Test successful remove broadcast domain ports ''' + data = self.mock_args('broadcast_domain') + data['state'] = 'absent' + set_module_args(data) + get_broadcast_domain_ports.side_effect = [ + ['test_port_1', 'test_port_2'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.add_portset_ports') + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_ports.NetAppOntapPorts.portset_get') + def test_remove_add_portset_ports(self, portset_get, add_portset_ports): + ''' Test successful remove portset ports ''' + data = 
self.mock_args('portset') + data['state'] = 'absent' + set_module_args(data) + portset_get.side_effect = [ + ['test_lif'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_port_mock_object().apply() + assert exc.value.args[0]['changed'] + + +def default_args(choice=None, resource_name=None, portset_type=None): + args = { + 'state': 'present', + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'use_rest': 'always' + } + if choice == 'broadcast_domain': + args['resource_type'] = "broadcast_domain" + args['resource_name'] = "domain2" + args['ipspace'] = "ip1" + args['names'] = ["mohan9cluster2-01:e0b", "mohan9cluster2-01:e0d"] + return args + if choice == 'portset': + args['portset_type'] = portset_type + args['resource_name'] = resource_name + args['resource_type'] = 'portset' + args['vserver'] = 'svm3' + return args + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_6': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy')), None), + 'is_rest_9_7': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'port_detail_e0d': (200, { + "num_records": 1, + "records": [ + { + 'name': 'e0d', + 'node': {'name': 'mohan9cluster2-01'}, + 'uuid': 'ea670505-2ab3-11ec-aa30-005056b3dfc8' + }] + }, None), + 'port_detail_e0a': (200, { + "num_records": 1, + "records": [ + { + 
'name': 'e0a', + 'node': {'name': 'mohan9cluster2-01'}, + 'uuid': 'ea63420b-2ab3-11ec-aa30-005056b3dfc8' + }] + }, None), + 'port_detail_e0b': (200, { + "num_records": 1, + "records": [ + { + 'name': 'e0b', + 'node': {'name': 'mohan9cluster2-01'}, + 'uuid': 'ea64c0f2-2ab3-11ec-aa30-005056b3dfc8' + }] + }, None), + 'broadcast_domain_record': (200, { + "num_records": 1, + "records": [ + { + "uuid": "4475a2c8-f8a0-11e8-8d33-005056bb986f", + "name": "domain1", + "ipspace": {"name": "ip1"}, + "ports": [ + { + "uuid": "ea63420b-2ab3-11ec-aa30-005056b3dfc8", + "name": "e0a", + "node": { + "name": "mohan9cluster2-01" + } + }, + { + "uuid": "ea64c0f2-2ab3-11ec-aa30-005056b3dfc8", + "name": "e0b", + "node": { + "name": "mohan9cluster2-01" + } + }, + { + "uuid": "ea670505-2ab3-11ec-aa30-005056b3dfc8", + "name": "e0d", + "node": { + "name": "mohan9cluster2-01" + } + } + ], + "mtu": 9000 + }] + }, None), + 'broadcast_domain_record1': (200, { + "num_records": 1, + "records": [ + { + "uuid": "4475a2c8-f8a0-11e8-8d33-005056bb986f", + "name": "domain2", + "ipspace": {"name": "ip1"}, + "ports": [ + { + "uuid": "ea63420b-2ab3-11ec-aa30-005056b3dfc8", + "name": "e0a", + "node": { + "name": "mohan9cluster2-01" + } + } + ], + "mtu": 9000 + }] + }, None), + 'iscsips': (200, { + "num_records": 1, + "records": [ + { + "uuid": "52e31a9d-72e2-11ec-95ea-005056b3b297", + "svm": {"name": "svm3"}, + "name": "iscsips" + }] + }, None), + 'iscsips_updated': (200, { + "num_records": 1, + "records": [ + { + "uuid": "52e31a9d-72e2-ec11-95ea-005056b3b298", + "svm": {"name": "svm3"}, + "name": "iscsips_updated", + "interfaces": [ + { + "uuid": "6a82e94a-72da-11ec-95ea-005056b3b297", + "ip": {"name": "lif_svm3_856"} + }] + }] + }, None), + 'mixedps': (200, { + "num_records": 1, + "records": [ + { + "uuid": "ba02916a-72da-11ec-95ea-005056b3b297", + "svm": { + "name": "svm3" + }, + "name": "mixedps", + "interfaces": [ + { + "uuid": "2c373289-728f-11ec-95ea-005056b3b297", + "fc": {"name": "lif_svm3_681_2"} 
+ }, + { + "uuid": "d229cc03-7797-11ec-95ea-005056b3b297", + "fc": {"name": "lif_svm3_681_1_1"} + }, + { + "uuid": "d24e03c6-7797-11ec-95ea-005056b3b297", + "fc": {"name": "lif_svm3_681_1_2"} + }] + }] + }, None), + 'mixedps_updated': (200, { + "num_records": 1, + "records": [ + { + "uuid": "ba02916a-72da-11ec-95ea-005056b3b297", + "svm": { + "name": "svm3" + }, + "name": "mixedps_updated", + "interfaces": [ + { + "uuid": "6a82e94a-72da-11ec-95ea-005056b3b297", + "ip": {"name": "lif_svm3_856"} + }, + { + "uuid": "2bf30606-728f-11ec-95ea-005056b3b297", + "fc": {"name": "lif_svm3_681_1"} + }, + { + "uuid": "2c373289-728f-11ec-95ea-005056b3b297", + "fc": {"name": "lif_svm3_681_2"} + }, + { + "uuid": "d229cc03-7797-11ec-95ea-005056b3b297", + "fc": {"name": "lif_svm3_681_1_1"} + }, + { + "uuid": "d24e03c6-7797-11ec-95ea-005056b3b297", + "fc": {"name": "lif_svm3_681_1_2"} + }] + }] + }, None), + 'lif_svm3_681_1_1': (200, { + "num_records": 1, + "records": [{"uuid": "d229cc03-7797-11ec-95ea-005056b3b297"}] + }, None), + 'lif_svm3_681_1_2': (200, { + "num_records": 1, + "records": [{"uuid": "d24e03c6-7797-11ec-95ea-005056b3b297"}] + }, None), + 'lif_svm3_681_1': (200, { + "num_records": 1, + "records": [{"uuid": "2bf30606-728f-11ec-95ea-005056b3b297"}] + }, None), + 'lif_svm3_681_2': (200, { + "num_records": 1, + "records": [{"uuid": "2c373289-728f-11ec-95ea-005056b3b297"}] + }, None), + 'lif_svm3_856': (200, { + "num_records": 1, + "records": [{"uuid": "6a82e94a-72da-11ec-95ea-005056b3b297"}] + }, None) +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(dict(hostname='')) + port_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'missing required arguments:' + assert msg in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def 
test_add_broadcast_domain_port_rest(mock_request, patch_ansible): + ''' test add broadcast domain port''' + args = dict(default_args('broadcast_domain')) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0b'], + SRR['port_detail_e0d'], + SRR['broadcast_domain_record1'], # get + SRR['empty_good'], # add e0b + SRR['empty_good'], # add e0d + SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_add_broadcast_domain_port_rest_idempotent(mock_request, patch_ansible): + ''' test add broadcast domain port''' + args = dict(default_args('broadcast_domain')) + args['resource_name'] = "domain2" + args['names'] = ["mohan9cluster2-01:e0a"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0a'], + SRR['broadcast_domain_record1'], # get + SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_remove_broadcast_domain_port_rest(mock_request, patch_ansible): + ''' test remove broadcast domain port''' + args = dict(default_args('broadcast_domain')) + args['resource_name'] = "domain1" + args['names'] = ["mohan9cluster2-01:e0b", "mohan9cluster2-01:e0d"] + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0b'], + SRR['port_detail_e0d'], + SRR['broadcast_domain_record'], # get + SRR['empty_good'], # remove e0b and e0d + 
SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_remove_broadcast_domain_port_rest_idempotent(mock_request, patch_ansible): + ''' test remove broadcast domain port''' + args = dict(default_args('broadcast_domain')) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0b'], + SRR['port_detail_e0d'], + SRR['broadcast_domain_record1'], # get + SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_get_ports_rest(mock_request, patch_ansible): + ''' test get port ''' + args = dict(default_args('broadcast_domain')) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['generic_error'], # Error in getting ports + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = port_module() + print('Info: %s' % exc.value.args[0]) + msg = 'calling: network/ethernet/ports: got Expected error.' 
+ assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_get_broadcast_domain_ports_rest(mock_request, patch_ansible): + ''' test get broadcast domain ''' + args = dict(default_args('broadcast_domain')) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0b'], + SRR['port_detail_e0d'], + SRR['generic_error'], # Error in getting broadcast domain ports + ] + my_obj = port_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'calling: network/ethernet/broadcast-domains: got Expected error.' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_add_broadcast_domain_ports_rest(mock_request, patch_ansible): + ''' test add broadcast domain ports ''' + args = dict(default_args('broadcast_domain')) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0b'], + SRR['port_detail_e0d'], + SRR['broadcast_domain_record1'], # get + SRR['generic_error'], # Error in adding ports + ] + my_obj = port_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'got Expected error.' 
+ assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_remove_broadcast_domain_ports_rest(mock_request, patch_ansible): + ''' test remove broadcast domain ports ''' + args = dict(default_args('broadcast_domain')) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['port_detail_e0b'], + SRR['port_detail_e0d'], + SRR['broadcast_domain_record'], # get + SRR['generic_error'], # Error in removing ports + ] + my_obj = port_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'Error removing ports: calling: private/cli/network/port/broadcast-domain/remove-ports: got Expected error.' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_invalid_ports_rest(mock_request, patch_ansible): + ''' test remove broadcast domain ports ''' + args = dict(default_args('broadcast_domain')) + args['names'] = ["mohan9cluster2-01e0b"] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['generic_error'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = port_module() + print('Info: %s' % exc.value.args[0]) + msg = 'Error: Invalid value specified for port: mohan9cluster2-01e0b, provide port name as node_name:port_name' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_broadcast_domain_missing_ports_rest(mock_request, patch_ansible): + ''' test get ports ''' + args = dict(default_args('broadcast_domain')) + args['names'] = ["mohan9cluster2-01:e0l"] + set_module_args(args) + mock_request.side_effect = [ 
+ SRR['is_rest_9_8'], # get version + SRR['zero_record'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = port_module() + print('Info: %s' % exc.value.args[0]) + msg = 'Error: ports: mohan9cluster2-01:e0l not found' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_add_portset_port_iscsi_rest(mock_request, patch_ansible): + ''' test add portset port''' + args = dict(default_args('portset', 'iscsips', 'iscsi')) + args['names'] = ['lif_svm3_856'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], + SRR['iscsips'], # get portset + SRR['empty_good'], # add lif_svm3_856 + SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_add_portset_port_iscsi_rest_idempotent(mock_request, patch_ansible): + ''' test add portset port''' + args = dict(default_args('portset', 'iscsips_updated', 'iscsi')) + args['names'] = ['lif_svm3_856'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], + SRR['iscsips_updated'], # get + SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_remove_portset_port_iscsi_rest(mock_request, patch_ansible): + ''' test remove portset port''' + args = dict(default_args('portset', 'iscsips_updated', 'iscsi')) + args['names'] = ['lif_svm3_856'] + 
args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], + SRR['iscsips_updated'], + SRR['empty_good'], # remove lif_svm3_856 + SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_add_portset_port_mixed_rest(mock_request, patch_ansible): + ''' test add portset port''' + args = dict(default_args('portset', 'mixedps', 'mixed')) + args['names'] = ['lif_svm3_856', 'lif_svm3_681_1'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], # get lif_svm3_856 in ip + SRR['zero_record'], # lif_svm3_856 not found in fc + SRR['zero_record'], # lif_svm3_681_1 not found in ip + SRR['lif_svm3_681_1'], # get lif_svm3_681_1 in fc + SRR['mixedps'], # get portset + SRR['empty_good'], # Add both ip and fc to mixed portset + SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_get_portset_fetching_rest(mock_request, patch_ansible): + ''' test get port ''' + args = dict(default_args('portset', 'iscsips_updated', 'mixed')) + args['names'] = ['lif_svm3_856'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['generic_error'], # Error in getting portset + SRR['generic_error'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = port_module() + print('Info: %s' % exc.value.args[0]) + msg = 'Error fetching lifs details for lif_svm3_856: 
calling: network/ip/interfaces: got Expected error.' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_get_portset_fetching_portset_ip_rest(mock_request, patch_ansible): + ''' test get port ip''' + args = dict(default_args('portset', 'iscsips_updated', 'ip')) + args['names'] = ['lif_svm3_856'] + del args['portset_type'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], + SRR['generic_error'], + SRR['iscsips_updated'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj = port_module() + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_get_portset_fetching_portset_fcp_rest(mock_request, patch_ansible): + ''' test get port fcp''' + args = dict(default_args('portset', 'mixedps_updated', 'fcp')) + args['names'] = ['lif_svm3_681_1'] + del args['portset_type'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['generic_error'], + SRR['lif_svm3_681_1'], + SRR['mixedps_updated'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj = port_module() + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_get_portset_rest(mock_request, patch_ansible): + ''' test get portset ''' + args = dict(default_args('portset', 'iscsips_updated', 'iscsi')) + args['names'] = ['lif_svm3_856'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], + 
SRR['generic_error'], # Error in getting portset + ] + my_obj = port_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'calling: protocols/san/portsets: got Expected error' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_get_portset_error_rest(mock_request, patch_ansible): + ''' test get portset ''' + args = dict(default_args('portset', 'iscsips_updated', 'iscsi')) + args['names'] = ['lif_svm3_856'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], + SRR['zero_record'], + SRR['generic_error'], # Error in getting portset + ] + my_obj = port_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = "Error: Portset 'iscsips_updated' does not exist" + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_get_portset_missing_rest(mock_request, patch_ansible): + ''' test get portset ''' + args = dict(default_args('portset', 'iscsips_updated', 'iscsi')) + args['names'] = ['lif_svm3_856'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['zero_record'], + SRR['generic_error'], # Error in getting portset + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = port_module() + print('Info: %s' % exc.value.args[0]) + msg = "Error: lifs: lif_svm3_856 of type iscsi not found in vserver svm3" + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_get_portset_missing_state_absent_rest(mock_request, patch_ansible): + ''' test get portset ''' + args = 
dict(default_args('portset', 'iscsips_updated', 'iscsi')) + args['names'] = ['lif_svm3_856'] + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], + SRR['zero_record'], + SRR['end_of_sequence'] + ] + my_obj = port_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_add_portset_ports_rest(mock_request, patch_ansible): + ''' test add portset ports ''' + args = dict(default_args('portset', 'iscsips', 'iscsi')) + args['names'] = ['lif_svm3_856'] + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], + SRR['iscsips'], + SRR['generic_error'], # Error in adding ports + ] + my_obj = port_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'calling: protocols/san/portsets/52e31a9d-72e2-11ec-95ea-005056b3b297/interfaces: got Expected error.' 
+ assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_module_error_remove_portset_ports_rest(mock_request, patch_ansible): + ''' test remove broadcast domain ports ''' + args = dict(default_args('portset', 'iscsips_updated', 'iscsi')) + args['names'] = ['lif_svm3_856'] + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], # get version + SRR['lif_svm3_856'], + SRR['iscsips_updated'], + SRR['generic_error'], # Error in removing ports + ] + my_obj = port_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'calling: protocols/san/portsets/52e31a9d-72e2-ec11-95ea-005056b3b298/interfaces/6a82e94a-72da-11ec-95ea-005056b3b297: got Expected error.' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_portset.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_portset.py new file mode 100644 index 000000000..2e68e58c9 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_portset.py @@ -0,0 +1,390 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for ONTAP Ansible module: na_ontap_portset''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception +from 
ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_portset \ + import NetAppONTAPPortset as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +DEFAULT_ARGS = { + 'state': 'present', + 'name': 'test', + 'type': 'mixed', + 'vserver': 'ansible_test', + 'ports': ['a1', 'a2'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never' +} + + +portset_info = { + 'num-records': 1, + 'attributes-list': { + 'portset-info': { + 'portset-name': 'test', + 'vserver': 'ansible_test', + 'portset-type': 'mixed', + 'portset-port-total': '2', + 'portset-port-info': [ + {'portset-port-name': 'a1'}, + {'portset-port-name': 'a2'} + ] + } + } +} + + +ZRR = zapi_responses({ + 'portset_info': build_zapi_response(portset_info) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "name", "vserver"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_ensure_portset_get_called(): + ''' a more interesting test ''' + register_responses([ + ('portset-get-iter', ZRR['empty']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + portset = my_obj.portset_get() + assert portset is None + + +def 
test_create_portset(): + ''' Test successful create ''' + register_responses([ + ('portset-get-iter', ZRR['empty']), + ('portset-create', ZRR['success']), + ('portset-add', ZRR['success']), + ('portset-add', ZRR['success']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + + +def test_modify_ports(): + ''' Test modify_portset method ''' + register_responses([ + ('portset-get-iter', ZRR['portset_info']), + ('portset-add', ZRR['success']), + ('portset-add', ZRR['success']), + ('portset-remove', ZRR['success']), + ('portset-remove', ZRR['success']) + ]) + args = {'ports': ['l1', 'l2']} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_delete_portset(): + ''' Test successful delete ''' + register_responses([ + ('portset-get-iter', ZRR['portset_info']), + ('portset-destroy', ZRR['success']) + ]) + args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_error_type_create(): + register_responses([ + ('portset-get-iter', ZRR['empty']) + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['type'] + error = 'Error: Missing required parameter for create (type)' + assert error in create_and_apply(my_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('portset-get-iter', ZRR['error']), + ('portset-create', ZRR['error']), + ('portset-add', ZRR['error']), + ('portset-remove', ZRR['error']), + ('portset-destroy', ZRR['error']) + ]) + portset_obj = create_module(my_module, DEFAULT_ARGS) + + error = expect_and_capture_ansible_exception(portset_obj.portset_get, 'fail')['msg'] + assert 'Error fetching portset' in error + + error = expect_and_capture_ansible_exception(portset_obj.create_portset, 'fail')['msg'] + assert 'Error creating portse' in error + + error = expect_and_capture_ansible_exception(portset_obj.modify_port, 'fail', 'a1', 'portset-add', 'adding')['msg'] + assert 'Error adding port in 
portset' in error + + error = expect_and_capture_ansible_exception(portset_obj.modify_port, 'fail', 'a2', 'portset-remove', 'removing')['msg'] + assert 'Error removing port in portset' in error + + error = expect_and_capture_ansible_exception(portset_obj.delete_portset, 'fail')['msg'] + assert 'Error deleting portset' in error + + +SRR = rest_responses({ + 'mixed_portset_info': (200, {"records": [{ + "interfaces": [ + { + "fc": { + "name": "lif_1", + "uuid": "d229cc03" + } + }, + { + "ip": { + "name": "lif_2", + "uuid": "1cd8a442" + } + } + ], + "name": "mixed_ps", + "protocol": "mixed", + "uuid": "312aa85b" + }], "num_records": 1}, None), + 'fc_portset_info': (200, {"records": [{ + "interfaces": [ + { + "fc": { + "name": "fc_1", + "uuid": "3a09cd42" + } + }, + { + "fc": { + "name": "fc_2", + "uuid": "d24e03c6" + } + } + ], + "name": "fc_ps", + "protocol": "fcp", + "uuid": "5056b3b297" + }], "num_records": 1}, None), + 'lif_1': (200, { + "num_records": 1, + "records": [{"uuid": "d229cc03"}] + }, None), + 'lif_2': (200, { + "num_records": 1, + "records": [{"uuid": "d24e03c6"}] + }, None), + 'fc_1': (200, { + "num_records": 1, + "records": [{"uuid": "3a09cd42"}] + }, None), + 'fc_2': (200, { + "num_records": 1, + "records": [{"uuid": "1cd8b542"}] + }, None) +}) + + +def test_create_portset_rest(): + ''' Test successful create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['empty_records']), + ('GET', 'network/ip/interfaces', SRR['empty_records']), + ('GET', 'network/fc/interfaces', SRR['lif_1']), + ('GET', 'network/ip/interfaces', SRR['lif_2']), + ('GET', 'network/fc/interfaces', SRR['empty_records']), + ('POST', 'protocols/san/portsets', SRR['success']) + ]) + args = {'use_rest': 'always'} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_create_portset_idempotency_rest(): + ''' Test successful create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), 
+ ('GET', 'protocols/san/portsets', SRR['mixed_portset_info']) + ]) + args = {'use_rest': 'always', "ports": ["lif_1", "lif_2"]} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_modify_remove_ports_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['mixed_portset_info']), + ('DELETE', 'protocols/san/portsets/312aa85b/interfaces/1cd8a442', SRR['success']) + ]) + args = {'use_rest': 'always', "ports": ["lif_1"]} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_modify_add_ports_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['mixed_portset_info']), + ('GET', 'network/ip/interfaces', SRR['empty_records']), + ('GET', 'network/fc/interfaces', SRR['fc_1']), + ('POST', 'protocols/san/portsets/312aa85b/interfaces', SRR['success']) + ]) + args = {'use_rest': 'always', "ports": ["lif_1", "lif_2", "fc_1"]} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_delete_portset_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['mixed_portset_info']), + ('DELETE', 'protocols/san/portsets/312aa85b', SRR['success']) + ]) + args = {'use_rest': 'always', 'state': 'absent', 'ports': ['lif_1', 'lif_2']} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_get_portset_error_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['generic_error']) + ]) + args = {'use_rest': 'always', "ports": ["lif_1", "lif_2", "fc_1"]} + error = 'Error fetching portset' + assert error in create_and_apply(my_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def 
test_create_portset_error_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['empty_records']), + ('POST', 'protocols/san/portsets', SRR['generic_error']) + ]) + args = {'use_rest': 'always', "ports": []} + error = 'Error creating portset' + assert error in create_and_apply(my_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def test_delete_portset_error_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['mixed_portset_info']), + ('DELETE', 'protocols/san/portsets/312aa85b', SRR['generic_error']) + ]) + args = {'use_rest': 'always', 'state': 'absent', "ports": ["lif_1", "lif_2"]} + error = 'Error deleting portset' + assert error in create_and_apply(my_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def test_add_portset_error_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['mixed_portset_info']), + ('GET', 'network/ip/interfaces', SRR['empty_records']), + ('GET', 'network/fc/interfaces', SRR['fc_1']), + ('POST', 'protocols/san/portsets/312aa85b/interfaces', SRR['generic_error']) + ]) + args = {'use_rest': 'always', "ports": ["lif_1", "lif_2", "fc_1"]} + error = "Error adding port in portset" + assert error in create_and_apply(my_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def test_remove_portset_error_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['mixed_portset_info']), + ('DELETE', 'protocols/san/portsets/312aa85b/interfaces/1cd8a442', SRR['generic_error']) + ]) + args = {'use_rest': 'always', "ports": ["lif_1"]} + error = "Error removing port in portset" + assert error in create_and_apply(my_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def 
test_add_ip_port_to_fc_error_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['fc_portset_info']), + ('GET', 'network/fc/interfaces', SRR['empty_records']) + ]) + args = {'use_rest': 'always', "type": "fcp", "ports": ["fc_1", "fc_2", "lif_2"]} + error = 'Error: lifs: lif_2 of type fcp not found in vserver' + assert error in create_and_apply(my_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def test_get_lif_error_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['mixed_portset_info']), + ('GET', 'network/ip/interfaces', SRR['generic_error']), + ('GET', 'network/fc/interfaces', SRR['generic_error']) + ]) + args = {'use_rest': 'always', "ports": ["lif_1", "lif_2", "fc_1"]} + error = "Error fetching lifs details for fc_1" + assert error in create_and_apply(my_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def test_try_to_modify_protocol_error_rest(): + ''' Test modify_portset method ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'protocols/san/portsets', SRR['mixed_portset_info']) + ]) + args = {'use_rest': 'always', "type": "iscsi", "ports": ["lif_1", "lif_2"]} + error = "modify protocol(type) not supported" + assert error in create_and_apply(my_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def test_invalid_value_port_rest(): + ''' Test invalid error ''' + args = {'use_rest': 'always', "type": "iscsi", "ports": ["lif_1", ""]} + error = "Error: invalid value specified for ports" + assert error in create_module(my_module, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_module_ontap_9_9_0_rest_auto(): + ''' Test fall back to ZAPI ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + args = {'use_rest': 'auto'} + assert create_module(my_module, DEFAULT_ARGS, args).use_rest is False + + +def test_module_ontap_9_9_0_rest_always(): + ''' Test 
error when rest below 9.9.1 ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + args = {'use_rest': 'always'} + msg = "Error: REST requires ONTAP 9.9.1 or later for portset APIs." + assert msg in create_module(my_module, DEFAULT_ARGS, args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_publickey.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_publickey.py new file mode 100644 index 000000000..d72d8c8eb --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_publickey.py @@ -0,0 +1,471 @@ +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP publickey Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible, assert_warning_was_raised, assert_no_warnings, print_warnings + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_publickey \ + import NetAppOntapPublicKey as my_module, main as uut_main # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def default_args(): + return { + 'state': 'present', + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'account': 'user123', + 'public_key': '161245ASDF', + 'vserver': 'vserver', + } + + +# REST API canned responses when mocking 
send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_6': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'one_pk_record': (200, { + "records": [{ + 'account': dict(name='user123'), + 'owner': dict(uuid='98765'), + 'public_key': '161245ASDF', + 'index': 12, + 'comment': 'comment_123', + }], + 'num_records': 1 + }, None), + 'two_pk_records': (200, { + "records": [{ + 'account': dict(name='user123'), + 'owner': dict(uuid='98765'), + 'public_key': '161245ASDF', + 'index': 12, + 'comment': 'comment_123', + }, + { + 'account': dict(name='user123'), + 'owner': dict(uuid='98765'), + 'public_key': '161245ASDF', + 'index': 13, + 'comment': 'comment_123', + }], + 'num_records': 2 + }, None) +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(dict(hostname='')) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'missing required arguments: account' + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_get_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['index'] = 12 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['one_pk_record'], # get + SRR['end_of_sequence'] + ] + my_obj = 
my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_create_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['index'] = 13 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['zero_record'], # get + SRR['empty_good'], # create + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_create_idempotent(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'always' + args['index'] = 12 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['one_pk_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_create_always_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['empty_good'], # create + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + print_warnings() + 
assert_warning_was_raised('Module is not idempotent if index is not provided with state=present.') + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_modify_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['index'] = 12 + args['comment'] = 'new_comment' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['one_pk_record'], # get + SRR['empty_good'], # modify + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['index'] = 12 + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['one_pk_record'], # get + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_idempotent(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'always' + args['index'] = 12 + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['zero_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert 
exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_failed_N_records(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['two_pk_records'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'Error: index is required as more than one public_key exists for user account user123' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_succeeded_N_records(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['state'] = 'absent' + args['delete_all'] = True + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['two_pk_records'], # get + SRR['empty_good'], # delete + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_succeeded_N_records_cluster(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['state'] = 'absent' + args['delete_all'] = True + args['vserver'] = None # cluster scope + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['two_pk_records'], # get + SRR['empty_good'], # delete + 
SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + uut_main() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_negative_extra_record(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['state'] = 'present' + args['index'] = 14 + args['vserver'] = None # cluster scope + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['two_pk_records'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + uut_main() + print('Info: %s' % exc.value.args[0]) + msg = 'Error in get_public_key: calling: security/authentication/publickeys: unexpected response' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_negative_extra_arg_in_modify(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['state'] = 'present' + args['index'] = 14 + args['vserver'] = None # cluster scope + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['one_pk_record'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + uut_main() + print('Info: %s' % exc.value.args[0]) + msg = "Error: attributes not supported in modify: {'index': 14}" + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_negative_empty_body_in_modify(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + 
SRR['end_of_sequence'] + ] + current = dict(owner=dict(uuid=''), account=dict(name=''), index=0) + modify = {} + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_public_key(current, modify) + print('Info: %s' % exc.value.args[0]) + msg = 'Error: nothing to change - modify called with: {}' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_negative_create_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['index'] = 13 + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['zero_record'], # get + SRR['generic_error'], # create + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'Error in create_public_key: Expected error' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_negative_delete_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['index'] = 12 + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['one_pk_record'], # get + SRR['generic_error'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'Error in delete_public_key: Expected error' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_negative_modify_called(mock_request, patch_ansible): + ''' test get''' + args = 
dict(default_args()) + args['use_rest'] = 'auto' + args['index'] = 12 + args['comment'] = 'change_me' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['one_pk_record'], # get + SRR['generic_error'], # modify + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + msg = 'Error in modify_public_key: Expected error' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_negative_older_version(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'auto' + args['index'] = 12 + args['comment'] = 'change_me' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_6'], # get version + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = my_module() + print('Info: %s' % exc.value.args[0]) + msg = 'Error: na_ontap_publickey only supports REST, and requires ONTAP 9.7.0 or later. 
Found: 9.6.0' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_negative_zapi_only(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['use_rest'] = 'never' + args['index'] = 12 + args['comment'] = 'change_me' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_6'], # get version + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = my_module() + print('Info: %s' % exc.value.args[0]) + msg = 'Error: REST is required for this module, found: "use_rest: never"' + assert msg in exc.value.args[0]['msg'] + assert_no_warnings() diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py new file mode 100644 index 000000000..f568ed17a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_adaptive_policy_group.py @@ -0,0 +1,313 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group \ + import NetAppOntapAdaptiveQosPolicyGroup 
as qos_policy_group_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.kind == 'policy': + xml = self.build_policy_group_info(self.params) + if self.kind == 'error': + error = netapp_utils.zapi.NaApiError('test', 'error') + raise error + self.xml_out = xml + return xml + + @staticmethod + def build_policy_group_info(vol_details): + ''' build xml data for volume-attributes ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'num-records': 1, + 'attributes-list': { + 'qos-adaptive-policy-group-info': { + 'absolute-min-iops': '50IOPS', + 'expected-iops': '150IOPS/TB', + 'peak-iops': '220IOPS/TB', + 'peak-iops-allocation': 'used_space', + 'num-workloads': 0, + 'pgid': 6941, + 'policy-group': vol_details['name'], + 'vserver': vol_details['vserver'] + } + } + } + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_policy_group = { + 'name': 'policy_1', + 'vserver': 'policy_vserver', + 'absolute_min_iops': '50IOPS', + 'expected_iops': '150IOPS/TB', + 'peak_iops': '220IOPS/TB', + 'peak_iops_allocation': 'used_space' + } + + def mock_args(self): + return { + 'name': self.mock_policy_group['name'], + 'vserver': self.mock_policy_group['vserver'], + 'absolute_min_iops': '50IOPS', + 'expected_iops': '150IOPS/TB', + 'peak_iops': '220IOPS/TB', + 'peak_iops_allocation': 'used_space', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', 
+ 'https': 'False' + } + + def get_policy_group_mock_object(self, kind=None): + """ + Helper method to return an na_ontap_volume object + :param kind: passes this param to MockONTAPConnection() + :return: na_ontap_volume object + """ + policy_obj = qos_policy_group_module() + policy_obj.autosupport_log = Mock(return_value=None) + policy_obj.cluster = Mock() + policy_obj.cluster.invoke_successfully = Mock() + if kind is None: + policy_obj.server = MockONTAPConnection() + else: + policy_obj.server = MockONTAPConnection(kind=kind, data=self.mock_policy_group) + return policy_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + qos_policy_group_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_get_nonexistent_policy(self): + ''' Test if get_policy_group returns None for non-existent policy_group ''' + set_module_args(self.mock_args()) + result = self.get_policy_group_mock_object().get_policy_group() + assert result is None + + def test_get_existing_policy_group(self): + ''' Test if get_policy_group returns details for existing policy_group ''' + set_module_args(self.mock_args()) + result = self.get_policy_group_mock_object('policy').get_policy_group() + assert result['name'] == self.mock_policy_group['name'] + assert result['vserver'] == self.mock_policy_group['vserver'] + + def test_create_error_missing_param(self): + ''' Test if create throws an error if name is not specified''' + data = self.mock_args() + del data['name'] + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_group_mock_object('policy').create_policy_group() + msg = 'missing required arguments: name' + assert exc.value.args[0]['msg'] == msg + + def test_successful_create(self): + ''' Test successful create ''' + data = self.mock_args() + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + 
self.get_policy_group_mock_object().apply() + assert exc.value.args[0]['changed'] + + def test_create_idempotency(self): + ''' Test create idempotency ''' + set_module_args(self.mock_args()) + obj = self.get_policy_group_mock_object('policy') + with pytest.raises(AnsibleExitJson) as exc: + obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group') + def test_create_error(self, get_policy_group): + ''' Test create error ''' + set_module_args(self.mock_args()) + get_policy_group.side_effect = [ + None + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_group_mock_object('error').apply() + assert exc.value.args[0]['msg'] == 'Error creating adaptive qos policy group policy_1: NetApp API failed. Reason - test:error' + + def test_successful_delete(self): + ''' Test delete existing volume ''' + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_policy_group_mock_object('policy').apply() + assert exc.value.args[0]['changed'] + + def test_delete_idempotency(self): + ''' Test delete idempotency ''' + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_policy_group_mock_object().apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group') + def test_delete_error(self, get_policy_group): + ''' Test create idempotency''' + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + current = { + 'absolute_min_iops': '50IOPS', + 'expected_iops': '150IOPS/TB', + 'peak_iops': '220IOPS/TB', + 'peak_iops_allocation': 'used_space', + 'name': 'policy_1', + 'vserver': 'policy_vserver' + } + get_policy_group.side_effect 
= [ + current + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_group_mock_object('error').apply() + assert exc.value.args[0]['msg'] == 'Error deleting adaptive qos policy group policy_1: NetApp API failed. Reason - test:error' + + def test_successful_modify_expected_iops(self): + ''' Test successful modify expected iops ''' + data = self.mock_args() + data['expected_iops'] = '175IOPS' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_policy_group_mock_object('policy').apply() + assert exc.value.args[0]['changed'] + + def test_modify_expected_iops_idempotency(self): + ''' Test modify idempotency ''' + data = self.mock_args() + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_policy_group_mock_object('policy').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group') + def test_modify_error(self, get_policy_group): + ''' Test create idempotency ''' + data = self.mock_args() + data['expected_iops'] = '175IOPS' + set_module_args(data) + current = { + 'absolute_min_iops': '50IOPS', + 'expected_iops': '150IOPS/TB', + 'peak_iops': '220IOPS/TB', + 'peak_iops_allocation': 'used_space', + 'name': 'policy_1', + 'vserver': 'policy_vserver' + } + get_policy_group.side_effect = [ + current + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_group_mock_object('error').apply() + assert exc.value.args[0]['msg'] == 'Error modifying adaptive qos policy group policy_1: NetApp API failed. 
Reason - test:error' + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group') + def test_rename(self, get_policy_group): + ''' Test rename idempotency ''' + data = self.mock_args() + data['name'] = 'policy_2' + data['from_name'] = 'policy_1' + set_module_args(data) + current = { + 'absolute_min_iops': '50IOPS', + 'expected_iops': '150IOPS/TB', + 'peak_iops': '220IOPS/TB', + 'peak_iops_allocation': 'used_space', + 'name': 'policy_1', + 'vserver': 'policy_vserver' + } + get_policy_group.side_effect = [ + None, + current + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_policy_group_mock_object('policy').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group') + def test_rename_idempotency(self, get_policy_group): + ''' Test rename idempotency ''' + data = self.mock_args() + data['name'] = 'policy_1' + data['from_name'] = 'policy_1' + current = { + 'absolute_min_iops': '50IOPS', + 'expected_iops': '150IOPS/TB', + 'peak_iops': '220IOPS/TB', + 'peak_iops_allocation': 'used_space', + 'name': 'policy_1', + 'vserver': 'policy_vserver' + } + get_policy_group.side_effect = [ + current, + current + ] + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_policy_group_mock_object('policy').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_adaptive_policy_group.NetAppOntapAdaptiveQosPolicyGroup.get_policy_group') + def test_rename_error(self, get_policy_group): + ''' Test create idempotency ''' + data = self.mock_args() + data['from_name'] = 'policy_1' + data['name'] = 'policy_2' + set_module_args(data) + current = { + 'absolute_min_iops': '50IOPS', + 'expected_iops': '150IOPS/TB', + 'peak_iops': '220IOPS/TB', + 'peak_iops_allocation': 
'used_space', + 'is_shared': 'true', + 'name': 'policy_1', + 'vserver': 'policy_vserver' + } + get_policy_group.side_effect = [ + None, + current + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_policy_group_mock_object('error').apply() + assert exc.value.args[0]['msg'] == 'Error renaming adaptive qos policy group policy_1: NetApp API failed. Reason - test:error' diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py new file mode 100644 index 000000000..c14b13151 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qos_policy_group.py @@ -0,0 +1,578 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_qos_policy_group \ + import NetAppOntapQosPolicyGroup as qos_policy_group_module # module under test +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +if not netapp_utils.has_netapp_lib(): + pytestmark = 
pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'name': 'policy_1', + 'vserver': 'policy_vserver', + 'max_throughput': '800KB/s,800IOPS', + 'is_shared': True, + 'min_throughput': '100IOPS', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'https': 'True', + 'use_rest': 'never' +} + + +qos_policy_group_info = { + 'num-records': 1, + 'attributes-list': { + 'qos-policy-group-info': { + 'is-shared': 'true', + 'max-throughput': '800KB/s,800IOPS', + 'min-throughput': '100IOPS', + 'num-workloads': 0, + 'pgid': 8690, + 'policy-group': 'policy_1', + 'vserver': 'policy_vserver' + } + } +} + + +ZRR = zapi_responses({ + 'qos_policy_info': build_zapi_response(qos_policy_group_info) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + qos_policy_group_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_get_nonexistent_policy(): + ''' Test if get_policy_group returns None for non-existent policy_group ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['empty']) + ]) + qos_policy_obj = create_module(qos_policy_group_module, DEFAULT_ARGS) + result = qos_policy_obj.get_policy_group() + assert result is None + + +def test_get_existing_policy_group(): + ''' Test if get_policy_group returns details for existing policy_group ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['qos_policy_info']) + ]) + qos_policy_obj = create_module(qos_policy_group_module, DEFAULT_ARGS) + result = qos_policy_obj.get_policy_group() + assert result['name'] == DEFAULT_ARGS['name'] + assert result['vserver'] == DEFAULT_ARGS['vserver'] + + +def test_create_error_missing_param(): + ''' Test if create throws an error if 
name is not specified''' + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['name'] + error = create_module(qos_policy_group_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + assert 'missing required arguments: name' in error + + +def test_error_if_fixed_qos_options_present(): + ''' Test hrows an error if fixed_qos_options is specified in ZAPI''' + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['max_throughput'] + del DEFAULT_ARGS_COPY['min_throughput'] + del DEFAULT_ARGS_COPY['is_shared'] + DEFAULT_ARGS_COPY['fixed_qos_options'] = {'max_throughput_iops': 100} + error = create_module(qos_policy_group_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + assert "Error: 'fixed_qos_options' not supported with ZAPI, use 'max_throughput' and 'min_throughput'" in error + + +def test_successful_create(): + ''' Test successful create ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['empty']), + ('qos-policy-group-create', ZRR['success']) + ]) + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS)['changed'] + + +def test_create_idempotency(): + ''' Test create idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['qos_policy_info']) + ]) + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS)['changed'] is False + + +def test_create_error(): + ''' Test create error ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['empty']), + ('qos-policy-group-create', ZRR['error']) + ]) + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS, fail=True)['msg'] + assert 'Error creating qos policy group policy_1' in error + + +def test_successful_delete(): + ''' Test delete existing volume ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['qos_policy_info']), + ('qos-policy-group-delete', ZRR['success']) + ]) + args = { + 'state': 'absent', + 'force': True + } + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS, args)['changed'] + + +def test_delete_idempotency(): + 
''' Test delete idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['empty']) + ]) + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] is False + + +def test_delete_error(): + ''' Test create idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['qos_policy_info']), + ('qos-policy-group-delete', ZRR['error']) + ]) + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS, {'state': 'absent'}, fail=True)['msg'] + assert 'Error deleting qos policy group policy_1' in error + + +def test_successful_modify_max_throughput(): + ''' Test successful modify max throughput ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['qos_policy_info']), + ('qos-policy-group-modify', ZRR['success']) + ]) + args = {'max_throughput': '900KB/s,800iops'} + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS, args)['changed'] + + +def test_modify_max_throughput_idempotency(): + ''' Test modify idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['qos_policy_info']) + ]) + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS)['changed'] is False + + +def test_modify_error(): + ''' Test create idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['qos_policy_info']), + ('qos-policy-group-modify', ZRR['error']) + ]) + args = {'max_throughput': '900KB/s,800iops'} + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert 'Error modifying qos policy group policy_1' in error + + +def test_modify_is_shared_error(): + ''' Test create idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['qos_policy_info']) + ]) + args = { + 'is_shared': False, + 'max_throughput': '900KB/s,900IOPS' + } + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert "Error cannot modify 'is_shared' attribute." 
in error + + +def test_rename(): + ''' Test rename idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['empty']), + ('qos-policy-group-get-iter', ZRR['qos_policy_info']), + ('qos-policy-group-rename', ZRR['success']) + ]) + args = { + 'name': 'policy_2', + 'from_name': 'policy_1' + } + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS, args)['changed'] + + +def test_rename_idempotency(): + ''' Test rename idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['qos_policy_info']) + ]) + args = { + 'from_name': 'policy_1' + } + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_rename_error(): + ''' Test create idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['empty']), + ('qos-policy-group-get-iter', ZRR['qos_policy_info']), + ('qos-policy-group-rename', ZRR['error']) + ]) + args = { + 'name': 'policy_2', + 'from_name': 'policy_1' + } + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert 'Error renaming qos policy group policy_1' in error + + +def test_rename_non_existent_policy(): + ''' Test create idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['empty']), + ('qos-policy-group-get-iter', ZRR['empty']) + ]) + args = { + 'name': 'policy_10', + 'from_name': 'policy_11' + } + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert 'Error renaming qos policy group: cannot find' in error + + +def test_get_policy_error(): + ''' Test create idempotency ''' + register_responses([ + ('qos-policy-group-get-iter', ZRR['error']) + ]) + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS, fail=True)['msg'] + assert 'Error fetching qos policy group' in error + + +DEFAULT_ARGS_REST = { + 'name': 'policy_1', + 'vserver': 'policy_vserver', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'https': 
'True', + 'use_rest': 'always', + 'fixed_qos_options': { + 'capacity_shared': False, + 'max_throughput_iops': 1000, + 'max_throughput_mbps': 100, + 'min_throughput_iops': 100, + 'min_throughput_mbps': 50 + } +} + + +SRR = rest_responses({ + 'qos_policy_info': (200, {"records": [ + { + "uuid": "e4f703dc-bfbc-11ec-a164-005056b3bd39", + "svm": {"name": "policy_vserver"}, + "name": "policy_1", + "fixed": { + "max_throughput_iops": 1000, + "max_throughput_mbps": 100, + "min_throughput_iops": 100, + 'min_throughput_mbps': 50, + "capacity_shared": False + } + } + ], 'num_records': 1}, None), + 'adaptive_policy_info': (200, {"records": [ + { + 'uuid': '30d2fdd6-c45a-11ec-a164-005056b3bd39', + 'svm': {'name': 'policy_vserver'}, + 'name': 'policy_1_', + 'adaptive': { + 'expected_iops': 200, + 'peak_iops': 500, + 'absolute_min_iops': 100 + } + } + ], 'num_records': 1}, None) +}) + + +def test_successful_create_rest(): + ''' Test successful create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['empty_records']), + ('POST', 'storage/qos/policies', SRR['success']) + ]) + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST)['changed'] + + +def test_create_idempotency_rest(): + ''' Test create idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['qos_policy_info']), + ]) + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST)['changed'] is False + + +def test_successful_create_adaptive_rest(): + ''' Test successful create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['empty_records']), + ('POST', 'storage/qos/policies', SRR['success']), + # with block size + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/qos/policies', SRR['empty_records']), + ('POST', 'storage/qos/policies', SRR['success']), + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS_REST.copy() + 
del DEFAULT_ARGS_COPY['fixed_qos_options'] + DEFAULT_ARGS_COPY['adaptive_qos_options'] = { + "absolute_min_iops": 100, + "expected_iops": 200, + "peak_iops": 500 + } + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_COPY)['changed'] + DEFAULT_ARGS_COPY['adaptive_qos_options']['block_size'] = '4k' + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_COPY)['changed'] + + +def test_partially_supported_option_rest(): + ''' Test delete error ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + error = create_module(qos_policy_group_module, DEFAULT_ARGS_REST, fail=True)['msg'] + assert "Minimum version of ONTAP for 'fixed_qos_options.min_throughput_mbps' is (9, 8, 0)" in error + DEFAULT_ARGS_COPY = DEFAULT_ARGS_REST.copy() + del DEFAULT_ARGS_COPY['fixed_qos_options'] + DEFAULT_ARGS_COPY['adaptive_qos_options'] = { + "absolute_min_iops": 100, + "expected_iops": 200, + "peak_iops": 500, + "block_size": "4k" + } + error = create_module(qos_policy_group_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + assert "Minimum version of ONTAP for 'adaptive_qos_options.block_size' is (9, 10, 1)" in error + + +def test_error_create_adaptive_rest(): + ''' Test successful create ''' + DEFAULT_ARGS_COPY = DEFAULT_ARGS_REST.copy() + del DEFAULT_ARGS_COPY['fixed_qos_options'] + DEFAULT_ARGS_COPY['adaptive_qos_options'] = { + "absolute_min_iops": 100, + "expected_iops": 200 + } + error = create_module(qos_policy_group_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + assert "missing required arguments: peak_iops found in adaptive_qos_options" in error + + +def test_create_error_rest(): + ''' Test create error ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['empty_records']), + ('POST', 'storage/qos/policies', SRR['generic_error']), + ]) + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, fail=True)['msg'] + assert 'Error creating 
qos policy group policy_1' in error + + +def test_successful_delete_rest(): + ''' Test delete existing volume ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['qos_policy_info']), + ('DELETE', 'storage/qos/policies/e4f703dc-bfbc-11ec-a164-005056b3bd39', SRR['success']) + ]) + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, {'state': 'absent'})['changed'] + + +def test_delete_idempotency_rest(): + ''' Test delete idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['empty_records']) + ]) + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, {'state': 'absent'})['changed'] is False + + +def test_create_error_fixed_adaptive_qos_options_missing(): + ''' Error if fixed_qos_optios not present in create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['empty_records']) + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS_REST.copy() + del DEFAULT_ARGS_COPY['fixed_qos_options'] + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + assert "Error: atleast one throughput in 'fixed_qos_options' or all 'adaptive_qos_options'" in error + + +def test_delete_error_rest(): + ''' Test delete error ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['qos_policy_info']), + ('DELETE', 'storage/qos/policies/e4f703dc-bfbc-11ec-a164-005056b3bd39', SRR['generic_error']) + ]) + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, {'state': 'absent'}, fail=True)['msg'] + assert 'Error deleting qos policy group policy_1' in error + + +def test_successful_modify_max_throughput_rest(): + ''' Test successful modify max throughput ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['qos_policy_info']), + ('PATCH', 
'storage/qos/policies/e4f703dc-bfbc-11ec-a164-005056b3bd39', SRR['success']) + ]) + args = {'fixed_qos_options': { + 'max_throughput_iops': 2000, + 'max_throughput_mbps': 300, + 'min_throughput_iops': 400, + 'min_throughput_mbps': 700 + }} + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_modify_max_throughput_idempotency_rest(): + ''' Test modify idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['qos_policy_info']) + ]) + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST)['changed'] is False + + +def test_successful_modify_adaptive_qos_options_rest(): + ''' Test successful modify max throughput ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/qos/policies', SRR['adaptive_policy_info']), + ('PATCH', 'storage/qos/policies/30d2fdd6-c45a-11ec-a164-005056b3bd39', SRR['success']) + ]) + DEFAULT_ARGS_REST_COPY = DEFAULT_ARGS_REST.copy() + del DEFAULT_ARGS_REST_COPY['fixed_qos_options'] + args = { + 'adaptive_qos_options': { + 'expected_iops': 300, + 'peak_iops': 600, + 'absolute_min_iops': 200, + 'block_size': '4k' + } + } + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST_COPY, args)['changed'] + + +def test_error_adaptive_qos_options_zapi(): + ''' Test error adaptive_qos_options zapi ''' + DEFAULT_ARGS_REST_COPY = DEFAULT_ARGS_REST.copy() + del DEFAULT_ARGS_REST_COPY['fixed_qos_options'] + DEFAULT_ARGS_REST_COPY['use_rest'] = 'never' + args = { + 'adaptive_qos_options': { + 'expected_iops': 300, + 'peak_iops': 600, + 'absolute_min_iops': 200 + } + } + error = create_module(qos_policy_group_module, DEFAULT_ARGS_REST_COPY, args, fail=True)['msg'] + assert "Error: use 'na_ontap_qos_adaptive_policy_group' module for create/modify/delete adaptive policy with ZAPI" in error + + +def test_modify_error_rest(): + ''' Test modify error rest ''' + register_responses([ + ('GET', 
'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['qos_policy_info']), + ('PATCH', 'storage/qos/policies/e4f703dc-bfbc-11ec-a164-005056b3bd39', SRR['generic_error']) + ]) + args = {'fixed_qos_options': { + 'max_throughput_iops': 2000, + 'max_throughput_mbps': 300, + 'min_throughput_iops': 400, + 'min_throughput_mbps': 700 + }} + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, args, fail=True)['msg'] + assert 'Error modifying qos policy group policy_1' in error + + +def test_rename_rest(): + ''' Test rename ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['empty_records']), + ('GET', 'storage/qos/policies', SRR['qos_policy_info']), + ('PATCH', 'storage/qos/policies/e4f703dc-bfbc-11ec-a164-005056b3bd39', SRR['success']) + ]) + args = { + 'name': 'policy_2', + 'from_name': 'policy_1' + } + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_rename_idempotency_rest(): + ''' Test rename idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['qos_policy_info']) + ]) + args = { + 'from_name': 'policy_1' + } + assert create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, args)['changed'] is False + + +def test_rename_error_rest(): + ''' Test create idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['empty_records']), + ('GET', 'storage/qos/policies', SRR['qos_policy_info']), + ('PATCH', 'storage/qos/policies/e4f703dc-bfbc-11ec-a164-005056b3bd39', SRR['generic_error']) + ]) + args = { + 'name': 'policy_2', + 'from_name': 'policy_1' + } + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, args, fail=True)['msg'] + assert 'Error renaming qos policy group policy_1' in error + + +def test_get_policy_error_rest(): + ''' Test get policy error rest ''' + register_responses([ + 
('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/qos/policies', SRR['generic_error']) + ]) + error = create_and_apply(qos_policy_group_module, DEFAULT_ARGS_REST, fail=True)['msg'] + assert 'Error fetching qos policy group policy_1' in error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qtree.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qtree.py new file mode 100644 index 000000000..e88fcb852 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_qtree.py @@ -0,0 +1,404 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_quotas ''' +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible,\ + call_main, create_module, create_and_apply, expect_and_capture_ansible_exception, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_qtree \ + import NetAppOntapQTree as qtree_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +DEFAULT_ARGS = { + 'state': 'present', + 
'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'ansible', + 'vserver': 'ansible', + 'flexvol_name': 'ansible', + 'export_policy': 'ansible', + 'security_style': 'unix', + 'unix_permissions': '755', + 'use_rest': 'never' +} + + +qtree_info = { + 'num-records': 1, + 'attributes-list': { + 'qtree-info': { + 'export-policy': 'ansible', + 'vserver': 'ansible', + 'qtree': 'ansible', + 'oplocks': 'enabled', + 'security-style': 'unix', + 'mode': '755', + 'volume': 'ansible' + } + } +} + + +ZRR = zapi_responses({ + 'qtree_info': build_zapi_response(qtree_info) +}) + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'qtree_record': (200, {"records": [{ + "svm": {"name": "ansible"}, + "id": 1, + "name": "ansible", + "security_style": "unix", + "unix_permissions": 755, + "export_policy": {"name": "ansible"}, + "volume": {"uuid": "uuid", "name": "volume1"}} + ]}, None), + 'job_info': (200, { + "job": { + "uuid": "d78811c1-aebc-11ec-b4de-005056b30cfa", + "_links": {"self": {"href": "/api/cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa"}} + }}, None), + 'job_not_found': (404, "", {"message": "entry doesn't exist", "code": "4", "target": "uuid"}) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "name", "vserver", "flexvol_name"] + error = create_module(qtree_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_ensure_get_called(): + ''' test get_qtree for non-existent qtree''' + register_responses([ + ('qtree-list-iter', ZRR['empty']) + ]) + my_obj = create_module(qtree_module, DEFAULT_ARGS) + portset = my_obj.get_qtree() + assert portset is None + + +def test_ensure_get_called_existing(): + ''' test get_qtree for existing qtree''' + register_responses([ + ('qtree-list-iter', ZRR['qtree_info']) 
+ ]) + my_obj = create_module(qtree_module, DEFAULT_ARGS) + assert my_obj.get_qtree() + + +def test_successful_create(): + ''' creating qtree ''' + register_responses([ + ('qtree-list-iter', ZRR['empty']), + ('qtree-create', ZRR['success']) + ]) + module_args = { + 'oplocks': 'enabled' + } + assert create_and_apply(qtree_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_delete(): + ''' deleting qtree ''' + register_responses([ + ('qtree-list-iter', ZRR['qtree_info']), + ('qtree-delete', ZRR['success']) + ]) + args = {'state': 'absent'} + assert create_and_apply(qtree_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_delete_idempotency(): + ''' deleting qtree idempotency ''' + register_responses([ + ('qtree-list-iter', ZRR['empty']) + ]) + args = {'state': 'absent'} + assert create_and_apply(qtree_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_successful_modify(): + ''' modifying qtree ''' + register_responses([ + ('qtree-list-iter', ZRR['qtree_info']), + ('qtree-modify', ZRR['success']) + ]) + args = { + 'export_policy': 'test', + 'oplocks': 'enabled' + } + assert create_and_apply(qtree_module, DEFAULT_ARGS, args)['changed'] + + +def test_failed_rename(): + ''' test error rename qtree ''' + register_responses([ + ('qtree-list-iter', ZRR['empty']), + ('qtree-list-iter', ZRR['empty']) + ]) + args = {'from_name': 'test'} + error = 'Error renaming: qtree %s does not exist' % args['from_name'] + assert error in create_and_apply(qtree_module, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_successful_rename(): + ''' rename qtree ''' + register_responses([ + ('qtree-list-iter', ZRR['empty']), + ('qtree-list-iter', ZRR['qtree_info']), + ('qtree-rename', ZRR['success']) + ]) + args = {'from_name': 'ansible_old'} + assert create_and_apply(qtree_module, DEFAULT_ARGS, args)['changed'] + + +def test_if_all_methods_catch_exception(): + ''' test error zapi - get/create/rename/modify/delete''' + register_responses([ + 
('qtree-list-iter', ZRR['error']), + ('qtree-create', ZRR['error']), + ('qtree-rename', ZRR['error']), + ('qtree-modify', ZRR['error']), + ('qtree-delete', ZRR['error']) + ]) + qtree_obj = create_module(qtree_module, DEFAULT_ARGS, {'from_name': 'name'}) + + assert 'Error fetching qtree' in expect_and_capture_ansible_exception(qtree_obj.get_qtree, 'fail')['msg'] + assert 'Error creating qtree' in expect_and_capture_ansible_exception(qtree_obj.create_qtree, 'fail')['msg'] + assert 'Error renaming qtree' in expect_and_capture_ansible_exception(qtree_obj.rename_qtree, 'fail')['msg'] + assert 'Error modifying qtree' in expect_and_capture_ansible_exception(qtree_obj.modify_qtree, 'fail')['msg'] + assert 'Error deleting qtree' in expect_and_capture_ansible_exception(qtree_obj.delete_qtree, 'fail')['msg'] + + +def test_get_error_rest(): + ''' test get qtree error in rest''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['generic_error']) + ]) + error = 'Error fetching qtree' + assert error in create_and_apply(qtree_module, DEFAULT_ARGS, {'use_rest': 'always'}, 'fail')['msg'] + + +def test_create_error_rest(): + ''' test get qtree error in rest''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['empty_records']), + ('POST', 'storage/qtrees', SRR['generic_error']) + ]) + error = 'Error creating qtree' + assert error in create_and_apply(qtree_module, DEFAULT_ARGS, {'use_rest': 'always'}, 'fail')['msg'] + + +def test_modify_error_rest(): + ''' test get qtree error in rest''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['qtree_record']), + ('PATCH', 'storage/qtrees/uuid/1', SRR['generic_error']) + ]) + args = {'use_rest': 'always', 'unix_permissions': '777'} + error = 'Error modifying qtree' + assert error in create_and_apply(qtree_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def test_rename_error_rest(): + ''' test get 
qtree error in rest''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['empty_records']), + ('GET', 'storage/qtrees', SRR['empty_records']) + ]) + args = {'use_rest': 'always', 'from_name': 'abcde', 'name': 'qtree'} + error = 'Error renaming: qtree' + assert error in create_and_apply(qtree_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def test_delete_error_rest(): + ''' test get qtree error in rest''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['qtree_record']), + ('DELETE', 'storage/qtrees/uuid/1', SRR['generic_error']) + ]) + args = {'use_rest': 'always', 'state': 'absent'} + error = 'Error deleting qtree' + assert error in create_and_apply(qtree_module, DEFAULT_ARGS, args, 'fail')['msg'] + + +def test_successful_create_rest(): + ''' test create qtree rest ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['empty_records']), + ('POST', 'storage/qtrees', SRR['success']) + ]) + assert create_and_apply(qtree_module, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + + +def test_idempotent_create_rest(): + ''' test create qtree idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['qtree_record']) + ]) + assert create_and_apply(qtree_module, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] is False + + +@patch('time.sleep') +def test_successful_create_rest_job_error(sleep): + ''' test create qtree rest ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['empty_records']), + ('POST', 'storage/qtrees', SRR['job_info']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 
'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']) + ]) + assert create_and_apply(qtree_module, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + print_warnings() + assert_warning_was_raised('Ignoring job status, assuming success.') + + +def test_successful_delete_rest(): + ''' test delete qtree rest ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['qtree_record']), + ('DELETE', 'storage/qtrees/uuid/1', SRR['success']) + ]) + args = {'use_rest': 'always', 'state': 'absent'} + assert create_and_apply(qtree_module, DEFAULT_ARGS, args)['changed'] + + +def test_idempotent_delete_rest(): + ''' test delete qtree idempotency''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['empty_records']) + ]) + args = {'use_rest': 'always', 'state': 'absent'} + assert create_and_apply(qtree_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_successful_modify_rest(): + ''' test modify qtree rest ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['qtree_record']), + ('PATCH', 'storage/qtrees/uuid/1', SRR['success']) + ]) + args = {'use_rest': 'always', 'unix_permissions': '777'} + assert create_and_apply(qtree_module, DEFAULT_ARGS, args)['changed'] + + +def test_idempotent_modify_rest(): + ''' test modify qtree idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['qtree_record']) + ]) + args = {'use_rest': 'always'} + assert create_and_apply(qtree_module, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] is False + + +def test_successful_rename_rest(): + ''' test rename qtree rest ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['zero_records']), + ('GET', 'storage/qtrees', SRR['qtree_record']), + ('PATCH', 'storage/qtrees/uuid/1', SRR['success']) + ]) + args = {'use_rest': 'always', 
'from_name': 'abcde', 'name': 'qtree'} + assert create_and_apply(qtree_module, DEFAULT_ARGS, args)['changed'] + + +def test_successful_rename_rest_idempotent(): + ''' test rename qtree in rest - idempotency''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['qtree_record']) + ]) + args = {'use_rest': 'always', 'from_name': 'abcde'} + assert create_and_apply(qtree_module, DEFAULT_ARGS, args)['changed'] is False + + +def test_successful_rename_and_modify_rest(): + ''' test rename and modify qtree rest ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['empty_records']), + ('GET', 'storage/qtrees', SRR['qtree_record']), + ('PATCH', 'storage/qtrees/uuid/1', SRR['success']) + ]) + args = { + 'use_rest': 'always', + 'from_name': 'abcde', + 'name': 'qtree', + 'unix_permissions': '744', + 'unix_user': 'user', + 'unix_group': 'group', + } + assert call_main(my_main, DEFAULT_ARGS, args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_missing_netapp_lib(mock_has_netapp_lib): + module_args = { + 'use_rest': 'never' + } + mock_has_netapp_lib.return_value = False + error = 'Error: the python NetApp-Lib module is required. Import error: None' + assert error == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_force_delete_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'storage/qtrees', SRR['qtree_record']), + ]) + module_args = { + 'use_rest': 'always', + 'force_delete': False, + 'state': 'absent' + } + error = 'Error: force_delete option is not supported for REST, unless set to true.' 
+ assert error == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rename_qtree_not_used_with_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ]) + module_args = { + 'use_rest': 'always', + } + my_obj = create_module(qtree_module, DEFAULT_ARGS, module_args) + error = 'Internal error, use modify with REST' + assert error in expect_and_capture_ansible_exception(my_obj.rename_qtree, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quota_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quota_policy.py new file mode 100644 index 000000000..e7eb3283c --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quota_policy.py @@ -0,0 +1,174 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_quota_policy ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_quota_policy \ + import NetAppOntapQuotaPolicy as quota_policy_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + 
self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.kind == 'quota': + xml = self.build_quota_policy_info(self.params, True) + if self.kind == 'quota_not_assigned': + xml = self.build_quota_policy_info(self.params, False) + elif self.kind == 'zapi_error': + error = netapp_utils.zapi.NaApiError('test', 'error') + raise error + self.xml_out = xml + return xml + + @staticmethod + def build_quota_policy_info(params, assigned): + xml = netapp_utils.zapi.NaElement('xml') + attributes = {'num-records': 1, + 'attributes-list': { + 'quota-policy-info': { + 'policy-name': params['name']}, + 'vserver-info': { + 'quota-policy': params['name'] if assigned else 'default'} + }} + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_quota_policy ''' + + def setUp(self): + self.mock_quota_policy = { + 'state': 'present', + 'vserver': 'test_vserver', + 'name': 'test_policy' + } + + def mock_args(self): + return { + 'state': self.mock_quota_policy['state'], + 'vserver': self.mock_quota_policy['vserver'], + 'name': self.mock_quota_policy['name'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!' 
+ } + + def get_quota_policy_mock_object(self, kind=None): + policy_obj = quota_policy_module() + if kind is None: + policy_obj.server = MockONTAPConnection() + else: + policy_obj.server = MockONTAPConnection(kind=kind, data=self.mock_quota_policy) + return policy_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + quota_policy_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_successfully_create(self): + set_module_args(self.mock_args()) + with pytest.raises(AnsibleExitJson) as exc: + self.get_quota_policy_mock_object().apply() + assert exc.value.args[0]['changed'] + + def test_create_idempotency(self): + set_module_args(self.mock_args()) + with pytest.raises(AnsibleExitJson) as exc: + self.get_quota_policy_mock_object('quota').apply() + assert not exc.value.args[0]['changed'] + + def test_cannot_delete(self): + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_quota_policy_mock_object('quota').apply() + msg = 'Error policy test_policy cannot be deleted as it is assigned to the vserver test_vserver' + assert msg == exc.value.args[0]['msg'] + + def test_successfully_delete(self): + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_quota_policy_mock_object('quota_not_assigned').apply() + assert exc.value.args[0]['changed'] + + def test_delete_idempotency(self): + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_quota_policy_mock_object().apply() + assert not exc.value.args[0]['changed'] + + def test_successfully_assign(self): + data = self.mock_args() + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + 
self.get_quota_policy_mock_object('quota_not_assigned').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_quota_policy.NetAppOntapQuotaPolicy.get_quota_policy') + def test_successful_rename(self, get_volume): + data = self.mock_args() + data['name'] = 'new_policy' + data['from_name'] = 'test_policy' + set_module_args(data) + current = { + 'name': 'test_policy' + } + get_volume.side_effect = [ + None, + current + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_quota_policy_mock_object('quota').apply() + assert exc.value.args[0]['changed'] + + def test_error(self): + data = self.mock_args() + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_quota_policy_mock_object('zapi_error').get_quota_policy() + assert exc.value.args[0]['msg'] == 'Error fetching quota policy test_policy: NetApp API failed. Reason - test:error' + with pytest.raises(AnsibleFailJson) as exc: + self.get_quota_policy_mock_object('zapi_error').create_quota_policy() + assert exc.value.args[0]['msg'] == 'Error creating quota policy test_policy: NetApp API failed. Reason - test:error' + with pytest.raises(AnsibleFailJson) as exc: + self.get_quota_policy_mock_object('zapi_error').delete_quota_policy() + assert exc.value.args[0]['msg'] == 'Error deleting quota policy test_policy: NetApp API failed. Reason - test:error' + data['name'] = 'new_policy' + data['from_name'] = 'test_policy' + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + self.get_quota_policy_mock_object('zapi_error').rename_quota_policy() + assert exc.value.args[0]['msg'] == 'Error renaming quota policy test_policy: NetApp API failed. 
Reason - test:error' diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quotas.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quotas.py new file mode 100644 index 000000000..cd03989c6 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_quotas.py @@ -0,0 +1,853 @@ +# (c) 2019-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_quotas ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_no_warnings,\ + assert_warning_was_raised, call_main, patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_error, build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_quotas \ + import NetAppONTAPQuotas as my_module, main as my_main + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +SRR = rest_responses({ + # module specific responses + 'quota_record': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansible" + }, + "files": { + "hard_limit": "100", + "soft_limit": "80" + }, + "qtree": 
{ + "id": "1", + "name": "qt1" + }, + "space": { + "hard_limit": "1222800", + "soft_limit": "51200" + }, + "type": "user", + "user_mapping": False, + "users": [{"name": "quota_user"}], + "uuid": "264a9e0b-2e03-11e9-a610-005056a7b72d", + "volume": {"name": "fv", "uuid": "264a9e0b-2e03-11e9-a610-005056a7b72da"}, + "target": { + "name": "20:05:00:50:56:b3:0c:fa" + }, + } + ], + "num_records": 1 + }, None + ), + 'quota_record_0_empty_limtis': (200, {"records": [{ + "svm": {"name": "ansible"}, + "files": {"hard_limit": 0}, + "qtree": {"id": "1", "name": "qt1"}, + "space": {"hard_limit": 0}, + "type": "user", + "user_mapping": False, + "users": [{"name": "quota_user"}], + "uuid": "264a9e0b-2e03-11e9-a610-005056a7b72d", + "volume": {"name": "fv", "uuid": "264a9e0b-2e03-11e9-a610-005056a7b72da"}, + "target": {"name": "20:05:00:50:56:b3:0c:fa"}, + }], "num_records": 1}, None), + 'quota_status': ( + 200, + { + "records": [ + { + "quota": {"state": "off"} + } + ], + "num_records": 1 + }, None + ), + 'quota_on': ( + 200, + { + "records": [ + { + "quota": {"state": "on"} + } + ], + "num_records": 1 + }, None + ), + "no_record": ( + 200, + {"num_records": 0}, + None), + "error_5308572": (409, None, {'code': 5308572, 'message': 'Expected delete error'}), + "error_5308569": (409, None, {'code': 5308569, 'message': 'Expected delete error'}), + "error_5308568": (409, None, {'code': 5308568, 'message': 'Expected create error'}), + "error_5308571": (409, None, {'code': 5308571, 'message': 'Expected create error'}), + "error_5308567": (409, None, {'code': 5308567, 'message': 'Expected modify error'}), + 'error_rest': (404, None, {"message": "temporarily locked from changes", "code": "4", "target": "uuid"}), + "volume_uuid": (200, {"records": [{ + 'uuid': 'sdgthfd' + }], 'num_records': 1}, None), + 'job_info': (200, { + "job": { + "uuid": "d78811c1-aebc-11ec-b4de-005056b30cfa", + "_links": {"self": {"href": "/api/cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa"}} + }}, None), + 
'job_not_found': (404, "", {"message": "entry doesn't exist", "code": "4", "target": "uuid"}) +}) + + +quota_policy = { + 'num-records': 1, + 'attributes-list': {'quota-entry': {'volume': 'ansible', 'policy-name': 'policy_name', 'perform-user-mapping': 'true', + 'file-limit': '-', 'disk-limit': '-', 'quota-target': '/vol/ansible', + 'soft-file-limit': '-', 'soft-disk-limit': '-', 'threshold': '-'}}, +} + +quota_policies = { + 'num-records': 2, + 'attributes-list': [{'quota-policy-info': {'policy-name': 'p1'}}, + {'quota-policy-info': {'policy-name': 'p2'}}], +} + +ZRR = zapi_responses({ + 'quota_policy': build_zapi_response(quota_policy, 1), + 'quota_on': build_zapi_response({'status': 'on'}, 1), + 'quota_off': build_zapi_response({'status': 'off'}, 1), + 'quota_policies': build_zapi_response(quota_policies, 1), + 'quota_fail': build_zapi_error('TEST', 'This exception is from the unit test'), + 'quota_fail_13001': build_zapi_error('13001', 'success'), + 'quota_fail_14958': build_zapi_error('14958', 'No valid quota rules found'), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'volume': 'ansible', + 'vserver': 'ansible', + 'quota_target': '/vol/ansible', + 'type': 'user', + 'use_rest': 'never' +} + + +def test_module_fail_when_required_args_missing(): + error = create_module(my_module, fail=True)['msg'] + assert 'missing required arguments:' in error + + +def test_ensure_get_called(): + register_responses([ + ('ZAPI', 'quota-list-entries-iter', ZRR['empty']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + quotas = my_obj.get_quotas() + print('QUOTAS', quotas) + assert quotas is None + + +def test_ensure_get_quota_not_called(): + args = dict(DEFAULT_ARGS) + args.pop('quota_target') + args.pop('type') + my_obj = create_module(my_module, args) + assert my_obj.get_quotas() is None + + +def test_ensure_get_called_existing(): + register_responses([ + ('ZAPI', 'quota-list-entries-iter', 
ZRR['quota_policy']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + quotas = my_obj.get_quotas() + print('QUOTAS', quotas) + assert quotas + + +def test_successful_create(): + ''' creating quota and testing idempotency ''' + register_responses([ + ('ZAPI', 'quota-list-entries-iter', ZRR['no_records']), + ('ZAPI', 'quota-status', ZRR['quota_on']), + ('ZAPI', 'quota-set-entry', ZRR['success']), + ('ZAPI', 'quota-resize', ZRR['success']), + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_policy']), + ('ZAPI', 'quota-status', ZRR['quota_on']), + ]) + module_args = { + 'file_limit': '3', + 'disk_limit': '4', + 'perform_user_mapping': False, + 'policy': 'policy', + 'soft_file_limit': '3', + 'soft_disk_limit': '4', + 'threshold': '10', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS)['changed'] + + +def test_successful_delete(): + ''' deleting quota and testing idempotency ''' + register_responses([ + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_policy']), + ('ZAPI', 'quota-status', ZRR['quota_on']), + ('ZAPI', 'quota-delete-entry', ZRR['success']), + ('ZAPI', 'quota-resize', ZRR['success']), + ('ZAPI', 'quota-list-entries-iter', ZRR['no_records']), + ('ZAPI', 'quota-status', ZRR['quota_on']), + ]) + module_args = { + 'policy': 'policy', + 'state': 'absent' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_successful_modify(dont_sleep): + ''' modifying quota and testing idempotency ''' + register_responses([ + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_policy']), + ('ZAPI', 'quota-status', ZRR['quota_on']), + ('ZAPI', 'quota-modify-entry', ZRR['success']), + ('ZAPI', 'quota-off', ZRR['success']), + ('ZAPI', 'quota-on', ZRR['success']), + ]) + module_args = { + 'activate_quota_on_change': 'reinitialize', + 'file_limit': '3', + 
'policy': 'policy', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_quota_on_off(): + ''' quota set on or off ''' + register_responses([ + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_policy']), + ('ZAPI', 'quota-status', ZRR['quota_off']), + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_policy']), + ('ZAPI', 'quota-status', ZRR['quota_on']), + ('ZAPI', 'quota-off', ZRR['success']), + ]) + module_args = {'set_quota_status': False} + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('ZAPI', 'quota-status', ZRR['quota_fail']), + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_fail']), + ('ZAPI', 'quota-set-entry', ZRR['quota_fail']), + ('ZAPI', 'quota-delete-entry', ZRR['quota_fail']), + ('ZAPI', 'quota-modify-entry', ZRR['quota_fail']), + ('ZAPI', 'quota-on', ZRR['quota_fail']), + ('ZAPI', 'quota-policy-get-iter', ZRR['quota_fail']), + ('ZAPI', 'quota-resize', ZRR['quota_fail']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + assert 'Error fetching quotas status info' in expect_and_capture_ansible_exception(my_obj.get_quota_status, 'fail')['msg'] + assert 'Error fetching quotas info' in expect_and_capture_ansible_exception(my_obj.get_quotas, 'fail')['msg'] + assert 'Error adding/modifying quota entry' in expect_and_capture_ansible_exception(my_obj.quota_entry_set, 'fail')['msg'] + assert 'Error deleting quota entry' in expect_and_capture_ansible_exception(my_obj.quota_entry_delete, 'fail')['msg'] + assert 'Error modifying quota entry' in expect_and_capture_ansible_exception(my_obj.quota_entry_modify, 'fail', {})['msg'] + assert 'Error setting quota-on for ansible' in expect_and_capture_ansible_exception(my_obj.on_or_off_quota, 'fail', 'quota-on')['msg'] + assert 'Error fetching quota policies' in 
expect_and_capture_ansible_exception(my_obj.get_quota_policies, 'fail')['msg'] + assert 'Error setting quota-resize for ansible:' in expect_and_capture_ansible_exception(my_obj.resize_quota, 'fail')['msg'] + + +def test_get_quota_policies(): + register_responses([ + ('ZAPI', 'quota-policy-get-iter', ZRR['quota_policies']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + policies = my_obj.get_quota_policies() + assert len(policies) == 2 + + +def test_debug_quota_get_error_fail(): + register_responses([ + ('ZAPI', 'quota-policy-get-iter', ZRR['quota_policies']), + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_policy']), + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_policy']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.debug_quota_get_error, 'fail', 'dummy error')['msg'] + assert error.startswith('Error fetching quotas info: dummy error - current vserver policies: ') + + +def test_debug_quota_get_error_success(): + register_responses([ + ('ZAPI', 'quota-policy-get-iter', ZRR['quota_policy']), + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_policy']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + quotas = my_obj.debug_quota_get_error('dummy error') + print('QUOTAS', quotas) + assert quotas + + +def test_get_no_quota_retry_on_13001(): + register_responses([ + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_fail_13001']), + ]) + module_args = {'policy': 'policy'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = expect_and_capture_ansible_exception(my_obj.get_quotas, 'fail')['msg'] + assert error.startswith('Error fetching quotas info for policy policy') + + +def test_get_quota_retry_on_13001(): + register_responses([ + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_fail_13001']), + ('ZAPI', 'quota-policy-get-iter', ZRR['quota_policy']), + ('ZAPI', 'quota-list-entries-iter', ZRR['quota_policy']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + quotas 
= my_obj.get_quotas() + print('QUOTAS', quotas) + assert quotas + + +def test_resize_warning(): + ''' warning as resize is not allowed if all rules were deleted ''' + register_responses([ + ('ZAPI', 'quota-resize', ZRR['quota_fail_14958']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.resize_quota('delete') + assert_warning_was_raised('Last rule deleted, but quota is on as resize is not allowed.') + + +def test_quota_on_warning(): + ''' warning as quota-on is not allowed if all rules were deleted ''' + register_responses([ + ('ZAPI', 'quota-on', ZRR['quota_fail_14958']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.on_or_off_quota('quota-on', 'delete') + print_warnings() + assert_warning_was_raised('Last rule deleted, quota is off.') + + +def test_convert_size_format(): + module_args = {'disk_limit': '10MB'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.convert_to_kb_or_bytes('disk_limit') + print(my_obj.parameters) + assert my_obj.parameters['disk_limit'] == '10240' + my_obj.parameters['disk_limit'] = '10' + assert my_obj.convert_to_kb_or_bytes('disk_limit') + print(my_obj.parameters) + assert my_obj.parameters['disk_limit'] == '10' + my_obj.parameters['disk_limit'] = '10tB' + assert my_obj.convert_to_kb_or_bytes('disk_limit') + print(my_obj.parameters) + assert my_obj.parameters['disk_limit'] == str(10 * 1024 * 1024 * 1024) + my_obj.parameters['disk_limit'] = '' + assert not my_obj.convert_to_kb_or_bytes('disk_limit') + print(my_obj.parameters) + assert my_obj.parameters['disk_limit'] == '' + + +def test_error_convert_size_format(): + module_args = { + 'disk_limit': '10MBi', + 'quota_target': '' + } + error = create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error.startswith('disk_limit input string is not a valid size format') + module_args = { + 'soft_disk_limit': 'MBi', + 'quota_target': '' + } + error = create_module(my_module, DEFAULT_ARGS, module_args, 
fail=True)['msg'] + assert error.startswith('soft_disk_limit input string is not a valid size format') + module_args = { + 'soft_disk_limit': '10MB10', + 'quota_target': '' + } + error = create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error.startswith('soft_disk_limit input string is not a valid size format') + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_has_netapp_lib(has_netapp_lib): + has_netapp_lib.return_value = False + assert call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] == 'Error: the python NetApp-Lib module is required. Import error: None' + + +def create_from_main(): + register_responses([ + ('ZAPI', 'quota-list-entries-iter', ZRR['no_records']), + ('ZAPI', 'quota-status', ZRR['quota_on']), + ('ZAPI', 'quota-set-entry', ZRR['success']), + ]) + assert call_main(my_main, DEFAULT_ARGS)['changed'] + + +ARGS_REST = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always', + 'volume': 'ansible', + 'vserver': 'ansible', + 'quota_target': 'quota_user', + 'qtree': 'qt1', + 'type': 'user' +} + + +def test_rest_error_get(): + '''Test error rest get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on getting quota rule info' in error + + +def test_rest_successful_create(): + '''Test successful rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('POST', 'storage/quota/rules', SRR['empty_good']), + ]) + module_args = { + "users": [{"name": "quota_user"}], + } + assert create_and_apply(my_module, ARGS_REST) + + +@patch('time.sleep') +def test_rest_successful_create_job_error(sleep): + '''Test successful rest create''' + register_responses([ + 
('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('POST', 'storage/quota/rules', SRR['job_info']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'storage/volumes', SRR['volume_uuid']) + ]) + module_args = { + "users": [{"name": "quota_user"}], + } + assert create_and_apply(my_module, ARGS_REST) + print_warnings() + assert_warning_was_raised('Ignoring job status, assuming success.') + + +def test_rest_error_create(): + '''Test error rest create''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('POST', 'storage/quota/rules', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on creating quotas rule:' in error + + +def test_delete_rest(): + ''' Test delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('DELETE', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['empty_good']), + ]) + module_args = { + 'state': 'absent' + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_error_delete_rest(): + ''' Test error delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('DELETE', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['generic_error']), + ]) + module_args = { + 
'state': 'absent' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on deleting quotas rule:' in error + + +def test_modify_files_limit_rest(): + ''' Test modify with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_on']), + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['empty_good']), + ]) + module_args = { + "file_limit": "122", "soft_file_limit": "90" + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_modify_space_limit_rest(): + ''' Test modify with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_on']), + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['empty_good']), + ]) + module_args = { + "disk_limit": "1024", "soft_disk_limit": "80" + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_modify_rest_error(): + ''' Test negative modify with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['generic_error']), + ]) + module_args = { + 'perform_user_mapping': True + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on modifying quotas rule:' in error + + +@patch('time.sleep') +def test_modify_rest_temporary_locked_error(sleep): + ''' Test negative modify with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_status']), + # wait for 60s if we get temporarily locked error. 
+ ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_rest']), + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_rest']), + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['success']), + + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_status']), + # error persists even after 60s + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_rest']), + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_rest']), + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_rest']), + + # wait 60s in create for temporarily locked error. + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('POST', 'storage/quota/rules', SRR['error_rest']), + ('POST', 'storage/quota/rules', SRR['success']), + ]) + module_args = { + 'perform_user_mapping': True + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + assert 'Error on modifying quotas rule:' in create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + + +def test_rest_successful_create_idempotency(): + '''Test rest create idempotency''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record_0_empty_limtis']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record_0_empty_limtis']), + ('GET', 'storage/volumes', SRR['quota_status']) + ]) + assert create_and_apply(my_module, ARGS_REST)['changed'] is False + 
module_args = { + "disk_limit": "0", "soft_disk_limit": "-", "file_limit": 0, "soft_file_limit": "-" + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] is False + module_args = { + "disk_limit": "0", "soft_disk_limit": "-1", "file_limit": "0", "soft_file_limit": "-1" + } + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] is False + + +def test_rest_successful_delete_idempotency(): + '''Test successful rest delete''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['quota_status']), + ]) + module_args = {'use_rest': 'always', 'state': 'absent'} + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] is False + + +def test_modify_quota_status_rest(): + ''' Test modify quota status and error with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('PATCH', 'storage/volumes/264a9e0b-2e03-11e9-a610-005056a7b72da', SRR['empty_good']) + ]) + module_args = {"set_quota_status": "on"} + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_error_convert_size_format_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ]) + module_args = { + 'disk_limit': '10MBi', + 'quota_target': '' + } + error = create_module(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert error.startswith('disk_limit input string is not a valid size format') + module_args = { + 'soft_disk_limit': 'MBi', + 'quota_target': '' + } + error = create_module(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert error.startswith('soft_disk_limit input string is not a valid size format') + module_args = { + 'soft_disk_limit': '10MB10', + 'quota_target': '' + } + error = create_module(my_module, 
ARGS_REST, module_args, fail=True)['msg'] + assert error.startswith('soft_disk_limit input string is not a valid size format') + + +def test_convert_size_format_rest(): + module_args = {'disk_limit': '10MB'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.convert_to_kb_or_bytes('disk_limit') + print(my_obj.parameters) + assert my_obj.parameters['disk_limit'] == '10240' + my_obj.parameters['disk_limit'] = '10' + assert my_obj.convert_to_kb_or_bytes('disk_limit') + print(my_obj.parameters) + assert my_obj.parameters['disk_limit'] == '10' + my_obj.parameters['disk_limit'] = '10tB' + assert my_obj.convert_to_kb_or_bytes('disk_limit') + print(my_obj.parameters) + assert my_obj.parameters['disk_limit'] == str(10 * 1024 * 1024 * 1024) + my_obj.parameters['disk_limit'] = '' + assert not my_obj.convert_to_kb_or_bytes('disk_limit') + print(my_obj.parameters) + assert my_obj.parameters['disk_limit'] == '' + + +def test_warning_rest_delete_5308572(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('DELETE', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_5308572']) + ]) + assert create_and_apply(my_module, ARGS_REST, {'state': 'absent'})['changed'] + # assert 'Error on deleting quotas rule:' in error + msg = "Quota policy rule delete opertation succeeded. However the rule is still being enforced. To stop enforcing, "\ + "reinitialize(disable and enable again) the quota for volume ansible in SVM ansible." 
+ assert_warning_was_raised(msg) + + +@patch('time.sleep') +def test_no_warning_rest_delete_5308572(sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_on']), + ('DELETE', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_5308572']), + ('PATCH', 'storage/volumes/264a9e0b-2e03-11e9-a610-005056a7b72da', SRR['success']), + ('PATCH', 'storage/volumes/264a9e0b-2e03-11e9-a610-005056a7b72da', SRR['success']) + ]) + assert create_and_apply(my_module, ARGS_REST, {'state': 'absent', 'activate_quota_on_change': 'reinitialize'})['changed'] + assert_no_warnings() + + +def test_warning_rest_delete_5308569(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('DELETE', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_5308569']) + ]) + assert create_and_apply(my_module, ARGS_REST, {'state': 'absent'})['changed'] + # assert 'Error on deleting quotas rule:' in error + msg = "Quota policy rule delete opertation succeeded. However quota resize failed due to an internal error. To make quotas active, "\ + "reinitialize(disable and enable again) the quota for volume ansible in SVM ansible." 
+ assert_warning_was_raised(msg) + + +@patch('time.sleep') +def test_no_warning_rest_delete_5308569(sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_on']), + ('DELETE', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_5308569']), + ('PATCH', 'storage/volumes/264a9e0b-2e03-11e9-a610-005056a7b72da', SRR['success']), + ('PATCH', 'storage/volumes/264a9e0b-2e03-11e9-a610-005056a7b72da', SRR['success']) + ]) + assert create_and_apply(my_module, ARGS_REST, {'state': 'absent', 'activate_quota_on_change': 'reinitialize'})['changed'] + assert_no_warnings() + + +def test_warning_rest_create_5308568(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('POST', 'storage/quota/rules', SRR['error_5308568']), + ('GET', 'storage/volumes', SRR['volume_uuid']) + ]) + assert create_and_apply(my_module, ARGS_REST)['changed'] + msg = "Quota policy rule create opertation succeeded. However quota resize failed due to an internal error. To make quotas active, "\ + "reinitialize(disable and enable again) the quota for volume ansible in SVM ansible." 
+ assert_warning_was_raised(msg) + + +@patch('time.sleep') +def test_no_warning_rest_create_5308568(sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['quota_on']), + ('POST', 'storage/quota/rules', SRR['error_5308568']), + ('GET', 'storage/volumes', SRR['volume_uuid']), + ('PATCH', 'storage/volumes/sdgthfd', SRR['success']), + ('PATCH', 'storage/volumes/sdgthfd', SRR['success']) + ]) + assert create_and_apply(my_module, ARGS_REST, {'activate_quota_on_change': 'reinitialize'})['changed'] + assert_no_warnings() + + +def test_warning_rest_create_5308571(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['quota_status']), + ('POST', 'storage/quota/rules', SRR['error_5308571']), + ('GET', 'storage/volumes', SRR['volume_uuid']) + ]) + assert create_and_apply(my_module, ARGS_REST)['changed'] + msg = "Quota policy rule create opertation succeeded. but quota resize is skipped. To make quotas active, "\ + "reinitialize(disable and enable again) the quota for volume ansible in SVM ansible." 
+ assert_warning_was_raised(msg) + + +@patch('time.sleep') +def test_no_warning_rest_create_5308571(sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['quota_on']), + ('POST', 'storage/quota/rules', SRR['error_5308568']), + ('GET', 'storage/volumes', SRR['volume_uuid']), + ('PATCH', 'storage/volumes/sdgthfd', SRR['success']), + ('PATCH', 'storage/volumes/sdgthfd', SRR['success']) + ]) + assert create_and_apply(my_module, ARGS_REST, {'activate_quota_on_change': 'reinitialize'})['changed'] + assert_no_warnings() + + +def test_warning_rest_modify_5308567(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_on']), + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_5308567']), + ]) + module_args = {"soft_file_limit": "100"} + assert create_and_apply(my_module, ARGS_REST, module_args) + msg = "Quota policy rule modify opertation succeeded. However quota resize failed due to an internal error. To make quotas active, "\ + "reinitialize(disable and enable again) the quota for volume ansible in SVM ansible." 
+ assert_warning_was_raised(msg) + + +@patch('time.sleep') +def test_no_warning_rest_modify_5308567(sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/quota/rules', SRR['quota_record']), + ('GET', 'storage/volumes', SRR['quota_on']), + ('PATCH', 'storage/quota/rules/264a9e0b-2e03-11e9-a610-005056a7b72d', SRR['error_5308567']), + ('PATCH', 'storage/volumes/264a9e0b-2e03-11e9-a610-005056a7b72da', SRR['success']), + ('PATCH', 'storage/volumes/264a9e0b-2e03-11e9-a610-005056a7b72da', SRR['success']) + ]) + module_args = {"soft_file_limit": "100", 'activate_quota_on_change': 'reinitialize'} + assert create_and_apply(my_module, ARGS_REST, module_args)['changed'] + assert_no_warnings() + + +def test_if_all_methods_catch_exception_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/quota/rules', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('POST', 'storage/quota/rules', SRR['generic_error']), + ('DELETE', 'storage/quota/rules/abdcdef', SRR['generic_error']), + ('PATCH', 'storage/quota/rules/abdcdef', SRR['generic_error']), + ('PATCH', 'storage/volumes/ghijklmn', SRR['generic_error']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + + ]) + my_obj = create_module(my_module, ARGS_REST) + my_obj.quota_uuid = 'abdcdef' + my_obj.volume_uuid = 'ghijklmn' + assert 'Error on getting quota rule info' in expect_and_capture_ansible_exception(my_obj.get_quotas_rest, 'fail')['msg'] + assert 'Error on getting quota status info' in expect_and_capture_ansible_exception(my_obj.get_quota_status_or_volume_id_rest, 'fail')['msg'] + assert 'Error on getting volume' in expect_and_capture_ansible_exception(my_obj.get_quota_status_or_volume_id_rest, 'fail', True)['msg'] + assert 'does not exist' in expect_and_capture_ansible_exception(my_obj.get_quota_status_or_volume_id_rest, 'fail', 
True)['msg'] + assert 'Error on creating quotas rule' in expect_and_capture_ansible_exception(my_obj.quota_entry_set_rest, 'fail')['msg'] + assert 'Error on deleting quotas rule' in expect_and_capture_ansible_exception(my_obj.quota_entry_delete_rest, 'fail')['msg'] + assert 'Error on modifying quotas rule' in expect_and_capture_ansible_exception(my_obj.quota_entry_modify_rest, 'fail', {})['msg'] + assert 'Error setting quota-on for ansible' in expect_and_capture_ansible_exception(my_obj.on_or_off_quota_rest, 'fail', 'quota-on')['msg'] + error = "Error: Qtree cannot be specified for a tree type rule" + assert error in create_module(my_module, ARGS_REST, {'qtree': 'qtree1', 'type': 'tree'}, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_cli.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_cli.py new file mode 100644 index 000000000..d9f89a21a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_cli.py @@ -0,0 +1,128 @@ +# (c) 2019-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_rest_cli''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + call_main, create_module, expect_and_capture_ansible_exception, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_rest_cli import 
NetAppONTAPCommandREST as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'allow': (200, {'Allow': ['GET', 'WHATEVER']}, None) +}, False) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'auto', + 'command': 'volume', + 'verb': 'GET', + 'params': {'fields': 'size,percent_used'} +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + args = dict(DEFAULT_ARGS) + args.pop('verb') + error = 'missing required arguments: verb' + assert error in call_main(my_main, args, fail=True)['msg'] + + +def test_rest_cli(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/volume', SRR['empty_good']), + ]) + assert call_main(my_main, DEFAULT_ARGS)['changed'] is False + + +def test_rest_cli_options(): + module_args = {'verb': 'OPTIONS'} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('OPTIONS', 'private/cli/volume', SRR['allow']), + ]) + exit_json = call_main(my_main, DEFAULT_ARGS, module_args) + assert not exit_json['changed'] + assert 'Allow' in exit_json['msg'] + + +def test_negative_connection_error(): + module_args = {'verb': 'OPTIONS'} + register_responses([ + ('GET', 'cluster', SRR['generic_error']), + ]) + msg = "failed to connect to REST over hostname: ['Expected error']. Use na_ontap_command for non-rest CLI." 
+ assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def check_verb(verb): + module_args = {'verb': verb} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + (verb, 'private/cli/volume', SRR['allow']), + ], "test_verbs") + + exit_json = call_main(my_main, DEFAULT_ARGS, module_args) + assert not exit_json['changed'] if verb in ['GET', 'OPTIONS'] else exit_json['changed'] + assert 'Allow' in exit_json['msg'] + # assert mock_request.call_args[0][0] == verb + + +def test_verbs(): + for verb in ['POST', 'DELETE', 'PATCH', 'OPTIONS', 'PATCH']: + check_verb(verb) + + +def test_check_mode(): + module_args = {'verb': 'GET'} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + my_obj.module.check_mode = True + result = expect_and_capture_ansible_exception(my_obj.apply, 'exit') + assert result['changed'] is False + msg = "Would run command: 'volume'" + assert msg in result['msg'] + + +def test_negative_verb(): + module_args = {'verb': 'GET'} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + my_obj.verb = 'INVALID' + msg = 'Error: unexpected verb INVALID' + assert msg in expect_and_capture_ansible_exception(my_obj.apply, 'fail')['msg'] + + +def test_negative_error(): + module_args = {'verb': 'GET'} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'private/cli/volume', SRR['generic_error']), + ]) + msg = 'Error: Expected error' + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_info.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_info.py new file mode 100644 index 000000000..bf678e3ac --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_info.py @@ -0,0 
+1,1195 @@ +# (c) 2020-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' Unit Tests NetApp ONTAP REST APIs Ansible module: na_ontap_rest_info ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import call_main, create_module, \ + expect_and_capture_ansible_exception, patch_ansible, create_and_apply, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_rest_info \ + import NetAppONTAPGatherInfo as ontap_rest_info_module, main as my_main + +if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # common responses + 'validate_ontap_version_pass': ( + 200, dict(version=dict(generation=9, major=10, minor=1, full='dummy_9_10_1')), None), + 'validate_ontap_version_fail': (200, None, 'API not found error'), + 'error_invalid_api': (500, None, {'code': 3, 'message': 'Invalid API'}), + 'error_user_is_not_authorized': (500, None, {'code': 6, 'message': 'user is not authorized'}), + 'error_no_processing': (500, None, {'code': 123, 'message': 'error reported as is'}), + 'error_no_aggr_recommendation': ( + 500, None, {'code': 19726344, 'message': 'No recommendation can be made for this cluster'}), + 'get_subset_info': (200, + {'_links': {'self': {'href': 'dummy_href'}}, + 'num_records': 3, + 'records': [{'name': 
'dummy_vol1'}, + {'name': 'dummy_vol2'}, + {'name': 'dummy_vol3'}], + 'version': 'ontap_version'}, None), + 'get_subset_info_with_next': (200, + {'_links': {'self': {'href': 'dummy_href'}, + 'next': {'href': '/api/next_record_api'}}, + 'num_records': 3, + 'records': [{'name': 'dummy_vol1'}, + {'name': 'dummy_vol2'}, + {'name': 'dummy_vol3'}], + 'version': 'ontap_version'}, None), + 'get_next_record': (200, + {'_links': {'self': {'href': 'dummy_href'}}, + 'num_records': 2, + 'records': [{'name': 'dummy_vol1'}, + {'name': 'dummy_vol2'}], + 'version': 'ontap_version'}, None), + 'metrocluster_post': (200, + {'job': { + 'uuid': 'fde79888-692a-11ea-80c2-005056b39fe7', + '_links': { + 'self': { + 'href': '/api/cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7'}} + }}, + None), + 'metrocluster_return': (200, + {"_links": { + "self": { + "href": "/api/cluster/metrocluster/diagnostics" + } + }, "aggregate": { + "state": "ok", + "summary": { + "message": "" + }, "timestamp": "2020-07-22T16:42:51-07:00" + }}, None), + 'job': (200, + { + "uuid": "cca3d070-58c6-11ea-8c0c-005056826c14", + "description": "POST /api/cluster/metrocluster", + "state": "success", + "message": "There are not enough disks in Pool1.", + "code": 2432836, + "start_time": "2020-02-26T10:35:44-08:00", + "end_time": "2020-02-26T10:47:38-08:00", + "_links": { + "self": { + "href": "/api/cluster/jobs/cca3d070-58c6-11ea-8c0c-005056826c14" + } + } + }, None), + 'get_private_cli_subset_info': (200, + { + 'records': [ + {'node': 'node1', 'check_type': 'type'}, + {'node': 'node1', 'check_type': 'type'}, + {'node': 'node1', 'check_type': 'type'}], + "num_records": 3}, None), + 'get_private_cli_vserver_security_file_directory_info': ( + 200, + { + 'records': [ + {'acls': ['junk', 'junk', 'DACL - ACEs', 'AT-user-0x123']}, + {'node': 'node1', 'check_type': 'type'}, + {'node': 'node1', 'check_type': 'type'}], + "num_records": 3}, None), + 'lun_info': (200, {'records': [{"serial_number": "z6CcD+SK5mPb"}]}, None), + 
'volume_info': (200, {"uuid": "7882901a-1aef-11ec-a267-005056b30cfa"}, None), + 'svm_uuid': (200, {"records": [{"uuid": "test_uuid"}], "num_records": 1}, None), + 'get_uuid_policy_id_export_policy': ( + 200, + { + "records": [{ + "svm": { + "uuid": "uuid", + "name": "svm"}, + "id": 123, + "name": "ansible" + }], + "num_records": 1}, None), + 'vscan_on_access_policies': ( + 200, {"records": [ + { + "name": "on-access-test", + "mandatory": True, + "scope": { + "scan_readonly_volumes": True, + "exclude_paths": [ + "\\dir1\\dir2\\name", + "\\vol\\a b", + "\\vol\\a,b\\" + ], + "scan_without_extension": True, + "include_extensions": [ + "mp*", + "txt" + ], + "exclude_extensions": [ + "mp*", + "txt" + ], + "only_execute_access": True, + "max_file_size": "2147483648" + }, + "enabled": True + } + ]}, None + ), + 'vscan_on_demand_policies': ( + 200, {"records": [ + { + "log_path": "/vol0/report_dir", + "scan_paths": [ + "/vol1/", + "/vol2/cifs/" + ], + "name": "task-1", + "svm": { + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + "scope": { + "exclude_paths": [ + "/vol1/cold-files/", + "/vol1/cifs/names" + ], + "scan_without_extension": True, + "include_extensions": [ + "vmdk", + "mp*" + ], + "exclude_extensions": [ + "mp3", + "mp4" + ], + "max_file_size": "10737418240" + }, + "schedule": { + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + "name": "weekly", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + } + } + ]}, None + ), + 'vscan_scanner_pools': ( + 200, {"records": [ + { + "cluster": { + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + "name": "cluster1", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }, + "name": "scanner-1", + "servers": [ + "1.1.1.1", + "10.72.204.27", + "vmwin204-27.fsct.nb" + ], + "privileged_users": [ + "cifs\\u1", + "cifs\\u2" + ], + "svm": { + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + "name": 
"svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + "role": "primary" + } + ]}, None + ) +}) + +ALL_SUBSETS = ['application/applications', + 'application/consistency-groups', + 'application/templates', + 'cloud/targets', + 'cluster', + 'cluster/chassis', + 'cluster/counter/tables', + 'cluster/fireware/history', + 'cluster/jobs', + 'cluster/licensing/capacity-pools', + 'cluster/licensing/license-managers', + 'cluster/licensing/licenses', + 'cluster/mediators', + 'cluster/metrics', + 'cluster/metrocluster', + 'cluster/metrocluster/diagnostics', + 'cluster/metrocluster/dr-groups', + 'cluster/metrocluster/interconnects', + 'cluster/metrocluster/nodes', + 'cluster/metrocluster/operations', + 'cluster/metrocluster/svms', + 'cluster/nodes', + 'cluster/ntp/keys', + 'cluster/ntp/servers', + 'cluster/peers', + 'cluster/schedules', + 'cluster/sensors', + 'cluster/software', + 'cluster/software/download', + 'cluster/software/history', + 'cluster/software/packages', + 'cluster/web', + 'name-services/cache/group-membership/settings', + 'name-services/cache/host/settings', + 'name-services/cache/netgroup/settings', + 'name-services/cache/setting', + 'name-services/cache/unix-group/settings', + 'name-services/dns', + 'name-services/ldap', + 'name-services/ldap-schemas', + 'name-services/local-hosts', + 'name-services/name-mappings', + 'name-services/nis', + 'name-services/unix-groups', + 'name-services/unix-users', + 'network/ethernet/broadcast-domains', + 'network/ethernet/ports', + 'network/ethernet/switch/ports', + 'network/ethernet/switches', + 'network/fc/fabrics', + 'network/fc/interfaces', + 'network/fc/logins', + 'network/fc/ports', + 'network/fc/wwpn-aliases', + 'network/http-proxy', + 'network/ip/bgp/peer-groups', + 'network/ip/interfaces', + 'network/ip/routes', + 'network/ip/service-policies', + 'network/ip/subnets', + 'network/ipspaces', + 'private/support/alerts', + 'protocols/active-directory', + 'protocols/audit', + 'protocols/cifs/connections', + 
'protocols/cifs/domains', + 'protocols/cifs/group-policies', + 'protocols/cifs/home-directory/search-paths', + 'protocols/cifs/local-groups', + 'protocols/cifs/local-users', + 'protocols/cifs/netbios', + 'protocols/cifs/services', + 'protocols/cifs/session/files', + 'protocols/cifs/sessions', + 'protocols/cifs/shadow-copies', + 'protocols/cifs/shadowcopy-sets', + 'protocols/cifs/shares', + 'protocols/cifs/users-and-groups/privileges', + 'protocols/cifs/unix-symlink-mapping', + 'protocols/fpolicy', + 'protocols/locks', + 'protocols/ndmp', + 'protocols/ndmp/nodes', + 'protocols/ndmp/sessions', + 'protocols/ndmp/svms', + 'protocols/nfs/connected-clients', + 'protocols/nfs/connected-client-maps', + 'protocols/nfs/connected-client-settings', + 'protocols/nfs/export-policies', + 'protocols/nfs/kerberos/interfaces', + 'protocols/nfs/kerberos/realms', + 'protocols/nfs/services', + 'protocols/nvme/interfaces', + 'protocols/nvme/services', + 'protocols/nvme/subsystems', + 'protocols/nvme/subsystem-controllers', + 'protocols/nvme/subsystem-maps', + 'protocols/s3/buckets', + 'protocols/s3/services', + 'protocols/san/fcp/services', + 'protocols/san/igroups', + 'protocols/san/iscsi/credentials', + 'protocols/san/iscsi/services', + 'protocols/san/iscsi/sessions', + 'protocols/san/lun-maps', + 'protocols/san/portsets', + 'protocols/san/vvol-bindings', + 'protocols/vscan', + 'protocols/vscan/server-status', + 'security', + 'security/accounts', + 'security/anti-ransomware/suspects', + 'security/audit', + 'security/audit/destinations', + 'security/audit/messages', + 'security/authentication/cluster/ad-proxy', + 'security/authentication/cluster/ldap', + 'security/authentication/cluster/nis', + 'security/authentication/cluster/saml-sp', + 'security/authentication/publickeys', + 'security/aws-kms', + 'security/azure-key-vaults', + 'security/certificates', + 'security/gcp-kms', + 'security/ipsec', + 'security/ipsec/ca-certificates', + 'security/ipsec/policies', + 
'security/ipsec/security-associations', + 'security/key-manager-configs', + 'security/key-managers', + 'security/key-stores', + 'security/login/messages', + 'security/multi-admin-verify', + 'security/multi-admin-verify/approval-groups', + 'security/multi-admin-verify/requests', + 'security/multi-admin-verify/rules', + 'security/roles', + 'security/ssh', + 'security/ssh/svms', + 'snapmirror/policies', + 'snapmirror/relationships', + 'storage/aggregates', + 'storage/bridges', + 'storage/cluster', + 'storage/disks', + 'storage/file/clone/split-loads', + 'storage/file/clone/split-status', + 'storage/file/clone/tokens', + 'storage/file/moves', + 'storage/flexcache/flexcaches', + 'storage/flexcache/origins', + 'storage/luns', + 'storage/namespaces', + 'storage/pools', + 'storage/ports', + 'storage/qos/policies', + 'storage/qos/workloads', + 'storage/qtrees', + 'storage/quota/reports', + 'storage/quota/rules', + 'storage/shelves', + 'storage/snaplock/audit-logs', + 'storage/snaplock/compliance-clocks', + 'storage/snaplock/event-retention/operations', + 'storage/snaplock/event-retention/policies', + 'storage/snaplock/file-fingerprints', + 'storage/snaplock/litigations', + 'storage/snapshot-policies', + 'storage/switches', + 'storage/tape-devices', + 'storage/volumes', + 'storage/volume-efficiency-policies', + 'support/autosupport', + 'support/autosupport/check', + 'support/autosupport/messages', + 'support/auto-update', + 'support/auto-update/configurations', + 'support/auto-update/updates', + 'support/configuration-backup', + 'support/configuration-backup/backups', + 'support/coredump/coredumps', + 'support/ems', + 'support/ems/destinations', + 'support/ems/events', + 'support/ems/filters', + 'support/ems/messages', + 'support/snmp', + 'support/snmp/traphosts', + 'support/snmp/users', + 'svm/migrations', + 'svm/peers', + 'svm/peer-permissions', + 'svm/svms'] + +# Super Important, Metrocluster doesn't call get_subset_info and has 3 api calls instead of 1!!!! 
+# The metrocluster calls need to be in the correct place. The Module return the keys in a sorted list. +ALL_RESPONSES = [ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'application/applications', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', 'application/templates', SRR['get_subset_info']), + ('GET', 'cloud/targets', SRR['get_subset_info']), + ('GET', 'cluster', SRR['get_subset_info']), + ('GET', 'cluster/chassis', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', 'cluster/jobs', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', 'cluster/licensing/licenses', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', 'cluster/metrics', SRR['get_subset_info']), + ('GET', 'cluster/metrocluster', SRR['get_subset_info']), + # MCC DIAGs + ('POST', 'cluster/metrocluster/diagnostics', SRR['metrocluster_post']), + ('GET', 'cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7', SRR['job']), + ('GET', 'cluster/metrocluster/diagnostics', SRR['metrocluster_return']), + # Back to normal + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', 'cluster/metrocluster/nodes', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', 'cluster/nodes', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', 'cluster/ntp/servers', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + 
('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + 
('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + 
('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', 'support/ems/filters', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', '*', SRR['get_subset_info']), + ('GET', 'svm/peer-permissions', SRR['get_subset_info']), + ('GET', 'svm/peers', SRR['get_subset_info']), + ('GET', 'svm/svms', SRR['get_private_cli_subset_info']), +] + + +def set_default_args(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'validate_certs': False + }) + + +def set_args_run_ontap_version_check(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'validate_certs': False, + 'max_records': 1024, + 'gather_subset': ['volume_info'] + }) + + +def 
set_args_run_metrocluster_diag(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'validate_certs': False, + 'max_records': 1024, + 'gather_subset': ['cluster/metrocluster/diagnostics'] + }) + + +def set_args_run_ontap_gather_facts_for_vserver_info(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'validate_certs': False, + 'max_records': 1024, + 'gather_subset': ['vserver_info'] + }) + + +def set_args_run_ontap_gather_facts_for_volume_info(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'validate_certs': False, + 'max_records': 1024, + 'gather_subset': ['volume_info'] + }) + + +def set_args_run_ontap_gather_facts_for_all_subsets(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'validate_certs': False, + 'max_records': 1024, + 'gather_subset': ['all'] + }) + + +def set_args_run_ontap_gather_facts_for_all_subsets_with_fields_section_pass(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'validate_certs': False, + 'max_records': 1024, + 'fields': '*', + 'gather_subset': ['all'] + }) + + +def set_args_run_ontap_gather_facts_for_all_subsets_with_fields_section_fail(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'validate_certs': False, + 'max_records': 1024, + 'fields': ['uuid', 'name', 'node'], + 'gather_subset': ['all'] + }) + + +def set_args_run_ontap_gather_facts_for_aggregate_info_with_fields_section_pass(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'fields': ['uuid', 'name', 'node'], + 'validate_certs': False, + 'max_records': 1024, + 'gather_subset': ['aggregate_info'] + }) + + +def 
set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'https': True, + 'validate_certs': False, + 'max_records': 3, + 'gather_subset': ['volume_info'] + }) + + +def test_run_ontap_version_check_for_9_6_pass(): + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['get_subset_info']), + ]) + assert not create_and_apply(ontap_rest_info_module, set_args_run_ontap_version_check())['changed'] + + +def test_run_ontap_version_check_for_10_2_pass(): + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['get_subset_info']), + ]) + assert not create_and_apply(ontap_rest_info_module, set_args_run_ontap_version_check())['changed'] + + +def test_run_ontap_version_check_for_9_2_fail(): + ''' Test for Checking the ONTAP version ''' + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_fail']), + ]) + assert call_main(my_main, set_args_run_ontap_version_check(), + fail=True)['msg'] == 'Error using REST for version, error: %s.' 
% SRR['validate_ontap_version_fail'][2] + + +def test_version_warning_message(): + gather_subset = ['cluster/metrocluster/diagnostics'] + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + create_and_apply(ontap_rest_info_module, set_args_run_metrocluster_diag()) + assert_warning_was_raised('The following subset have been removed from your query as they are not supported on ' + + 'your version of ONTAP cluster/metrocluster/diagnostics requires (9, 8), ') + + +# metrocluster/diagnostics doesn't call get_subset_info and has 3 api calls instead of 1 +def test_run_metrocluster_pass(): + gather_subset = ['cluster/metrocluster/diagnostics'] + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('POST', 'cluster/metrocluster/diagnostics', SRR['metrocluster_post']), + ('GET', 'cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7', SRR['job']), + ('GET', 'cluster/metrocluster/diagnostics', SRR['metrocluster_return']), + ]) + assert set(create_and_apply(ontap_rest_info_module, set_args_run_metrocluster_diag())['ontap_info']) == set( + gather_subset) + + +def test_run_ontap_gather_facts_for_vserver_info_pass(): + gather_subset = ['svm/svms'] + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'svm/svms', SRR['get_subset_info']), + ]) + assert set(create_and_apply(ontap_rest_info_module, set_args_run_ontap_gather_facts_for_vserver_info())['ontap_info']) == set(gather_subset) + + +def test_run_ontap_gather_facts_for_volume_info_pass(): + gather_subset = ['storage/volumes'] + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['get_subset_info']), + ]) + assert set(create_and_apply(ontap_rest_info_module, set_args_run_ontap_gather_facts_for_volume_info())['ontap_info']) == set(gather_subset) + + +def test_run_ontap_gather_facts_for_all_subsets_pass(): + gather_subset = ALL_SUBSETS + register_responses(ALL_RESPONSES) + assert 
set(create_and_apply(ontap_rest_info_module, set_args_run_ontap_gather_facts_for_all_subsets())['ontap_info']) == set(gather_subset) + + +def test_run_ontap_gather_facts_for_all_subsets_with_fields_section_pass(): + gather_subset = ALL_SUBSETS + register_responses(ALL_RESPONSES) + assert set(create_and_apply(ontap_rest_info_module, + set_args_run_ontap_gather_facts_for_all_subsets_with_fields_section_pass() + )['ontap_info']) == set(gather_subset) + + +def test_run_ontap_gather_facts_for_all_subsets_with_fields_section_fail(): + error_message = "Error: fields: %s, only one subset will be allowed." \ + % set_args_run_ontap_gather_facts_for_aggregate_info_with_fields_section_pass()['fields'] + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ]) + assert \ + create_and_apply(ontap_rest_info_module, + set_args_run_ontap_gather_facts_for_all_subsets_with_fields_section_fail(), + fail=True + )['msg'] == error_message + + +def test_run_ontap_gather_facts_for_aggregate_info_pass_with_fields_section_pass(): + gather_subset = ['storage/aggregates'] + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/aggregates', SRR['get_subset_info']), + ]) + assert set(create_and_apply(ontap_rest_info_module, + set_args_run_ontap_gather_facts_for_aggregate_info_with_fields_section_pass() + )['ontap_info']) == set(gather_subset) + + +def test_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass(): + total_records = 5 + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['get_subset_info_with_next']), + ('GET', '/next_record_api', SRR['get_next_record']), + ]) + assert create_and_apply(ontap_rest_info_module, + set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass() + )['ontap_info']['storage/volumes']['num_records'] == total_records + + +def 
test_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass_python_keys(): + args = set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass() + args['use_python_keys'] = True + args['state'] = 'info' + total_records = 5 + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['get_subset_info_with_next']), + ('GET', '/next_record_api', SRR['get_next_record']), + ]) + assert create_and_apply(ontap_rest_info_module, args)['ontap_info']['storage_volumes']['num_records'] == total_records + + +def test_get_all_records_for_volume_info_with_parameters(): + args = set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass() + args['use_python_keys'] = True + args['parameters'] = {'fields': '*'} + total_records = 5 + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['get_subset_info_with_next']), + ('GET', '/next_record_api', SRR['get_next_record']), + ]) + assert create_and_apply(ontap_rest_info_module, args)['ontap_info']['storage_volumes']['num_records'] == total_records + + +def test_negative_error_on_get_next(): + args = set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass() + args['use_python_keys'] = True + args['parameters'] = {'fields': '*'} + total_records = 5 + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['get_subset_info_with_next']), + ('GET', '/next_record_api', SRR['generic_error']), + ]) + assert create_and_apply(ontap_rest_info_module, args, fail=True)['msg'] == 'Expected error' + + +def test_negative_bad_api(): + args = set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass() + args['use_python_keys'] = True + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['error_invalid_api']), + ]) 
+ assert create_and_apply(ontap_rest_info_module, args)['ontap_info']['storage_volumes'] == 'Invalid API' + + +def test_negative_error_no_aggr_recommendation(): + args = set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass() + args['use_python_keys'] = True + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['error_no_aggr_recommendation']), + ]) + assert create_and_apply(ontap_rest_info_module, args)['ontap_info']['storage_volumes'] == 'No recommendation can be made for this cluster' + + +def test_negative_error_not_authorized(): + args = set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass() + args['use_python_keys'] = True + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['error_user_is_not_authorized']), + ]) + assert 'user is not authorized to make' in create_and_apply(ontap_rest_info_module, args, fail=True)['msg'] + + +def test_negative_error_no_processing(): + args = set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass() + args['use_python_keys'] = True + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['error_no_processing']), + ]) + assert create_and_apply(ontap_rest_info_module, args, fail=True)['msg']['message'] == 'error reported as is' + + +def test_strip_dacls(): + record = {} + response = { + 'records': [record] + } + assert ontap_rest_info_module.strip_dacls(response) is None + record['acls'] = [] + assert ontap_rest_info_module.strip_dacls(response) is None + record['acls'] = ['junk', 'junk', 'DACL - ACEs'] + assert ontap_rest_info_module.strip_dacls(response) == [] + record['acls'] = ['junk', 'junk', 'DACL - ACEs', 'AT-user-0x123'] + assert ontap_rest_info_module.strip_dacls(response) == [{'access_type': 'AT', 'user_or_group': 'user'}] + record['acls'] = ['junk', 'junk', 
'DACL - ACEs', 'AT-user-0x123', 'AT2-group-0xABC'] + assert ontap_rest_info_module.strip_dacls(response) == [{'access_type': 'AT', 'user_or_group': 'user'}, + {'access_type': 'AT2', 'user_or_group': 'group'}] + + +def test_private_cli_vserver_security_file_directory(): + args = set_args_get_all_records_for_volume_info_to_check_next_api_call_functionality_pass() + args['gather_subset'] = 'private/cli/vserver/security/file-directory' + args['use_python_keys'] = True + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'private/cli/vserver/security/file-directory', SRR['get_private_cli_vserver_security_file_directory_info']), + ]) + assert create_and_apply(ontap_rest_info_module, args)['ontap_info'] == { + 'private_cli_vserver_security_file_directory': [{'access_type': 'AT', 'user_or_group': 'user'}]} + + +def test_get_ontap_subset_info_all_with_field(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'some/api', SRR['get_subset_info']), + ]) + my_obj = create_module(ontap_rest_info_module, set_default_args()) + subset_info = {'subset': {'api_call': 'some/api'}} + assert my_obj.get_ontap_subset_info_all('subset', 'fields', subset_info)['num_records'] == 3 + + +def test_negative_get_ontap_subset_info_all_bad_subset(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + my_obj = create_module(ontap_rest_info_module, set_default_args()) + msg = 'Specified subset bad_subset is not found, supported subsets are []' + assert expect_and_capture_ansible_exception(my_obj.get_ontap_subset_info_all, 'fail', 'bad_subset', None, {})['msg'] == msg + + +def test_demo_subset(): + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'cluster/software', SRR['get_subset_info']), + ('GET', 'svm/svms', SRR['get_subset_info']), + ('GET', 'cluster/nodes', SRR['get_subset_info']), + ]) + assert 'cluster/nodes' in call_main(my_main, set_default_args(), 
{'gather_subset': 'demo'})['ontap_info'] + + +def test_subset_with_default_fields(): + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/aggregates', SRR['get_subset_info']), + ]) + assert 'storage/aggregates' in \ + create_and_apply(ontap_rest_info_module, set_default_args(), {'gather_subset': 'aggr_efficiency_info'})[ + 'ontap_info'] + + +def test_negative_error_on_post(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('POST', 'api', SRR['generic_error']), + ]) + assert create_module(ontap_rest_info_module, set_default_args()).run_post({'api_call': 'api'}) is None + + +@patch('time.sleep') +def test_negative_error_on_wait_after_post(sleep_mock): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('POST', 'api', SRR['metrocluster_post']), + ('GET', 'cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7', SRR['generic_error']), + ('GET', 'cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7', SRR['generic_error']), # retries + ('GET', 'cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7', SRR['generic_error']), + ('GET', 'cluster/jobs/fde79888-692a-11ea-80c2-005056b39fe7', SRR['generic_error']), + ]) + my_obj = create_module(ontap_rest_info_module, set_default_args()) + assert expect_and_capture_ansible_exception(my_obj.run_post, 'fail', {'api_call': 'api'})['msg'] == ' - '.join( + ['Expected error'] * 4) + + +def test_owning_resource_snapshot(): + args = set_default_args() + args['gather_subset'] = 'storage/volumes/snapshots' + args['owning_resource'] = {'volume_name': 'vol1', 'svm_name': 'svm1'} + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['volume_info']), + ('GET', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa/snapshots', SRR['volume_info']) + ]) + assert create_and_apply(ontap_rest_info_module, args)['ontap_info'] + + +def test_owning_resource_snapshot_missing_1_resource(): + args = set_default_args() + 
args['gather_subset'] = 'storage/volumes/snapshots' + args['owning_resource'] = {'volume_name': 'vol1'} + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ]) + msg = 'Error: volume_name, svm_name are required for storage/volumes/snapshots' + assert create_and_apply(ontap_rest_info_module, args, fail=True)['msg'] == msg + + +def test_owning_resource_snapshot_missing_resource(): + args = set_default_args() + args['gather_subset'] = 'storage/volumes/snapshots' + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ]) + msg = 'Error: volume_name, svm_name are required for storage/volumes/snapshots' + assert create_and_apply(ontap_rest_info_module, args, fail=True)['msg'] == msg + + +def test_owning_resource_snapshot_volume_not_found(): + args = set_default_args() + args['gather_subset'] = 'storage/volumes/snapshots' + args['owning_resource'] = {'volume_name': 'vol1', 'svm_name': 'svm1'} + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/volumes', SRR['generic_error']), + ]) + msg = 'Could not find volume vol1 on SVM svm1' + assert create_and_apply(ontap_rest_info_module, args, fail=True)['msg'] == msg + + +def test_owning_resource_vscan_on_access_policies(): + args = set_default_args() + args['gather_subset'] = 'protocols/vscan/on-access-policies' + args['owning_resource'] = {'svm_name': 'svm1'} + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/vscan/test_uuid/on-access-policies', SRR['vscan_on_access_policies']) + ]) + assert create_and_apply(ontap_rest_info_module, args)['ontap_info'] + + +def test_owning_resource_vscan_on_demand_policies(): + args = set_default_args() + args['gather_subset'] = 'protocols/vscan/on-demand-policies' + args['owning_resource'] = {'svm_name': 'svm1'} + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 
'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/vscan/test_uuid/on-demand-policies', SRR['vscan_on_access_policies']) + ]) + assert create_and_apply(ontap_rest_info_module, args)['ontap_info'] + + +def test_owning_resource_vscan_scanner_pools(): + args = set_default_args() + args['gather_subset'] = 'protocols/vscan/scanner-pools' + args['owning_resource'] = {'svm_name': 'svm1'} + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/vscan/test_uuid/scanner-pools', SRR['vscan_scanner_pools']) + ]) + assert create_and_apply(ontap_rest_info_module, args)['ontap_info'] + + +def test_owning_resource_export_policies_rules(): + args = set_default_args() + args['gather_subset'] = 'protocols/nfs/export-policies/rules' + args['owning_resource'] = {'policy_name': 'policy_name', 'svm_name': 'svm1', 'rule_index': '1'} + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'protocols/nfs/export-policies', SRR['get_uuid_policy_id_export_policy']), + ('GET', 'protocols/nfs/export-policies/123/rules/1', SRR['get_uuid_policy_id_export_policy']) + ]) + assert create_and_apply(ontap_rest_info_module, args)['ontap_info'] + + +def test_owning_resource_export_policies_rules_missing_resource(): + args = set_default_args() + args['gather_subset'] = 'protocols/nfs/export-policies/rules' + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ]) + msg = 'Error: policy_name, svm_name, rule_index are required for protocols/nfs/export-policies/rules' + assert create_and_apply(ontap_rest_info_module, args, fail=True)['msg'] == msg + + +def test_owning_resource_export_policies_rules_missing_1_resource(): + args = set_default_args() + args['gather_subset'] = 'protocols/nfs/export-policies/rules' + args['owning_resource'] = {'policy_name': 'policy_name', 'svm_name': 'svm1'} + register_responses([ + ('GET', 'cluster', 
SRR['validate_ontap_version_pass']), + ]) + msg = 'Error: policy_name, svm_name, rule_index are required for protocols/nfs/export-policies/rules' + assert create_and_apply(ontap_rest_info_module, args, fail=True)['msg'] == msg + + +def test_owning_resource_export_policies_rules_policy_not_found(): + args = set_default_args() + args['gather_subset'] = 'protocols/nfs/export-policies/rules' + args['owning_resource'] = {'policy_name': 'policy_name', 'svm_name': 'svm1', 'rule_index': '1'} + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'protocols/nfs/export-policies', SRR['generic_error']), + ]) + msg = 'Could not find export policy policy_name on SVM svm1' + assert create_and_apply(ontap_rest_info_module, args, fail=True)['msg'] == msg + + +def test_lun_info_with_serial(): + args = set_default_args() + args['gather_subset'] = 'storage/luns' + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/luns', SRR['lun_info']), + ]) + info = create_and_apply(ontap_rest_info_module, args) + assert 'ontap_info' in info + assert 'storage/luns' in info['ontap_info'] + assert 'records' in info['ontap_info']['storage/luns'] + records = info['ontap_info']['storage/luns']['records'] + assert records + lun_info = records[0] + print('INFO', lun_info) + assert lun_info['serial_number'] == 'z6CcD+SK5mPb' + assert lun_info['serial_hex'] == '7a364363442b534b356d5062' + assert lun_info['naa_id'] == 'naa.600a0980' + '7a364363442b534b356d5062' + + +def test_ignore_api_errors(): + args = set_default_args() + args['gather_subset'] = 'storage/luns' + args['ignore_api_errors'] = ['something', 'Expected error'] + args['fields'] = ['**'] + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ('GET', 'storage/luns', SRR['error_record']), + ]) + info = create_and_apply(ontap_rest_info_module, args) + assert 'ontap_info' in info + assert 'storage/luns' in info['ontap_info'] + assert 
'error' in info['ontap_info']['storage/luns'] + error = info['ontap_info']['storage/luns']['error'] + assert error + assert error['code'] == 6 + assert error['message'] == 'Expected error' + print_warnings() + assert_warning_was_raised('Using ** can put an extra load on the system and should not be used in production') + + +def test_private_cli_fields(): + register_responses([ + ('GET', 'cluster', SRR['validate_ontap_version_pass']), + ]) + args = set_default_args() + my_obj = create_module(ontap_rest_info_module, args) + error = 'Internal error, no field for unknown_api' + assert error in expect_and_capture_ansible_exception(my_obj.private_cli_fields, 'fail', 'unknown_api')['msg'] + assert my_obj.private_cli_fields('private/cli/vserver/security/file-directory') == 'acls' + assert my_obj.private_cli_fields('support/autosupport/check') == 'node,corrective-action,status,error-detail,check-type,check-category' + my_obj.parameters['fields'] = ['f1', 'f2'] + assert my_obj.private_cli_fields('private/cli/vserver/security/file-directory') == 'f1,f2' diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_restit.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_restit.py new file mode 100644 index 000000000..89289386a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_restit.py @@ -0,0 +1,346 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_cluster ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, call +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import 
set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_restit \ + import NetAppONTAPRestAPI as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy_9_7_0')), None), + 'is_rest_95': (200, dict(version=dict(generation=9, major=5, minor=0, full='dummy_9_5_0')), None), + 'is_rest_96': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy_9_6_0')), None), + 'is_rest_97': (200, dict(version=dict(generation=9, major=7, minor=0, full='dummy_9_7_0')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': ({}, None, None), + 'zero_record': (200, {'records': []}, None), + 'job_id_record': ( + 200, { + 'job': { + 'uuid': '94b6e6a7-d426-11eb-ac81-00505690980f', + '_links': {'self': {'href': '/api/cluster/jobs/94b6e6a7-d426-11eb-ac81-00505690980f'}}}, + 'cli_output': ' Use the "job show -id 2379" command to view the status of this operation.'}, None), + 'job_response_record': ( + 200, { + "uuid": "f03ccbb6-d8bb-11eb-ac81-00505690980f", + "description": "File Directory Security Apply Job", + "state": "success", + "message": "Complete: Operation completed successfully. File ACLs modified using policy \"policy1\" on Vserver \"GBSMNAS80LD\". File count: 0. [0]", + "code": 0, + "start_time": "2021-06-29T05:25:26-04:00", + "end_time": "2021-06-29T05:25:26-04:00" + }, None), + 'job_response_record_running': ( + 200, { + "uuid": "f03ccbb6-d8bb-11eb-ac81-00505690980f", + "description": "File Directory Security Apply Job", + "state": "running", + "message": "Complete: Operation completed successfully. File ACLs modified using policy \"policy1\" on Vserver \"GBSMNAS80LD\". File count: 0. 
[0]", + "code": 0, + "start_time": "2021-06-29T05:25:26-04:00", + "end_time": "2021-06-29T05:25:26-04:00" + }, None), + 'job_response_record_failure': ( + 200, { + "uuid": "f03ccbb6-d8bb-11eb-ac81-00505690980f", + "description": "File Directory Security Apply Job", + "state": "failure", + "message": "Forcing some error for UT.", + "code": 0, + "start_time": "2021-06-29T05:25:26-04:00", + "end_time": "2021-06-29T05:25:26-04:00" + }, None), + 'generic_error': (500, None, "Expected error"), + 'rest_error': (400, None, {'message': '-error_message-', 'code': '-error_code-'}), + 'end_of_sequence': (None, None, "Unexpected call to send_request"), +} + + +def set_default_args(use_rest='auto'): + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + api = 'abc' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'api': api, + 'use_rest': use_rest + }) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_run_default_get(mock_request, patch_ansible): + ''' if no method is given, GET is the default ''' + args = dict(set_default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['empty_good'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 1 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_run_any(mock_request, patch_ansible): + ''' We don't validate the method name, so ANYthing goes ''' + args = dict(set_default_args()) + args['method'] = 'ANY' + args['body'] = {'bkey1': 'bitem1', 'bkey2': 'bitem2'} + args['query'] = {'qkey1': 'qitem1', 'qkey2': 'qitem2'} + args['files'] = {'fkey1': 'fitem1', 'fkey2': 'fitem2'} + set_module_args(args) + mock_request.side_effect = [ + SRR['empty_good'], + 
SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 1 + headers = my_obj.rest_api.build_headers(accept='application/json') + expected_call = call('ANY', 'abc', args['query'], args['body'], headers, args['files']) + assert expected_call in mock_request.mock_calls + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_run_any_rest_error(mock_request, patch_ansible): + ''' We don't validate the method name, so ANYthing goes ''' + args = dict(set_default_args()) + args['method'] = 'ANY' + args['body'] = {'bkey1': 'bitem1', 'bkey2': 'bitem2'} + args['query'] = {'qkey1': 'qitem1', 'qkey2': 'qitem2'} + set_module_args(args) + mock_request.side_effect = [ + SRR['rest_error'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "Error when calling 'abc': check error_message and error_code for details." 
+ assert msg == exc.value.args[0]['msg'] + assert '-error_message-' == exc.value.args[0]['error_message'] + assert '-error_code-' == exc.value.args[0]['error_code'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_run_any_other_error(mock_request, patch_ansible): + ''' We don't validate the method name, so ANYthing goes ''' + args = dict(set_default_args()) + args['method'] = 'ANY' + args['body'] = {'bkey1': 'bitem1', 'bkey2': 'bitem2'} + args['query'] = {'qkey1': 'qitem1', 'qkey2': 'qitem2'} + set_module_args(args) + mock_request.side_effect = [ + SRR['generic_error'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "Error when calling 'abc': Expected error" + assert msg == exc.value.args[0]['msg'] + assert 'Expected error' == exc.value.args[0]['error_message'] + assert exc.value.args[0]['error_code'] is None + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_run_post_async_no_job(mock_request, patch_ansible): + ''' POST async, but returns immediately ''' + args = dict(set_default_args()) + args['method'] = 'POST' + args['body'] = {'bkey1': 'bitem1', 'bkey2': 'bitem2'} + args['query'] = {'qkey1': 'qitem1', 'qkey2': 'qitem2'} + args['wait_for_completion'] = True + set_module_args(args) + mock_request.side_effect = [ + SRR['empty_good'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 1 + headers = my_obj.rest_api.build_headers(accept='application/json') + args['query'].update({'return_timeout': 30}) + expected_call = call('POST', 'abc', args['query'], json=args['body'], headers=headers, files=None) + assert expected_call in mock_request.mock_calls + + 
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_run_post_async_with_job(mock_request, patch_ansible): + ''' POST async, but returns immediately ''' + args = dict(set_default_args()) + args['method'] = 'POST' + args['body'] = {'bkey1': 'bitem1', 'bkey2': 'bitem2'} + args['query'] = {'qkey1': 'qitem1', 'qkey2': 'qitem2'} + args['wait_for_completion'] = True + set_module_args(args) + mock_request.side_effect = [ + SRR['job_id_record'], + SRR['job_response_record'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 + headers = my_obj.rest_api.build_headers(accept='application/json') + args['query'].update({'return_timeout': 30}) + expected_call = call('POST', 'abc', args['query'], json=args['body'], headers=headers, files=None) + assert expected_call in mock_request.mock_calls + + +# patch time to not wait between job retries +@patch('time.sleep') +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_run_patch_async_with_job_loop(mock_request, mock_sleep, patch_ansible): + ''' POST async, but returns immediately ''' + args = dict(set_default_args()) + args['method'] = 'PATCH' + args['body'] = {'bkey1': 'bitem1', 'bkey2': 'bitem2'} + args['query'] = {'qkey1': 'qitem1', 'qkey2': 'qitem2'} + args['wait_for_completion'] = True + set_module_args(args) + mock_request.side_effect = [ + SRR['job_id_record'], + SRR['job_response_record_running'], + SRR['job_response_record'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + headers = 
my_obj.rest_api.build_headers(accept='application/json') + args['query'].update({'return_timeout': 30}) + expected_call = call('PATCH', 'abc', args['query'], json=args['body'], headers=headers, files=None) + assert expected_call in mock_request.mock_calls + + +@patch('time.sleep') +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_run_negative_delete(mock_request, mock_sleep, patch_ansible): + ''' POST async, but returns immediately ''' + args = dict(set_default_args()) + args['method'] = 'DELETE' + args['body'] = {'bkey1': 'bitem1', 'bkey2': 'bitem2'} + args['query'] = {'qkey1': 'qitem1', 'qkey2': 'qitem2'} + args['wait_for_completion'] = True + set_module_args(args) + mock_request.side_effect = [ + SRR['job_id_record'], + SRR['job_response_record_running'], + SRR['job_response_record_failure'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "Error when calling 'abc': Forcing some error for UT." + assert msg == exc.value.args[0]['msg'] + assert 'Forcing some error for UT.' 
== exc.value.args[0]['error_message'] + assert exc.value.args[0]['error_code'] is None + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + headers = my_obj.rest_api.build_headers(accept='application/json') + args['query'].update({'return_timeout': 30}) + expected_call = call('DELETE', 'abc', args['query'], json=None, headers=headers) + assert expected_call in mock_request.mock_calls + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_run_any_async(mock_request, patch_ansible): + ''' We don't validate the method name, so ANYthing goes ''' + args = dict(set_default_args()) + args['method'] = 'ANY' + args['body'] = {'bkey1': 'bitem1', 'bkey2': 'bitem2'} + args['query'] = {'qkey1': 'qitem1', 'qkey2': 'qitem2'} + args['files'] = {'fkey1': 'fitem1', 'fkey2': 'fitem2'} + args['wait_for_completion'] = True + set_module_args(args) + mock_request.side_effect = [ + SRR['empty_good'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 1 + headers = my_obj.rest_api.build_headers(accept='application/json') + expected_call = call('ANY', 'abc', args['query'], args['body'], headers, args['files']) + assert expected_call in mock_request.mock_calls + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_run_main(mock_request, patch_ansible): + ''' We don't validate the method name, so ANYthing goes ''' + args = dict(set_default_args()) + args['method'] = 'ANY' + args['body'] = {'bkey1': 'bitem1', 'bkey2': 'bitem2'} + args['query'] = {'qkey1': 'qitem1', 'qkey2': 'qitem2'} + args['wait_for_completion'] = True + set_module_args(args) + mock_request.side_effect = [ + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as 
exc: + my_main() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 1 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_build_headers(mock_request, patch_ansible): + ''' create cluster ''' + args = dict(set_default_args()) + set_module_args(args) + my_obj = my_module() + headers = my_obj.build_headers() + # TODO: in UT (and only in UT) module._name is not set properly. It shows as basic.py instead of 'na_ontap_restit' + assert headers == {'X-Dot-Client-App': 'basic.py/%s' % netapp_utils.COLLECTION_VERSION, 'accept': 'application/json'} + args['hal_linking'] = True + set_module_args(args) + my_obj = my_module() + headers = my_obj.build_headers() + assert headers == {'X-Dot-Client-App': 'basic.py/%s' % netapp_utils.COLLECTION_VERSION, 'accept': 'application/hal+json'} + # Accept header + args['accept_header'] = "multipart/form-data" + set_module_args(args) + my_obj = my_module() + headers = my_obj.build_headers() + assert headers == {'X-Dot-Client-App': 'basic.py/%s' % netapp_utils.COLLECTION_VERSION, 'accept': 'multipart/form-data'} diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_buckets.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_buckets.py new file mode 100644 index 000000000..2e15239da --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_buckets.py @@ -0,0 +1,739 @@ +# (c) 2022-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from 
ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, \ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_s3_buckets \ + import NetAppOntapS3Buckets as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'nas_s3_bucket': (200, {"records": [{ + 'comment': '', + 'name': 'carchi-test-bucket1', + 'nas_path': '/', + 'policy': { + 'statements': [ + { + 'actions': ['GetObject', 'PutObject', 'DeleteObject', 'ListBucket'], + 'conditions': [ + {'operator': 'ip_address', 'source_ips': ['1.1.1.1/32', '1.2.2.0/24']}, + ], + 'effect': 'deny', + 'principals': [], + 'resources': ['carchi-test-bucket1', 'carchi-test-bucket1/*'], + 'sid': 1 + } + ] + }, + 'svm': { + 'name': 'ansibleSVM', + 'uuid': '685bd228' + }, + 'type': 'nas', + 'uuid': '3e5c4ac8'}], "num_records": 1}, None), + 'nas_s3_bucket_modify': (200, {"records": [{ + 'comment': '', + 'name': 'carchi-test-bucket1', + 'nas_path': '/', + 'policy': {'statements': []}, + 'svm': { + 'name': 'ansibleSVM', + 'uuid': '685bd228' + }, + 'type': 'nas', + 'uuid': '3e5c4ac8'}], "num_records": 1}, None), + 's3_bucket_more_policy': (200, {"records": [{ + 'comment': 'carchi8py was here again', + 'name': 'bucket1', + 'policy': { + 'statements': [ + { + "sid": 1, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "conditions": [{"operator": "ip_address", "source_ips": ["1.1.1.1/32", "1.2.2.0/24"]}], + "principals": ["user1", "user2"], + 
"resources": ["bucket1", "bucket1/*"] + }, + { + "sid": 2, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "conditions": [{"operator": "ip_address", "source_ips": ["1.1.1.1/32", "1.2.2.0/24"]}], + "principals": ["user1", "user2"], + "resources": ["bucket1", "bucket1/*"] + } + ] + }, + 'qos_policy': { + 'max_throughput_iops': 100, + 'max_throughput_mbps': 150, + 'min_throughput_iops': 0, + 'min_throughput_mbps': 0, + 'name': 'ansibleSVM_auto_gen_policy_9be26687_2849_11ed_9696_005056b3b297', + 'uuid': '9be28517-2849-11ed-9696-005056b3b297' + }, + 'size': 938860800, + 'svm': {'name': 'ansibleSVM', 'uuid': '969ansi97'}, + 'uuid': '9bdefd59-2849-11ed-9696-005056b3b297', + 'type': 's3', + 'volume': {'uuid': '1cd8a442-86d1-11e0-abcd-123478563412'}}], "num_records": 1}, None), + 's3_bucket_without_condition': (200, {"records": [{ + 'comment': 'carchi8py was here again', + 'name': 'bucket1', + 'policy': { + 'statements': [ + { + "sid": 1, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "principals": ["user1", "user2"], + "resources": ["bucket1", "bucket1/*"] + }, + { + "sid": 2, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "principals": ["user1", "user2"], + "resources": ["bucket1", "bucket1/*"] + } + ] + }, + 'qos_policy': { + 'max_throughput_iops': 100, + 'max_throughput_mbps': 150, + 'min_throughput_iops': 0, + 'min_throughput_mbps': 0, + 'name': 'ansibleSVM_auto_gen_policy_9be26687_2849_11ed_9696_005056b3b297', + 'uuid': '9be28517-2849-11ed-9696-005056b3b297' + }, + 'size': 938860800, + 'svm': {'name': 'ansibleSVM', 'uuid': '969ansi97'}, + 'uuid': '9bdefd59-2849-11ed-9696-005056b3b297', + 'volume': {'uuid': '1cd8a442-86d1-11e0-abcd-123478563412'}}], "num_records": 1}, None), + 's3_bucket_9_10': (200, { + "logical_used_size": 0, + "uuid": "414b29a1-3b26-11e9-bd58-0050568ea055", + "size": 1677721600, + "protection_status": 
{"destination": {}}, + "constituents_per_aggregate": 4, + "qos_policy": { + "max_throughput_iops": 10000, + "max_throughput_mbps": 500, + "name": "performance", + "min_throughput_iops": 2000, + "min_throughput_mbps": 500, + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }, + "policy": { + "statements": [ + { + "sid": "FullAccessToUser1", + "resources": ["bucket1", "bucket1/*"], + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "allow", + "conditions": [ + { + "operator": "ip-address", + "max_keys": ["1000"], + "delimiters": ["/"], + "source-ips": ["1.1.1.1", "1.2.2.0/24"], + "prefixes": ["pref"], + "usernames": ["user1"] + } + ], + "principals": ["user1", "group/grp1"] + } + ] + }, + "storage_service_level": "value", + "audit_event_selector": {"access": "all", "permission": "all"}, + "name": "bucket1", + "comment": "S3 bucket.", + "svm": {"name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"}, + "volume": {"uuid": "1cd8a442-86d1-11e0-abcd-123478563412"} + }, None), + 's3_bucket_9_8': (200, { + "logical_used_size": 0, + "uuid": "414b29a1-3b26-11e9-bd58-0050568ea055", + "size": 1677721600, + "protection_status": {"destination": {}}, + "constituents_per_aggregate": 4, + "qos_policy": { + "max_throughput_iops": 10000, + "max_throughput_mbps": 500, + "name": "performance", + "min_throughput_iops": 2000, + "min_throughput_mbps": 500, + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }, + "policy": { + "statements": [ + { + "sid": "FullAccessToUser1", + "resources": ["bucket1", "bucket1/*"], + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "allow", + "conditions": [ + { + "operator": "ip-address", + "max_keys": ["1000"], + "delimiters": ["/"], + "source-ips": ["1.1.1.1", "1.2.2.0/24"], + "prefixes": ["pref"], + "usernames": ["user1"] + } + ], + "principals": ["user1", "group/grp1"] + } + ] + }, + "storage_service_level": "value", + "name": "bucket1", + "comment": "S3 bucket.", + "svm": {"name": 
"svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"}, + "volume": {"uuid": "1cd8a442-86d1-11e0-abcd-123478563412"} + }, None), + 'volume_info': (200, { + "aggregates": [{"name": "aggr1", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412"}], + }, None), +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'bucket1', + 'vserver': 'vserver' +} + +POLICY_ARGS = { + "statements": [{ + "sid": "FullAccessToUser1", + "resources": ["bucket1", "bucket1/*"], + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "allow", + "conditions": [ + { + "operator": "ip_address", + "max_keys": ["1000"], + "delimiters": ["/"], + "source_ips": ["1.1.1.1", "1.2.2.0/24"], + "prefixes": ["pref"], + "usernames": ["user1"] + } + ], + "principals": ["user1", "group/grp1"] + }] +} + +REAL_POLICY_ARGS = { + "statements": [{ + "sid": "FullAccessToUser1", + "resources": ["bucket1", "bucket1/*"], + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "allow", + "conditions": [{"operator": "ip_address", "source_ips": ["1.1.1.1", "1.2.2.0/24"]}], + "principals": ["user1", "group/grp1"] + }] +} + +REAL_POLICY_WTIH_NUM_ARGS = { + "statements": [{ + "sid": 1, + "resources": ["bucket1", "bucket1/*"], + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "allow", + "conditions": [{"operator": "ip_address", "source_ips": ["1.1.1.1", "1.2.2.0/24"]}], + "principals": ["user1", "group/grp1"] + }] +} + +MODIFY_POLICY_ARGS = { + "statements": [{ + "sid": "FullAccessToUser1", + "resources": ["bucket1", "bucket1/*"], + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "allow", + "conditions": [ + { + "operator": "ip_address", + "max_keys": ["100"], + "delimiters": ["/"], + "source_ips": ["2.2.2.2", "1.2.2.0/24"], + "prefixes": ["pref"], + "usernames": ["user2"] + } + ], + "principals": ["user1", "group/grp1"] + }] +} + + 
+MULTIPLE_POLICY_STATEMENTS = { + "statements": [ + { + "sid": 1, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "conditions": [{"operator": "ip_address", "source_ips": ["1.1.1.1", "1.2.2.0/24"]}], + "principals": ["user1", "user2"], + "resources": ["*"] + }, + { + "sid": 2, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "conditions": [{"operator": "ip_address", "source_ips": ["1.1.1.1", "1.2.2.0/24"]}], + "principals": ["user1", "user2"], + "resources": ["*"] + } + ] +} + + +SAME_POLICY_STATEMENTS = { + "statements": [ + { + "sid": 1, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "conditions": [{"operator": "ip_address", "source_ips": ["1.1.1.1", "1.2.2.0/24"]}], + "principals": ["user1", "user2"], + "resources": ["*"] + }, + { + "sid": 1, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "conditions": [{"operator": "ip_address", "source_ips": ["1.1.1.1", "1.2.2.0/24"]}], + "principals": ["user1", "user2"], + "resources": ["*"] + }, + ] +} + + +MULTIPLE_POLICY_CONDITIONS = { + "statements": [ + { + "sid": 1, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "conditions": [ + {"operator": "ip_address", "source_ips": ["1.1.1.1", "1.2.2.0/24"]}, + {"operator": "not_ip_address", "source_ips": ["2.1.1.1", "1.2.2.0/24"]} + ], + "principals": ["user1", "user2"], + "resources": ["*"] + }, + { + "sid": 2, + "actions": ["GetObject", "PutObject", "DeleteObject", "ListBucket"], + "effect": "deny", + "conditions": [{"operator": "ip_address", "source_ips": ["1.1.1.1", "1.2.2.0/24"]}], + "principals": ["user1", "user2"], + "resources": ["*"] + } + ] +} + + +NAS_S3_BUCKET = { + 'comment': '', + 'name': 'carchi-test-bucket1', + 'nas_path': '/', + 'policy': { + 'statements': [ + { + 'actions': ['GetObject', 'PutObject', 'DeleteObject', 'ListBucket'], + 
'conditions': [{'operator': 'ip_address', 'source_ips': ['1.1.1.1/32', '1.2.2.0/24']}], + 'effect': 'deny', + 'principals': [], + 'resources': ['carchi-test-bucket1', 'carchi-test-bucket1/*'], + 'sid': 1 + } + ] + }, + 'vserver': 'ansibleSVM', + 'type': 'nas' +} + + +QOS_ARGS = { + "max_throughput_iops": 10000, + "max_throughput_mbps": 500, + "name": "performance", + "min_throughput_iops": 2000, + "min_throughput_mbps": 500, +} + +MODIFY_QOS_ARGS = { + "max_throughput_iops": 20000, + "max_throughput_mbps": 400, + "name": "performance", + "min_throughput_iops": 3000, + "min_throughput_mbps": 400, +} + +AUDIT_EVENT = { + "access": "all", + "permission": "all" +} + +MODIFY_AUDIT_EVENT = { + "access": "read", + "permission": "allow" +} + + +def test_low_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']) + ]) + error = create_module(my_module, DEFAULT_ARGS, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error: na_ontap_s3_bucket only supports REST, and requires ONTAP 9.8.0 or later. Found: 9.7.0.' + assert msg in error + + +def test_get_s3_bucket_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['empty_records']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_s3_bucket() is None + + +def test_get_s3_bucket_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching S3 bucket bucket1: calling: protocols/s3/buckets: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_s3_bucket, 'fail')['msg'] + + +def test_get_s3_bucket_9_8(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_9_8']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_s3_bucket() is not None + + +def test_get_s3_bucket_9_10(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_9_10']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_s3_bucket() is not None + + +def test_create_s3_bucket_9_8(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'protocols/s3/buckets', SRR['empty_records']), + ('POST', 'protocols/s3/buckets', SRR['empty_good']) + ]) + module_args = {'comment': 'carchi8py was here', + 'aggregates': ['aggr1'], + 'constituents_per_aggregate': 4, + 'size': 838860800, + 'policy': POLICY_ARGS, + 'qos_policy': QOS_ARGS} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_s3_bucket_9_10_and_9_12(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['empty_records']), + ('POST', 'protocols/s3/buckets', SRR['empty_good']), + # create with type + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/s3/buckets', SRR['empty_records']), + ('POST', 'protocols/s3/buckets', SRR['empty_good']) + ]) + module_args = {'comment': 'carchi8py was here', + 'aggregates': ['aggr1'], + 'constituents_per_aggregate': 4, + 'size': 838860800, + 'policy': POLICY_ARGS, + 'qos_policy': QOS_ARGS, + 'audit_event_selector': AUDIT_EVENT} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + module_args['type'] = 's3' + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_s3_nas_bucket_create_modify_delete(): + register_responses([ + 
('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/s3/buckets', SRR['empty_records']), + ('POST', 'protocols/s3/buckets', SRR['success']), + # idemptent check + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/s3/buckets', SRR['nas_s3_bucket']), + # modify empty policy + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/s3/buckets', SRR['nas_s3_bucket']), + ('PATCH', 'protocols/s3/buckets/685bd228/3e5c4ac8', SRR['success']), + # idempotent check + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/s3/buckets', SRR['nas_s3_bucket_modify']), + # delete nas bucket + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/s3/buckets', SRR['nas_s3_bucket_modify']), + ('DELETE', 'protocols/s3/buckets/685bd228/3e5c4ac8', SRR['success']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, NAS_S3_BUCKET)['changed'] + assert create_and_apply(my_module, DEFAULT_ARGS, NAS_S3_BUCKET)['changed'] is False + NAS_S3_BUCKET['policy']['statements'] = [] + assert create_and_apply(my_module, DEFAULT_ARGS, NAS_S3_BUCKET)['changed'] + assert create_and_apply(my_module, DEFAULT_ARGS, NAS_S3_BUCKET)['changed'] is False + NAS_S3_BUCKET['state'] = 'absent' + assert create_and_apply(my_module, DEFAULT_ARGS, NAS_S3_BUCKET)['changed'] + + +def test_modify_s3_bucket_type_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_more_policy']) + ]) + assert 'Error: cannot modify bucket type.' 
in create_and_apply(my_module, DEFAULT_ARGS, {'type': 'nas'}, fail=True)['msg'] + + +def test_create_with_real_policy_s3_bucket_9_10(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['empty_records']), + ('POST', 'protocols/s3/buckets', SRR['empty_good']) + ]) + module_args = {'comment': 'carchi8py was here', + 'aggregates': ['aggr1'], + 'constituents_per_aggregate': 4, + 'size': 838860800, + 'policy': REAL_POLICY_ARGS, + 'qos_policy': QOS_ARGS, + 'audit_event_selector': AUDIT_EVENT} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_with_real_policy_with_sid_as_number_s3_bucket_9_10(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['empty_records']), + ('POST', 'protocols/s3/buckets', SRR['empty_good']) + ]) + module_args = {'comment': 'carchi8py was here', + 'aggregates': ['aggr1'], + 'constituents_per_aggregate': 4, + 'size': 838860800, + 'policy': REAL_POLICY_WTIH_NUM_ARGS, + 'qos_policy': QOS_ARGS, + 'audit_event_selector': AUDIT_EVENT} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_s3_bucket_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('POST', 'protocols/s3/buckets', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'carchi8py was here' + my_obj.parameters['aggregates'] = ['aggr1'] + my_obj.parameters['constituents_per_aggregate'] = 4 + my_obj.parameters['size'] = 838860800 + error = expect_and_capture_ansible_exception(my_obj.create_s3_bucket, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error creating S3 bucket bucket1: calling: protocols/s3/buckets: got Expected error.' 
== error + + +def test_delete_s3_bucket(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_9_10']), + ('DELETE', 'protocols/s3/buckets/02c9e252-41be-11e9-81d5-00a0986138f7/414b29a1-3b26-11e9-bd58-0050568ea055', + SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_s3_bucket_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('DELETE', 'protocols/s3/buckets/02c9e252-41be-11e9-81d5-00a0986138f7/414b29a1-3b26-11e9-bd58-0050568ea055', + SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['state'] = 'absent' + my_obj.uuid = '414b29a1-3b26-11e9-bd58-0050568ea055' + my_obj.svm_uuid = '02c9e252-41be-11e9-81d5-00a0986138f7' + error = expect_and_capture_ansible_exception(my_obj.delete_s3_bucket, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error deleting S3 bucket bucket1: calling: ' \ + 'protocols/s3/buckets/02c9e252-41be-11e9-81d5-00a0986138f7/414b29a1-3b26-11e9-bd58-0050568ea055: got Expected error.' 
== error + + +def test_modify_s3_bucket_9_8(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_9_8']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + ('PATCH', 'protocols/s3/buckets/02c9e252-41be-11e9-81d5-00a0986138f7/414b29a1-3b26-11e9-bd58-0050568ea055', + SRR['empty_good']) + ]) + module_args = {'comment': 'carchi8py was here', + 'size': 943718400, + 'policy': MODIFY_POLICY_ARGS, + 'qos_policy': MODIFY_QOS_ARGS} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_s3_bucket_9_10(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_9_10']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + ('PATCH', 'protocols/s3/buckets/02c9e252-41be-11e9-81d5-00a0986138f7/414b29a1-3b26-11e9-bd58-0050568ea055', + SRR['empty_good']) + ]) + module_args = {'comment': 'carchi8py was here', + 'size': 943718400, + 'policy': MODIFY_POLICY_ARGS, + 'qos_policy': MODIFY_QOS_ARGS, + 'audit_event_selector': MODIFY_AUDIT_EVENT} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_s3_bucket_policy_statements(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_9_10']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + ('PATCH', 'protocols/s3/buckets/02c9e252-41be-11e9-81d5-00a0986138f7/414b29a1-3b26-11e9-bd58-0050568ea055', + SRR['empty_good']), + # add multiple statements. + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_more_policy']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + # try to modify with identical statements. 
+ ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_more_policy']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + ('PATCH', 'protocols/s3/buckets/969ansi97/9bdefd59-2849-11ed-9696-005056b3b297', SRR['empty_good']), + # empty policy statements. + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_9_10']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + ('PATCH', 'protocols/s3/buckets/02c9e252-41be-11e9-81d5-00a0986138f7/414b29a1-3b26-11e9-bd58-0050568ea055', + SRR['empty_good']) + ]) + module_args = {'policy': MULTIPLE_POLICY_STATEMENTS} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + module_args = {'policy': SAME_POLICY_STATEMENTS} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert create_and_apply(my_module, DEFAULT_ARGS, {'policy': {'statements': []}}) + + +def test_modify_s3_bucket_policy_statements_conditions(): + register_responses([ + # modify if desired statements has conditions and current statement conditions is None. + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_without_condition']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + ('PATCH', 'protocols/s3/buckets/969ansi97/9bdefd59-2849-11ed-9696-005056b3b297', SRR['empty_good']), + # empty policy statements conditions. + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_more_policy']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + ('PATCH', 'protocols/s3/buckets/969ansi97/9bdefd59-2849-11ed-9696-005056b3b297', SRR['empty_good']), + # add multiple conditions. 
+ ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_more_policy']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + ('PATCH', 'protocols/s3/buckets/969ansi97/9bdefd59-2849-11ed-9696-005056b3b297', SRR['empty_good']) + ]) + module_args = {'policy': MULTIPLE_POLICY_STATEMENTS} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + module_args = {'policy': MULTIPLE_POLICY_STATEMENTS.copy()} + module_args['policy']['statements'][0]['conditions'] = [] + module_args['policy']['statements'][1]['conditions'] = [] + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + module_args = {'policy': MULTIPLE_POLICY_CONDITIONS} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_when_try_set_empty_dict_to_policy(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + module_args = {'policy': {'statements': [{}]}} + assert 'cannot set empty dict' in create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_modify_s3_bucket_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('PATCH', 'protocols/s3/buckets/02c9e252-41be-11e9-81d5-00a0986138f7/414b29a1-3b26-11e9-bd58-0050568ea055', + SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'carchi8py was here' + my_obj.parameters['size'] = 943718400 + current = {'comment': 'carchi8py was here', 'size': 943718400} + my_obj.uuid = '414b29a1-3b26-11e9-bd58-0050568ea055' + my_obj.svm_uuid = '02c9e252-41be-11e9-81d5-00a0986138f7' + error = expect_and_capture_ansible_exception(my_obj.modify_s3_bucket, 'fail', current)['msg'] + print('Info: %s' % error) + assert 'Error modifying S3 bucket bucket1: calling: ' \ + 'protocols/s3/buckets/02c9e252-41be-11e9-81d5-00a0986138f7/414b29a1-3b26-11e9-bd58-0050568ea055: got Expected error.' 
== error + + +def test_new_aggr_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_9_8']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['volume_info']), + ]) + module_args = {'aggregates': ['aggr2']} + error = 'Aggregates cannot be modified for S3 bucket bucket1' + assert create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_volume_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'protocols/s3/buckets', SRR['s3_bucket_9_8']), + ('GET', 'storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412', SRR['generic_error']), + ]) + module_args = {'aggregates': ['aggr2']} + error = 'calling: storage/volumes/1cd8a442-86d1-11e0-abcd-123478563412: got Expected error.' + assert create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_groups.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_groups.py new file mode 100644 index 000000000..6b204eadd --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_groups.py @@ -0,0 +1,319 @@ +# (c) 2022-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, 
register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_s3_groups \ + import NetAppOntapS3Groups as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 's3_group_no_user_policy': (200, { + "records": [ + { + "comment": "Admin group", + "name": "carchi8py_group", + "id": "5", + "svm": { + "name": "svm1", + "uuid": "e3cb5c7f-cd20" + } + } + ], + "num_records": 1 + }, None), + 's3_group': (200, { + "records": [ + { + "comment": "Admin group", + "name": "carchi8py_group", + "users": [ + { + "name": "carchi8py", + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + } + ], + "policies": [ + { + "name": "my_policy", + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + } + ], + "id": "5", + "svm": { + "name": "svm1", + "uuid": "e3cb5c7f-cd20" + } + } + ], + "num_records": 1 + }, None), + 's3_group2': (200, { + "records": [ + { + "comment": "Admin group", + "name": "carchi8py_group", + "users": [ + { + "name": "carchi8py", + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + }, + { + "name": "user2", + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + } + ], + "policies": [ + { + "name": "my_policy", + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + }, + { + "name": "my_policy2", + "_links": { + "self": { + "href": "/api/resourcelink" + } + }, + } + ], + "id": "5", + "svm": { + "name": "svm1", + "uuid": "e3cb5c7f-cd20" + } + } + ], + "num_records": 1 + }, None), + 'svm_uuid': (200, {"records": [ + { + 'uuid': 'e3cb5c7f-cd20' + }], "num_records": 1}, None) +}) + +USER = { + 'name': 'carchi8py' +} + +POLICY = { + 'name': 'my_policy' +} + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 
'username', + 'password': 'password', + 'name': 'carchi8py_group', + 'vserver': 'vserver', + 'users': [USER], + 'policies': [POLICY] +} + + +def test_low_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_97']) + ]) + error = create_module(my_module, DEFAULT_ARGS, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error: na_ontap_s3_groups only supports REST, and requires ONTAP 9.8.0 or later. Found: 9.7.0.' + assert msg in error + + +def test_get_s3_groups_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['empty_records']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_s3_groups() is None + + +def test_get_s3_groups_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching S3 groups carchi8py_group: calling: protocols/s3/services/e3cb5c7f-cd20/groups: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_s3_groups, 'fail')['msg'] + + +def test_create_s3_group(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['empty_records']), + ('POST', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['empty_good']) + ]) + module_args = { + 'comment': 'this is a s3 group', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_s3_group_with_multi_user_policies(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['empty_records']), + ('POST', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['empty_good']) + ]) + module_args = { + 'comment': 'this is a s3 group', + 'users': [{'name': 'carchi8py'}, {'name': 'foo'}], + 'policies': [{'name': 'policy1'}, {'name': 'policy2'}] + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_s3_group_error_no_users(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['empty_records']), + ]) + args = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'carchi8py_group', + 'vserver': 'vserver', + 'policies': [POLICY] + } + error = create_and_apply(my_module, args, {}, 'fail')['msg'] + print('Info: %s' % error) + assert 'policies and users are required for a creating a group.' 
== error + + +def test_create_s3_group_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('POST', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'this is a s3 group' + my_obj.svm_uuid = 'e3cb5c7f-cd20' + error = expect_and_capture_ansible_exception(my_obj.create_s3_groups, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error creating S3 groups carchi8py_group: calling: protocols/s3/services/e3cb5c7f-cd20/groups: got Expected error.' == error + + +def test_delete_s3_group(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['s3_group']), + ('DELETE', 'protocols/s3/services/e3cb5c7f-cd20/groups/5', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_s3_group_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('DELETE', 'protocols/s3/services/e3cb5c7f-cd20/groups/5', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['state'] = 'absent' + my_obj.svm_uuid = 'e3cb5c7f-cd20' + my_obj.group_id = 5 + error = expect_and_capture_ansible_exception(my_obj.delete_s3_groups, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error deleting S3 group carchi8py_group: calling: protocols/s3/services/e3cb5c7f-cd20/groups/5: got Expected error.' 
== error + + +def test_modify_s3_group(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['s3_group']), + ('PATCH', 'protocols/s3/services/e3cb5c7f-cd20/groups/5', SRR['empty_good']) + ]) + module_args = { + 'comment': 'this is a modify comment', + 'users': [{'name': 'carchi8py'}, {'name': 'user2'}], + 'policies': [{'name': 'policy1'}, {'name': 'policy2'}] + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_s3_group_no_current_user_policy(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/groups', SRR['s3_group_no_user_policy']), + ('PATCH', 'protocols/s3/services/e3cb5c7f-cd20/groups/5', SRR['empty_good']) + ]) + module_args = { + 'users': [{'name': 'carchi8py'}, {'name': 'user2'}], + 'policies': [{'name': 'policy1'}, {'name': 'policy2'}] + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_s3_group_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('PATCH', 'protocols/s3/services/e3cb5c7f-cd20/groups/5', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'this is a modified s3 service' + current = {'comment': 'this is a modified s3 service'} + my_obj.svm_uuid = 'e3cb5c7f-cd20' + my_obj.group_id = 5 + error = expect_and_capture_ansible_exception(my_obj.modify_s3_groups, 'fail', current)['msg'] + print('Info: %s' % error) + assert 'Error modifying S3 group carchi8py_group: calling: protocols/s3/services/e3cb5c7f-cd20/groups/5: got Expected error.' 
== error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_policies.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_policies.py new file mode 100644 index 000000000..eacb4e8c1 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_policies.py @@ -0,0 +1,220 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_s3_policies \ + import NetAppOntapS3Policies as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 's3_policy': (200, { + "records": [ + { + "statements": [ + { + "sid": "FullAccessToBucket1", + "resources": [ + "bucket1", + "bucket1/*" + ], + "index": 0, + "actions": [ + "GetObject", + "PutObject", + "DeleteObject", + "ListBucket" + ], + "effect": "allow" + } + ], + "comment": "S3 policy.", + "name": "Policy1", + "svm": { + "name": "policy_name", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + "read-only": True + } + ], + "num_records": 1 + }, 
None), + 'svm_uuid': (200, {"records": [ + { + 'uuid': 'e3cb5c7f-cd20' + }], "num_records": 1}, None) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'policy_name', + 'vserver': 'vserver' +} + +STATEMENT = { + "sid": "FullAccessToUser1", + "resources": [ + "bucket1", + "bucket1/*" + ], + "actions": [ + "GetObject", + "PutObject", + "DeleteObject", + "ListBucket" + ], + "effect": "allow", +} + +STATEMENT2 = { + "sid": "FullAccessToUser1", + "resources": [ + "bucket1", + "bucket1/*", + "bucket2", + "bucket2/*" + ], + "actions": [ + "GetObject", + "PutObject", + "DeleteObject", + "ListBucket" + ], + "effect": "allow", +} + + +def test_low_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_97']) + ]) + error = create_module(my_module, DEFAULT_ARGS, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error: na_ontap_s3_policies only supports REST, and requires ONTAP 9.8.0 or later. Found: 9.7.0.' + assert msg in error + + +def test_get_s3_policies_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/policies', SRR['empty_records']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_s3_policies() is None + + +def test_get_s3_policies_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/policies', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching S3 policy policy_name: calling: protocols/s3/services/e3cb5c7f-cd20/policies: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_s3_policies, 'fail')['msg'] + + +def test_create_s3_policies(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/policies', SRR['empty_records']), + ('POST', 'protocols/s3/services/e3cb5c7f-cd20/policies', SRR['empty_good']) + ]) + module_args = { + 'comment': 'this is a s3 user', + 'statements': [STATEMENT] + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_s3_policies_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('POST', 'protocols/s3/services/e3cb5c7f-cd20/policies', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'this is a s3 policies' + my_obj.parameters['statements'] = [STATEMENT] + my_obj.svm_uuid = 'e3cb5c7f-cd20' + error = expect_and_capture_ansible_exception(my_obj.create_s3_policies, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error creating S3 policy policy_name: calling: protocols/s3/services/e3cb5c7f-cd20/policies: got Expected error.' 
== error + + +def test_delete_s3_policies(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/policies', SRR['s3_policy']), + ('DELETE', 'protocols/s3/services/e3cb5c7f-cd20/policies/policy_name', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_s3_policies_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('DELETE', 'protocols/s3/services/e3cb5c7f-cd20/policies/policy_name', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['state'] = 'absent' + my_obj.svm_uuid = 'e3cb5c7f-cd20' + error = expect_and_capture_ansible_exception(my_obj.delete_s3_policies, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error deleting S3 policy policy_name: calling: protocols/s3/services/e3cb5c7f-cd20/policies/policy_name: got Expected error.' 
== error + + +def test_modify_s3_policies(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/policies', SRR['s3_policy']), + ('PATCH', 'protocols/s3/services/e3cb5c7f-cd20/policies/policy_name', SRR['empty_good']) + ]) + module_args = {'comment': 'this is a modify comment', 'statements': [STATEMENT2]} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_s3_policies_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('PATCH', 'protocols/s3/services/e3cb5c7f-cd20/policies/policy_name', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'this is a modified s3 service' + my_obj.parameters['statements'] = [STATEMENT2] + current = {'comment': 'this is a modified s3 service', 'statements': [STATEMENT2]} + my_obj.svm_uuid = 'e3cb5c7f-cd20' + error = expect_and_capture_ansible_exception(my_obj.modify_s3_policies, 'fail', current)['msg'] + print('Info: %s' % error) + assert 'Error modifying S3 policy policy_name: calling: protocols/s3/services/e3cb5c7f-cd20/policies/policy_name: got Expected error.' 
== error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_services.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_services.py new file mode 100644 index 000000000..fce59093a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_services.py @@ -0,0 +1,176 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_s3_services \ + import NetAppOntapS3Services as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 's3_service': (200, { + "svm": { + "uuid": "08c8a385-b1ac-11ec-bd2e-005056b3b297", + "name": "ansibleSVM", + }, + "name": "carchi-test", + "enabled": True, + "buckets": [ + { + "name": "carchi-test-bucket2" + }, + { + "name": "carchi-test-bucket" + } + ], + "users": [ + { + "name": "root" + } + ], + "comment": "this is a s3 service", + "certificate": { + "name": "ansibleSVM_16E1C1284D889609", + }, + }, None) +}) + +DEFAULT_ARGS = { + 'hostname': 
'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'service1', + 'vserver': 'vserver' +} + + +def test_low_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_97']) + ]) + error = create_module(my_module, DEFAULT_ARGS, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error: na_ontap_s3_services only supports REST, and requires ONTAP 9.8.0 or later. Found: 9.7.0.' + assert msg in error + + +def test_get_s3_service_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/services', SRR['empty_records']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_s3_service() is None + + +def test_get_s3_service_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/services', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching S3 service service1: calling: protocols/s3/services: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_s3_service, 'fail')['msg'] + + +def test_create_s3_service(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/services', SRR['empty_records']), + ('POST', 'protocols/s3/services', SRR['empty_good']) + ]) + module_args = { + 'enabled': True, + 'comment': 'this is a s3 service', + 'certificate_name': 'cert1', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_s3_service_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('POST', 'protocols/s3/services', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['enabled'] = True + my_obj.parameters['comment'] = 'this is a s3 service' + my_obj.parameters['certificate_name'] = 'cert1' + error = expect_and_capture_ansible_exception(my_obj.create_s3_service, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error creating S3 service service1: calling: protocols/s3/services: got Expected error.' 
== error + + +def test_delete_s3_service(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/services', SRR['s3_service']), + ('DELETE', 'protocols/s3/services/08c8a385-b1ac-11ec-bd2e-005056b3b297', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_s3_service_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('DELETE', 'protocols/s3/services/08c8a385-b1ac-11ec-bd2e-005056b3b297', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['state'] = 'absent' + my_obj.svm_uuid = '08c8a385-b1ac-11ec-bd2e-005056b3b297' + error = expect_and_capture_ansible_exception(my_obj.delete_s3_service, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error deleting S3 service service1: calling: protocols/s3/services/08c8a385-b1ac-11ec-bd2e-005056b3b297: got Expected error.' 
== error + + +def test_modify_s3_service(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'protocols/s3/services', SRR['s3_service']), + ('PATCH', 'protocols/s3/services/08c8a385-b1ac-11ec-bd2e-005056b3b297', SRR['empty_good']) + ]) + module_args = {'comment': 'this is a modified s3 service', + 'enabled': False, + 'certificate_name': 'cert2', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_s3_service_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('PATCH', 'protocols/s3/services/08c8a385-b1ac-11ec-bd2e-005056b3b297', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'this is a modified s3 service' + current = {'comment': 'this is a modified s3 service'} + my_obj.svm_uuid = '08c8a385-b1ac-11ec-bd2e-005056b3b297' + error = expect_and_capture_ansible_exception(my_obj.modify_s3_service, 'fail', current)['msg'] + print('Info: %s' % error) + assert 'Error modifying S3 service service1: calling: protocols/s3/services/08c8a385-b1ac-11ec-bd2e-005056b3b297: got Expected error.' 
== error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_users.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_users.py new file mode 100644 index 000000000..71850e510 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_s3_users.py @@ -0,0 +1,194 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_s3_users \ + import NetAppOntapS3Users as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 's3_user': (200, { + "records": [ + { + "comment": "S3 user", + "name": "carchi8py", + "svm": { + "name": "svm1", + "uuid": "e3cb5c7f-cd20" + } + } + ], + "num_records": 1 + }, None), + 's3_user_created': (200, { + "records": [ + { + 'access_key': 'random_access_key', + 'secret_key': 'random_secret_key' + } + ], + "num_records": 1 + }, None), + 'svm_uuid': (200, {"records": [ + { + 'uuid': 'e3cb5c7f-cd20' + }], "num_records": 1}, None) +}) + +DEFAULT_ARGS = { + 
'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'carchi8py', + 'vserver': 'vserver' +} + + +def test_low_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_97']) + ]) + error = create_module(my_module, DEFAULT_ARGS, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error: na_ontap_s3_users only supports REST, and requires ONTAP 9.8.0 or later. Found: 9.7.0.' + assert msg in error + + +def test_get_s3_users_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/users', SRR['empty_records']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_s3_user() is None + + +def test_get_s3_users_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/users', SRR['generic_error']) + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error fetching S3 user carchi8py: calling: protocols/s3/services/e3cb5c7f-cd20/users: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_s3_user, 'fail')['msg'] + + +def test_create_s3_users(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/users', SRR['empty_records']), + ('POST', 'protocols/s3/services/e3cb5c7f-cd20/users', SRR['s3_user_created']) + ]) + module_args = { + 'comment': 'this is a s3 user', + } + result = create_and_apply(my_module, DEFAULT_ARGS, module_args) + assert result['changed'] + assert result['secret_key'] == 'random_secret_key' + assert result['access_key'] == 'random_access_key' + + +def test_create_s3_users_fail_randomly(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/users', SRR['empty_records']), + ('POST', 'protocols/s3/services/e3cb5c7f-cd20/users', SRR['empty_good']) + ]) + module_args = { + 'comment': 'this is a s3 user', + } + error = create_and_apply(my_module, DEFAULT_ARGS, module_args, 'fail')['msg'] + assert 'Error creating S3 user carchi8py' == error + + +def test_create_s3_user_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('POST', 'protocols/s3/services/e3cb5c7f-cd20/users', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'this is a s3 user' + my_obj.svm_uuid = 'e3cb5c7f-cd20' + error = expect_and_capture_ansible_exception(my_obj.create_s3_user, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error creating S3 user carchi8py: calling: protocols/s3/services/e3cb5c7f-cd20/users: got Expected error.' 
== error + + +def test_delete_s3_user(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/users', SRR['s3_user']), + ('DELETE', 'protocols/s3/services/e3cb5c7f-cd20/users/carchi8py', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_s3_user_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('DELETE', 'protocols/s3/services/e3cb5c7f-cd20/users/carchi8py', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['state'] = 'absent' + my_obj.svm_uuid = 'e3cb5c7f-cd20' + error = expect_and_capture_ansible_exception(my_obj.delete_s3_user, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error deleting S3 user carchi8py: calling: protocols/s3/services/e3cb5c7f-cd20/users/carchi8py: got Expected error.' 
== error + + +def test_modify_s3_user(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/s3/services/e3cb5c7f-cd20/users', SRR['s3_user']), + ('PATCH', 'protocols/s3/services/e3cb5c7f-cd20/users/carchi8py', SRR['empty_good']) + ]) + module_args = {'comment': 'this is a modify comment'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_s3_user_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('PATCH', 'protocols/s3/services/e3cb5c7f-cd20/users/carchi8py', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['comment'] = 'this is a modified s3 service' + current = {'comment': 'this is a modified s3 service'} + my_obj.svm_uuid = 'e3cb5c7f-cd20' + error = expect_and_capture_ansible_exception(my_obj.modify_s3_user, 'fail', current)['msg'] + print('Info: %s' % error) + assert 'Error modifying S3 user carchi8py: calling: protocols/s3/services/e3cb5c7f-cd20/users/carchi8py: got Expected error.' 
== error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_certificates.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_certificates.py new file mode 100644 index 000000000..866dd3a58 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_certificates.py @@ -0,0 +1,509 @@ +# (c) 2019-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_security_certificates """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import copy +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_certificates \ + import NetAppOntapSecurityCertificates as my_module, main as my_main # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'empty_records': (200, {'records': []}, None), + 'get_uuid': (200, {'records': [{'uuid': 'ansible'}]}, None), + 'get_multiple_records': (200, {'records': [{'uuid': 'ansible'}, {'uuid': 'second'}]}, None), + 
'error_unexpected_name': (200, None, {'message': 'Unexpected argument "name".'}), + 'error_duplicate_entry': (200, None, {'message': 'duplicate entry', 'target': 'uuid'}), + 'error_some_error': (200, None, {'message': 'some error'}), +} + +NAME_ERROR = "Error calling API: security/certificates - ONTAP 9.6 and 9.7 do not support 'name'. Use 'common_name' and 'type' as a work-around." +TYPE_ERROR = "Error calling API: security/certificates - When using 'common_name', 'type' is required." +EXPECTED_ERROR = "Error calling API: security/certificates - Expected error" + + +def set_default_args(): + return dict({ + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'name_for_certificate' + }) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + set_module_args({}) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_get_certificate_called(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['end_of_sequence'] + ] + set_module_args(set_default_args()) + my_obj = my_module() + assert my_obj.get_certificate() is not None + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_error(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + set_module_args(set_default_args()) + with pytest.raises(AnsibleFailJson) as exc: + my_main() + assert exc.value.args[0]['msg'] == EXPECTED_ERROR + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create_failed(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], # validate data vserver exist. 
+ SRR['empty_records'], # get certificate -> not found + SRR['empty_good'], + SRR['end_of_sequence'] + ] + data = { + 'type': 'client_ca', + 'vserver': 'abc', + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = 'Error creating or installing certificate: one or more of the following options are missing:' + assert exc.value.args[0]['msg'].startswith(msg) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_successful_create(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], # validate data vserver exist. + SRR['empty_records'], # get certificate -> not found + SRR['empty_good'], + SRR['end_of_sequence'] + ] + data = { + 'type': 'client_ca', + 'vserver': 'abc', + 'common_name': 'cname' + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_idempotent_create(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], # validate data vserver exist. 
+ SRR['get_uuid'], # get certificate -> found + SRR['end_of_sequence'] + ] + data = { + 'type': 'client_ca', + 'vserver': 'abc', + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_create_duplicate_entry(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_records'], # get certificate -> not found + copy.deepcopy(SRR['error_duplicate_entry']), # code under test modifies error in place + SRR['end_of_sequence'] + ] + data = { + 'type': 'client_ca', + 'common_name': 'cname' + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print('EXC', exc.value.args[0]['msg']) + for fragment in ('Error creating or installing certificate: {', + "'message': 'duplicate entry. 
Same certificate may already exist under a different name.'", + "'target': 'cluster'"): + assert fragment in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_successful_delete(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], # get certificate -> found + SRR['empty_good'], + SRR['end_of_sequence'] + ] + data = { + 'state': 'absent', + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_idempotent_delete(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_records'], # get certificate -> not found + SRR['empty_good'], + SRR['end_of_sequence'] + ] + data = { + 'state': 'absent', + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_delete(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], # get certificate -> found + SRR['error_some_error'], + SRR['end_of_sequence'] + ] + data = { + 'state': 'absent', + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "Error deleting certificate: {'message': 'some error'}" + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_multiple_records(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_multiple_records'], # 
get certificate -> 2 records! + SRR['end_of_sequence'] + ] + data = { + 'state': 'absent', + 'common_name': 'cname', + 'type': 'client_ca', + } + data.update(set_default_args()) + data.pop('name') + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "Duplicate records with same common_name are preventing safe operations: {'records': [{'uuid': 'ansible'}, {'uuid': 'second'}]}" + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_successful_sign(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['get_uuid'], # get certificate -> found + SRR['empty_good'], + SRR['end_of_sequence'] + ] + data = { + 'vserver': 'abc', + 'signing_request': 'CSR', + 'expiry_time': 'et' + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_sign(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['get_uuid'], # get certificate -> found + SRR['error_some_error'], + SRR['end_of_sequence'] + ] + data = { + 'vserver': 'abc', + 'signing_request': 'CSR', + 'expiry_time': 'et' + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "Error signing certificate: {'message': 'some error'}" + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_failed_sign_missing_ca(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['empty_records'], # get certificate -> not found + 
SRR['empty_good'], + SRR['end_of_sequence'] + ] + data = { + 'vserver': 'abc', + 'signing_request': 'CSR' + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "signing certificate with name '%s' not found on svm: %s" % (data['name'], data['vserver']) + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_failed_sign_absent(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['get_uuid'], # get certificate -> found + SRR['end_of_sequence'] + ] + data = { + 'vserver': 'abc', + 'signing_request': 'CSR', + 'state': 'absent' + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "'signing_request' is not supported with 'state' set to 'absent'" + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_failed_on_name(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['error_unexpected_name'], # get certificate -> error + SRR['end_of_sequence'] + ] + data = { + 'vserver': 'abc', + 'signing_request': 'CSR', + 'state': 'absent', + 'ignore_name_if_not_supported': False, + 'common_name': 'common_name', + 'type': 'root_ca' + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + assert exc.value.args[0]['msg'] == NAME_ERROR + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_cannot_ignore_name_error_no_common_name(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['error_unexpected_name'], # get certificate -> 
error + SRR['end_of_sequence'] + ] + data = { + 'vserver': 'abc', + 'signing_request': 'CSR', + 'state': 'absent', + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + assert exc.value.args[0]['msg'] == NAME_ERROR + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_cannot_ignore_name_error_no_type(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['error_unexpected_name'], # get certificate -> error + SRR['end_of_sequence'] + ] + data = { + 'vserver': 'abc', + 'signing_request': 'CSR', + 'state': 'absent', + 'common_name': 'common_name' + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + assert exc.value.args[0]['msg'] == TYPE_ERROR + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_ignore_name_error(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['error_unexpected_name'], # get certificate -> error + SRR['get_uuid'], # get certificate -> found + SRR['end_of_sequence'] + ] + data = { + 'vserver': 'abc', + 'signing_request': 'CSR', + 'state': 'absent', + 'common_name': 'common_name', + 'type': 'root_ca' + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + msg = "'signing_request' is not supported with 'state' set to 'absent'" + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_successful_create_name_error(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_uuid'], + SRR['error_unexpected_name'], # get certificate -> error + SRR['empty_records'], # 
get certificate -> not found + SRR['empty_good'], + SRR['end_of_sequence'] + ] + data = { + 'common_name': 'cname', + 'type': 'client_ca', + 'vserver': 'abc', + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + print(mock_request.mock_calls) + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_data_vserver_not_exist(mock_request): + mock_request.side_effect = [ + SRR['is_rest'], + SRR['empty_records'], + SRR['end_of_sequence'] + ] + data = { + 'common_name': 'cname', + 'type': 'client_ca', + 'vserver': 'abc', + } + data.update(set_default_args()) + set_module_args(data) + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + assert 'Error vserver abc does not exist or is not a data vserver.' in exc.value.args[0]['msg'] + + +def test_rest_negative_no_name_and_type(): + data = { + 'common_name': 'cname', + # 'type': 'client_ca', + 'vserver': 'abc', + } + data.update(set_default_args()) + data.pop('name') + set_module_args(data) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = "Error: 'name' or ('common_name' and 'type') are required parameters." + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_negative_ZAPI_only(mock_request): + mock_request.side_effect = [ + SRR['is_zapi'], + SRR['end_of_sequence'] + ] + set_module_args(set_default_args()) + with pytest.raises(AnsibleFailJson) as exc: + my_obj = my_module() + print(exc.value.args[0]) + msg = "na_ontap_security_certificates only supports REST, and requires ONTAP 9.6 or later. 
- Unreachable" + assert msg == exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_config.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_config.py new file mode 100644 index 000000000..1ffdfbc02 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_config.py @@ -0,0 +1,254 @@ +# (c) 2021-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +''' unit tests ONTAP Ansible module: na_ontap_security_config ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + call_main, create_module, create_and_apply, expect_and_capture_ansible_exception, AnsibleFailJson, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_config \ + import NetAppOntapSecurityConfig as security_config_module, main as my_main # module under test + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'security_config_record': (200, { + "records": [{ + "is_fips_enabled": False, + "supported_protocols": ['TLSv1.3', 'TLSv1.2', 'TLSv1.1'], + 
"supported_cipher_suites": 'TLS_RSA_WITH_AES_128_CCM_8' + }], "num_records": 1 + }, None), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + + +security_config_info = { + 'num-records': 1, + 'attributes': { + 'security-config-info': { + "interface": 'ssl', + "is-fips-enabled": False, + "supported-protocols": ['TLSv1.2', 'TLSv1.1'], + "supported-ciphers": 'ALL:!LOW:!aNULL:!EXP:!eNULL:!3DES:!DES:!RC4' + } + }, +} + + +ZRR = zapi_responses({ + 'security_config_info': build_zapi_response(security_config_info) +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'never', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + security_config_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_error_get_security_config_info(): + register_responses([ + ('ZAPI', 'security-config-get', ZRR['error']) + ]) + module_args = { + "name": 'ssl', + "is_fips_enabled": False, + "supported_protocols": ['TLSv1.2', 'TLSv1.1'] + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error getting security config for interface" + assert msg in error + + +def test_get_security_config_info(): + register_responses([ + ('security-config-get', ZRR['security_config_info']) + ]) + security_obj = create_module(security_config_module, DEFAULT_ARGS) + result = security_obj.get_security_config() + assert result + + +def test_modify_security_config_fips(): + register_responses([ + ('ZAPI', 'security-config-get', ZRR['security_config_info']), + ('ZAPI', 'security-config-modify', ZRR['success']) + ]) + module_args = { + "is_fips_enabled": True, + "supported_protocols": ['TLSv1.3', 'TLSv1.2'], + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_modify_security_config_fips(): + register_responses([ + ('ZAPI', 
'security-config-get', ZRR['security_config_info']), + ('ZAPI', 'security-config-modify', ZRR['error']) + ]) + module_args = { + "is_fips_enabled": True, + "supported_protocols": ['TLSv1.3', 'TLSv1.2'], + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert "Error modifying security config for interface" in error + + +def test_error_security_config(): + register_responses([ + ]) + module_args = { + "is_fips_enabled": True, + "supported_protocols": ['TLSv1.2', 'TLSv1.1', 'TLSv1'], + } + error = create_module(security_config_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'If fips is enabled then TLSv1 is not a supported protocol' in error + + +def test_error_security_config_supported_ciphers(): + register_responses([ + ]) + module_args = { + "is_fips_enabled": True, + "supported_ciphers": 'ALL:!LOW:!aNULL:!EXP:!eNULL:!3DES:!DES:!RC4', + } + error = create_module(security_config_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'If fips is enabled then supported ciphers should not be specified' in error + + +ARGS_REST = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always' +} + + +def test_rest_error_get(): + '''Test error rest get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', '/security', SRR['generic_error']), + ]) + module_args = { + "is_fips_enabled": False, + "supported_protocols": ['TLSv1.2', 'TLSv1.1'] + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + assert "Error on getting security config: calling: /security: got Expected error." 
in error + + +def test_rest_get_security_config(): + '''Test error rest get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', '/security', SRR['security_config_record']), + ]) + module_args = { + "is_fips_enabled": False, + "supported_protocols": ['TLSv1.2', 'TLSv1.1'] + } + security_obj = create_module(security_config_module, ARGS_REST, module_args) + result = security_obj.get_security_config_rest() + assert result + + +def test_rest_modify_security_config(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', '/security', SRR['security_config_record']), + ('PATCH', '/security', SRR['success']), + ]) + module_args = { + "is_fips_enabled": False, + "supported_protocols": ['TLSv1.3', 'TLSv1.2', 'TLSv1.1'], + "supported_cipher_suites": 'TLS_RSA_WITH_AES_128_CCM' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] + + +def test_rest_error_security_config(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + module_args = { + "is_fips_enabled": True, + "supported_protocols": ['TLSv1.2', 'TLSv1.1', 'TLSv1'], + "supported_cipher_suites": 'TLS_RSA_WITH_AES_128_CCM' + } + error = create_module(security_config_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'If fips is enabled then TLSv1 is not a supported protocol' in error + + +def test_rest_error_security_config_protocol(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + module_args = { + "is_fips_enabled": True, + "supported_protocols": ['TLSv1.2', 'TLSv1.1'], + "supported_cipher_suites": 'TLS_RSA_WITH_AES_128_CCM' + } + error = create_module(security_config_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'If fips is enabled then TLSv1.1 is not a supported protocol' in error + + +def 
test_rest_error_modify_security_config(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', '/security', SRR['security_config_record']), + ('PATCH', '/security', SRR['generic_error']), + ]) + module_args = { + "is_fips_enabled": True, + "supported_protocols": ['TLSv1.3', 'TLSv1.2'], + "supported_cipher_suites": 'TLS_RSA_WITH_AES_128_CCM' + } + error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg'] + assert "Error on modifying security config: calling: /security: got Expected error." in error + + +def test_rest_modify_security_config_fips(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', '/security', SRR['security_config_record']), + ('PATCH', '/security', SRR['success']), + ]) + module_args = { + "is_fips_enabled": True, + "supported_protocols": ['TLSv1.3', 'TLSv1.2'], + "supported_cipher_suites": 'TLS_RSA_WITH_AES_128_CCM' + } + assert call_main(my_main, ARGS_REST, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_ca_certificate.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_ca_certificate.py new file mode 100644 index 000000000..3728619eb --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_ca_certificate.py @@ -0,0 +1,140 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible, \ + create_and_apply, create_module, 
expect_and_capture_ansible_exception, call_main, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, \ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_ipsec_ca_certificate \ + import NetAppOntapSecurityCACertificate as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'cert1', + 'use_rest': 'always' +} + + +SRR = rest_responses({ + 'ipsec_ca_svm_scope': (200, {"records": [{ + 'name': 'cert1', + 'svm': {'name': 'svm4'}, + 'uuid': '380a12f7' + }], "num_records": 1}, None), + 'ipsec_ca_cluster_scope': (200, {"records": [{ + 'name': 'cert2', + 'scope': 'cluster', + 'uuid': '878eaa35'}], "num_records": 1}, None), + 'error_ipsec_ca_not_exist': (404, None, {'code': 4, 'message': "entry doesn't exist"}), +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "name"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_create_security_ipsec_ca_certificate_svm(): + ''' create ipsec ca certificates in svm ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['ipsec_ca_svm_scope']), # get certificate uuid. + ('GET', 'security/ipsec/ca-certificates/380a12f7', SRR['error_ipsec_ca_not_exist']), # ipsec ca does not exist. 
+ ('POST', 'security/ipsec/ca-certificates', SRR['success']), # create. + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['ipsec_ca_svm_scope']), # get certificate uuid. + ('GET', 'security/ipsec/ca-certificates/380a12f7', SRR['ipsec_ca_svm_scope']), # ipsec ca does not exist. + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'svm': 'svm4'})['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'svm': 'svm4'})['changed'] + + +def test_create_security_ipsec_ca_certificate_cluster(): + ''' create ipsec ca certificates in cluster ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['ipsec_ca_cluster_scope']), + ('GET', 'security/ipsec/ca-certificates/878eaa35', SRR['error_ipsec_ca_not_exist']), + ('POST', 'security/ipsec/ca-certificates', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['ipsec_ca_cluster_scope']), + ('GET', 'security/ipsec/ca-certificates/878eaa35', SRR['ipsec_ca_cluster_scope']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'name': 'cert1'})['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'name': 'cert1'})['changed'] + + +def test_error_certificate_not_exist(): + ''' error if certificate not present ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['empty_records']), + # do not throw error if certificate not exist and state is absent. 
+ ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['empty_records']) + ]) + error = "Error: certificate cert1 is not installed" + assert error in create_and_apply(my_module, DEFAULT_ARGS, fail=True)['msg'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_delete_security_ipsec_ca_certificate(): + ''' test delete ipsec ca certificate ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['ipsec_ca_cluster_scope']), + ('GET', 'security/ipsec/ca-certificates/878eaa35', SRR['ipsec_ca_cluster_scope']), + ('DELETE', 'security/ipsec/ca-certificates/878eaa35', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['ipsec_ca_cluster_scope']), + ('GET', 'security/ipsec/ca-certificates/878eaa35', SRR['empty_records']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_all_methods_catch_exception(): + ''' test exception in get/create/modify/delete ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + # GET/POST/DELETE error. 
+ ('GET', 'security/certificates', SRR['generic_error']), + ('GET', 'security/certificates', SRR['ipsec_ca_cluster_scope']), + ('GET', 'security/ipsec/ca-certificates/878eaa35', SRR['generic_error']), + ('POST', 'security/ipsec/ca-certificates', SRR['generic_error']), + ('DELETE', 'security/ipsec/ca-certificates/878eaa35', SRR['generic_error']) + ]) + ca_obj = create_module(my_module, DEFAULT_ARGS) + assert 'Error fetching uuid for certificate' in expect_and_capture_ansible_exception(ca_obj.get_certificate_uuid, 'fail')['msg'] + assert 'Error fetching security IPsec CA certificate' in expect_and_capture_ansible_exception(ca_obj.get_ipsec_ca_certificate, 'fail')['msg'] + assert 'Error adding security IPsec CA certificate' in expect_and_capture_ansible_exception(ca_obj.create_ipsec_ca_certificate, 'fail')['msg'] + assert 'Error deleting security IPsec CA certificate' in expect_and_capture_ansible_exception(ca_obj.delete_ipsec_ca_certificate, 'fail')['msg'] + + +def test_error_ontap9_9_1(): + ''' test module supported from 9.10.1 ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']) + ]) + assert 'requires ONTAP 9.10.1 or later' in call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_config.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_config.py new file mode 100644 index 000000000..e4f7d2527 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_config.py @@ -0,0 +1,87 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from 
ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible, \ + create_and_apply, create_module, expect_and_capture_ansible_exception, call_main, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, \ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_ipsec_config \ + import NetAppOntapSecurityIPsecConfig as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always' +} + + +SRR = rest_responses({ + 'ipsec_config': (200, {"records": [{"enabled": True, "replay_window": "64"}]}, None), + 'ipsec_config_1': (200, {"records": [{"enabled": False, "replay_window": "0"}]}, None) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_modify_security_ipsec_config(): + ''' create ipsec policy with certificates ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec', SRR['ipsec_config_1']), + ('PATCH', 'security/ipsec', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec', SRR['ipsec_config']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec', SRR['empty_records']), + ]) + args = { + "enabled": True, + "replay_window": 64 + } + assert 
create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_all_methods_catch_exception(): + ''' test exception in get/create/modify/delete ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + # GET/PATCH error. + ('GET', 'security/ipsec', SRR['generic_error']), + ('PATCH', 'security/ipsec', SRR['generic_error']) + ]) + sec_obj = create_module(my_module, DEFAULT_ARGS) + assert 'Error fetching security IPsec config' in expect_and_capture_ansible_exception(sec_obj.get_security_ipsec_config, 'fail')['msg'] + assert 'Error modifying security IPsec config' in expect_and_capture_ansible_exception(sec_obj.modify_security_ipsec_config, 'fail', {})['msg'] + + +def test_error_ontap97(): + ''' test module supported from 9.8 ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']) + ]) + assert 'requires ONTAP 9.8.0 or later' in call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_policy.py new file mode 100644 index 000000000..b913ac03e --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ipsec_policy.py @@ -0,0 +1,268 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible, \ + create_and_apply, create_module, 
expect_and_capture_ansible_exception, call_main, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, \ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_ipsec_policy \ + import NetAppOntapSecurityIPsecPolicy as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'ipsec_policy', + 'use_rest': 'always', + 'local_endpoint': { + 'address': '10.23.43.23', + 'netmask': '24', + 'port': '201' + }, + 'remote_endpoint': { + 'address': '10.23.43.13', + 'netmask': '24' + }, + 'protocol': 'tcp' +} + + +def form_rest_response(args=None): + response = { + "uuid": "6c025f9b", + "name": "ipsec1", + "scope": "svm", + "svm": {"name": "ansibleSVM"}, + "local_endpoint": { + "address": "10.23.43.23", + "netmask": "24", + "port": "201-201" + }, + "remote_endpoint": { + "address": "10.23.43.13", + "netmask": "24", + "port": "0-0" + }, + "protocol": "tcp", + "local_identity": "ing", + "remote_identity": "ing", + "action": "discard", + "enabled": False, + "authentication_method": "none" + } + if args: + response.update(args) + return response + + +SRR = rest_responses({ + 'ipsec_auth_none': (200, {"records": [form_rest_response()], "num_records": 1}, None), + 'ipsec_auth_psk': (200, {"records": [form_rest_response({ + "action": "esp_transport", + "authentication_method": "psk" + })], "num_records": 1}, None), + 'ipsec_auth_pki': (200, {"records": [form_rest_response({ + "action": "esp_transport", + "authentication_method": "pki", + "certificate": {"name": "ca_cert"} + })], 
"num_records": 1}, None), + 'ipsec_modify': (200, {"records": [form_rest_response({ + "local_endpoint": {"address": "10.23.43.24", "netmask": "24"}, + "remote_endpoint": {"address": "10.23.43.14", "netmask": "24", "port": "200-200"}, + "protocol": "udp", + })], "num_records": 1}, None), + 'ipsec_ipv6': (200, {"records": [form_rest_response({ + "local_endpoint": {"address": "2402:940::45", "netmask": "64", "port": "120-120"}, + "remote_endpoint": {"address": "2402:940::55", "netmask": "64", "port": "200-200"}, + "protocol": "udp", + })], "num_records": 1}, None), + 'ipsec_ipv6_modify': (200, {"records": [form_rest_response({ + "local_endpoint": {"address": "2402:940::46", "netmask": "64", "port": "120-120"}, + "remote_endpoint": {"address": "2402:940::56", "netmask": "64", "port": "200-200"}, + "protocol": "udp", + })], "num_records": 1}, None) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "name"] + error = create_module(my_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_create_security_ipsec_policy_certificate(): + ''' create ipsec policy with certificates ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ipsec/policies', SRR['empty_records']), + ('POST', 'security/ipsec/policies', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ipsec/policies', SRR['ipsec_auth_pki']), + ]) + args = { + "action": "esp_transport", + "authentication_method": "pki", + "certificate": "ca_cert" + } + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_create_security_ipsec_policy_psk(): + ''' create ipsec policy with pre-shared keys ''' + register_responses([ + ('GET', 'cluster', 
SRR['is_rest_9_10_1']), + ('GET', 'security/ipsec/policies', SRR['empty_records']), + ('POST', 'security/ipsec/policies', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ipsec/policies', SRR['ipsec_auth_psk']), + ]) + args = { + "action": "esp_transport", + "authentication_method": "psk", + "secret_key": "QDFRTGJUOJDE4RFGDSDW" + } + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_create_security_ipsec_policy(): + ''' create ipsec policy without authentication method in 9.8 ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec/policies', SRR['empty_records']), + ('POST', 'security/ipsec/policies', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec/policies', SRR['ipsec_auth_none']), + ]) + assert create_and_apply(my_module, DEFAULT_ARGS)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS)['changed'] + + +def test_modify_security_ipsec_policy(): + ''' modify ipsec policy ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec/policies', SRR['ipsec_auth_none']), + ('PATCH', 'security/ipsec/policies/6c025f9b', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec/policies', SRR['ipsec_modify']) + ]) + args = { + "local_endpoint": {"address": "10.23.43.24", "netmask": "255.255.255.0"}, + "remote_endpoint": {"address": "10.23.43.14", "netmask": "255.255.255.0", "port": "200"}, + "protocol": "udp" + } + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_warnings_raised(): + ''' test warnings raised ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + args = {"certificate": "new_Cert", 
"authentication_method": "pki", "action": "discard"} + create_module(my_module, DEFAULT_ARGS, args) + warning = "The IPsec action is discard" + print_warnings() + assert_warning_was_raised(warning, partial_match=True) + + args = {"secret_key": "AEDFGJTUSHNFGKGLFD", "authentication_method": "psk", "action": "bypass"} + create_module(my_module, DEFAULT_ARGS, args) + warning = "The IPsec action is bypass" + print_warnings() + assert_warning_was_raised(warning, partial_match=True) + + +def test_modify_security_ipsec_policy_ipv6(): + ''' test modify ipv6 address ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec/policies', SRR['ipsec_ipv6']), + ('PATCH', 'security/ipsec/policies/6c025f9b', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec/policies', SRR['ipsec_ipv6_modify']) + ]) + args = { + "local_endpoint": {"address": "2402:0940:000:000:00:00:0000:0046", "netmask": "64"}, + "remote_endpoint": {"address": "2402:0940:000:000:00:00:0000:0056", "netmask": "64", "port": "200"}, + "protocol": "17", + } + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + + +def test_delete_security_ipsec_policy(): + ''' test delete ipsec policy ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec/policies', SRR['ipsec_auth_none']), + ('DELETE', 'security/ipsec/policies/6c025f9b', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/ipsec/policies', SRR['empty_records']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_all_methods_catch_exception(): + ''' test exception in get/create/modify/delete ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + # GET/POST/PATCH/DELETE error. 
+ ('GET', 'security/ipsec/policies', SRR['generic_error']), + ('POST', 'security/ipsec/policies', SRR['generic_error']), + ('PATCH', 'security/ipsec/policies/6c025f9b', SRR['generic_error']), + ('DELETE', 'security/ipsec/policies/6c025f9b', SRR['generic_error']) + ]) + sec_obj = create_module(my_module, DEFAULT_ARGS) + sec_obj.uuid = '6c025f9b' + assert 'Error fetching security ipsec policy' in expect_and_capture_ansible_exception(sec_obj.get_security_ipsec_policy, 'fail')['msg'] + assert 'Error creating security ipsec policy' in expect_and_capture_ansible_exception(sec_obj.create_security_ipsec_policy, 'fail')['msg'] + assert 'Error modifying security ipsec policy' in expect_and_capture_ansible_exception(sec_obj.modify_security_ipsec_policy, 'fail', {})['msg'] + assert 'Error deleting security ipsec policy' in expect_and_capture_ansible_exception(sec_obj.delete_security_ipsec_policy, 'fail')['msg'] + + +def test_modify_error(): + ''' test modify error ''' + register_responses([ + # Error if try to modify certificate for auth_method none. 
+ ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ipsec/policies', SRR['ipsec_auth_none']), + # Error if try to modify action and authentication_method + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ipsec/policies', SRR['ipsec_auth_none']) + + ]) + args = {'certificate': 'cert_new'} + assert 'Error: cannot set certificate for IPsec policy' in create_and_apply(my_module, DEFAULT_ARGS, args, fail=True)['msg'] + args = {'authentication_method': 'psk', 'action': 'esp_udp', 'secret_key': 'secretkey'} + assert 'Error: cannot modify options' in create_and_apply(my_module, DEFAULT_ARGS, args, fail=True)['msg'] + + +def test_error_ontap97(): + ''' test module supported from 9.8 ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']) + ]) + assert 'requires ONTAP 9.8.0 or later' in call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_key_manager.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_key_manager.py new file mode 100644 index 000000000..38d18981f --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_key_manager.py @@ -0,0 +1,804 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, 
rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_warning_was_raised, call_main, create_module, expect_and_capture_ansible_exception, patch_ansible, print_warnings + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_key_manager import\ + NetAppOntapSecurityKeyManager as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +security_key_info = { + 'attributes-list': { + 'key-manager-info': { + 'key-manager-ip-address': '0.1.2.3', + 'key-manager-server-status': 'available', + 'key-manager-tcp-port': '5696', + 'node-name': 'test_node' + } + } +} + +ZRR = zapi_responses({ + 'security_key_info': build_zapi_response(security_key_info, 1) +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never' + } + error = 'missing required arguments:' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_get_nonexistent_key_manager(): + ''' Test if get_key_manager() returns None for non-existent key manager ''' + register_responses([ + ('ZAPI', 'security-key-manager-get-iter', ZRR['no_records']), + ]) + module_args = { + 'ip_address': '1.2.3.4', + 'use_rest': 'never' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + result = my_obj.get_key_manager() + assert result is None + + +def test_get_existing_key_manager(): + ''' Test if get_key_manager() returns details for existing key manager ''' + register_responses([ + ('ZAPI', 'security-key-manager-get-iter', 
ZRR['security_key_info']), + ]) + module_args = { + 'ip_address': '1.2.3.4', + 'use_rest': 'never' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + result = my_obj.get_key_manager() + assert result['ip_address'] == '0.1.2.3' + + +def test_successfully_add_key_manager(): + ''' Test successfully add key manager''' + register_responses([ + ('ZAPI', 'security-key-manager-setup', ZRR['success']), + ('ZAPI', 'security-key-manager-get-iter', ZRR['no_records']), + ('ZAPI', 'security-key-manager-add', ZRR['success']), + # idempotency + ('ZAPI', 'security-key-manager-setup', ZRR['success']), + ('ZAPI', 'security-key-manager-get-iter', ZRR['security_key_info']), + ]) + module_args = { + 'ip_address': '0.1.2.3', + 'use_rest': 'never' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_modify_key_manager(): + ''' Test successfully add key manager''' + register_responses([ + ('ZAPI', 'security-key-manager-setup', ZRR['success']), + ('ZAPI', 'security-key-manager-get-iter', ZRR['security_key_info']), + ]) + module_args = { + 'ip_address': '1.2.3.4', + 'use_rest': 'never' + } + error = "Error, cannot modify existing configuraton: modify is not supported with ZAPI, new values: {'ip_address': '1.2.3.4'}, current values:" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_successfully_delete_key_manager(): + ''' Test successfully delete key manager''' + register_responses([ + ('ZAPI', 'security-key-manager-setup', ZRR['success']), + ('ZAPI', 'security-key-manager-get-iter', ZRR['security_key_info']), + ('ZAPI', 'security-key-manager-delete', ZRR['success']), + # idempotency + ('ZAPI', 'security-key-manager-setup', ZRR['success']), + ('ZAPI', 'security-key-manager-get-iter', ZRR['no_records']), + ]) + module_args = { + 'ip_address': '1.2.3.4', + 'state': 'absent', + 'use_rest': 'never', + 'node': 'some_node' + } + 
assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + print_warnings() + assert_warning_was_raised('The option "node" is deprecated and should not be used.') + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + module_args = { + 'ip_address': '1.2.3.4', + 'use_rest': 'never', + 'node': 'some_node' + } + error = 'Error: the python NetApp-Lib module is required. Import error: None' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + print_warnings() + assert_warning_was_raised('The option "node" is deprecated and should not be used.') + + +def test_error_handling(): + ''' test error handling on ZAPI calls ''' + register_responses([ + ('ZAPI', 'security-key-manager-setup', ZRR['error']), + ('ZAPI', 'security-key-manager-get-iter', ZRR['error']), + ('ZAPI', 'security-key-manager-add', ZRR['error']), + ('ZAPI', 'security-key-manager-delete', ZRR['error']), + + ]) + module_args = { + 'ip_address': '1.2.3.4', + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = zapi_error_message('Error setting up key manager') + assert error in expect_and_capture_ansible_exception(my_obj.key_manager_setup, 'fail')['msg'] + error = zapi_error_message('Error fetching key manager') + assert error in expect_and_capture_ansible_exception(my_obj.get_key_manager, 'fail')['msg'] + error = zapi_error_message('Error creating key manager') + assert error in expect_and_capture_ansible_exception(my_obj.create_key_manager, 'fail')['msg'] + error = zapi_error_message('Error deleting key manager') + assert error in expect_and_capture_ansible_exception(my_obj.delete_key_manager, 'fail')['msg'] + + +def test_rest_is_required(): + '''report error if external or onboard are used with ZAPI''' + register_responses([ 
+ ]) + module_args = { + 'onboard': { + 'synchronize': True + }, + 'use_rest': 'never', + } + error = 'Error: REST is required for onboard option.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args = { + 'external': { + 'servers': ['0.1.2.3:5696'], + 'client_certificate': 'client_certificate', + 'server_ca_certificates': ['server_ca_certificate'] + }, + 'use_rest': 'never', + 'vserver': 'svm_name', + } + error = 'options.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'one_external_seckey_record': (200, { + 'records': [{ + 'uuid': 'a1b2c3', + 'external': { + 'servers': [{'server': '0.1.2.3:5696'}] + }}], + 'num_records': 1 + }, None), + 'one_external_seckey_record_2_servers': (200, { + 'records': [{ + 'uuid': 'a1b2c3', + 'external': { + 'servers': [ + {'server': '1.2.3.4:5696'}, + {'server': '0.1.2.3:5696'}] + }, + 'onboard': {'enabled': False}}], + 'num_records': 1 + }, None), + 'one_onboard_seckey_record': (200, { + 'records': [{ + 'uuid': 'a1b2c3', + 'onboard': { + 'enabled': True, + 'key_backup': "certificate", + }}], + 'num_records': 1 + }, None), + 'one_security_certificate_record': (200, { + 'records': [{'uuid': 'a1b2c3'}], + 'num_records': 1 + }, None), + 'error_duplicate': (400, None, {'message': 'New passphrase cannot be same as the old passphrase.'}), + 'error_incorrect': (400, None, {'message': 'Cluster-wide passphrase is incorrect.'}), + 'error_svm_not_found': (400, None, {'message': 'SVM "svm_name" does not exist'}), + 'error_already_present': (400, None, {'message': 'already has external key management configured'}), +}, False) + + +def test_successfully_add_key_manager_old_style_rest(): + ''' Test successfully add key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', 
SRR['zero_records']), + ('POST', 'security/key-managers', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_external_seckey_record']), + ]) + module_args = { + 'ip_address': '0.1.2.3', + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_add_key_manager_external_rest(): + ''' Test successfully add key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['zero_records']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ('POST', 'security/key-managers', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_external_seckey_record']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ]) + module_args = { + 'external': { + 'servers': ['0.1.2.3:5696'], + 'client_certificate': 'client_certificate', + 'server_ca_certificates': ['server_ca_certificate'] + }, + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_add_key_manager_external_rest_svm(): + ''' Test successfully add key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['zero_records']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ('GET', 'security/certificates', 
SRR['one_security_certificate_record']), + ('POST', 'security/key-managers', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_external_seckey_record']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ]) + module_args = { + 'external': { + 'servers': ['0.1.2.3:5696'], + 'client_certificate': 'client_certificate', + 'server_ca_certificates': ['server_ca_certificate'] + }, + 'vserver': 'svm_name', + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_add_key_manager_onboard_rest(): + ''' Test successfully add key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['zero_records']), + ('POST', 'security/key-managers', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_duplicate']), + ]) + module_args = { + 'onboard': { + 'passphrase': 'passphrase_too_short', + 'from_passphrase': 'ignored on create', + }, + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_add_key_manager_onboard_svm_rest(): + ''' Test successfully add key manager''' + register_responses([ + ]) + module_args = { + 'onboard': { + 'passphrase': 'passphrase_too_short', + 'from_passphrase': 'ignored on create', + }, + 'vserver': 'svm_name', + 'use_rest': 'always' + } + error = 'parameters are mutually exclusive:' + assert error 
in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_successfully_delete_key_manager_rest(): + ''' Test successfully add key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('DELETE', 'security/key-managers/a1b2c3', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['zero_records']), + ]) + module_args = { + 'state': 'absent', + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_change_passphrase_onboard_key_manager_rest(): + ''' Test successfully add key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_incorrect']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_duplicate']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + # both passphrases are incorrect + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_incorrect']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_incorrect']), + # unexpected success on check passphrase + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_duplicate']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + # 
unexpected success on check passphrase + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_incorrect']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + # unexpected success on check passphrase + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + # unexpected error on check passphrase + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['generic_error']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_duplicate']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + # unexpected error on check passphrase + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_incorrect']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['generic_error']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + # unexpected error on check passphrase + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['generic_error']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['generic_error']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + # idempotency + ('GET', 'cluster', 
SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_duplicate']), + ]) + module_args = { + 'onboard': { + 'passphrase': 'passphrase_too_short', + 'from_passphrase': 'passphrase_too_short' + }, + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + error = rest_error_message('Error: neither from_passphrase nor passphrase match installed passphrase', + 'security/key-managers/a1b2c3', + got="got {'message': 'Cluster-wide passphrase is incorrect.'}.") + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + # success + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # ignored error + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # idempotency + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_change_passphrase_and_sync_onboard_key_manager_rest(): + ''' Test successfully modify onboard key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_incorrect']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_duplicate']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + # idempotency - sync is always sent! 
+ ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['error_duplicate']), + ('PATCH', 'security/key-managers/a1b2c3', SRR['success']), + ]) + module_args = { + 'onboard': { + 'passphrase': 'passphrase_too_short', + 'from_passphrase': 'passphrase_too_short', + 'synchronize': True + }, + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # idempotency + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_change_external_key_manager_rest(): + ''' Test successfully add key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_external_seckey_record_2_servers']), + ('DELETE', 'security/key-managers/a1b2c3/key-servers/1.2.3.4:5696', SRR['success']), + ('DELETE', 'security/key-managers/a1b2c3/key-servers/0.1.2.3:5696', SRR['success']), + ('POST', 'security/key-managers/a1b2c3/key-servers', SRR['success']), + ('POST', 'security/key-managers/a1b2c3/key-servers', SRR['success']), + # same servers but different order + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_external_seckey_record_2_servers']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_external_seckey_record_2_servers']), + ]) + module_args = { + 'external': { + 'servers': ['0.1.2.3:5697', '1.2.3.4:5697'] + }, + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # same servers but different order + module_args = { + 'external': { + 'servers': ['0.1.2.3:5696', '1.2.3.4:5696'] + }, + 'use_rest': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, 
module_args)['changed'] + # idempotency + module_args = { + 'external': { + 'servers': ['1.2.3.4:5696', '0.1.2.3:5696'] + }, + 'use_rest': 'always' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_external_key_manager_rest(): + ''' Test error add key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['generic_error']), + ('GET', 'security/certificates', SRR['generic_error']), + ('POST', 'security/key-managers', SRR['generic_error']), + ('PATCH', 'security/key-managers/123', SRR['generic_error']), + ('DELETE', 'security/key-managers/123', SRR['generic_error']), + ('POST', 'security/key-managers/123/key-servers', SRR['generic_error']), + ('DELETE', 'security/key-managers/123/key-servers/server_name', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = rest_error_message('Error fetching key manager info for cluster', 'security/key-managers') + assert error in expect_and_capture_ansible_exception(my_obj.get_key_manager, 'fail')['msg'] + error = rest_error_message('Error fetching security certificate info for name of type: type on cluster', 'security/certificates') + assert error in expect_and_capture_ansible_exception(my_obj.get_security_certificate_uuid_rest, 'fail', 'name', 'type')['msg'] + error = rest_error_message('Error creating key manager for cluster', 'security/key-managers') + assert error in expect_and_capture_ansible_exception(my_obj.create_key_manager_rest, 'fail')['msg'] + my_obj.uuid = '123' + error = rest_error_message('Error modifying key manager for cluster', 'security/key-managers/123') + assert error in expect_and_capture_ansible_exception(my_obj.modify_key_manager_rest, 'fail', {'onboard': {'xxxx': 'yyyy'}})['msg'] + error = rest_error_message('Error deleting key manager for cluster', 
'security/key-managers/123') + assert error in expect_and_capture_ansible_exception(my_obj.delete_key_manager_rest, 'fail')['msg'] + error = rest_error_message('Error adding external key server server_name', 'security/key-managers/123/key-servers') + assert error in expect_and_capture_ansible_exception(my_obj.add_external_server_rest, 'fail', 'server_name')['msg'] + error = rest_error_message('Error removing external key server server_name', 'security/key-managers/123/key-servers/server_name') + assert error in expect_and_capture_ansible_exception(my_obj.remove_external_server_rest, 'fail', 'server_name')['msg'] + + +def test_get_security_certificate_uuid_rest_by_name_then_common_name(): + ''' Use name first, then common_name''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + # not found + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['zero_records']), + # with 9.7 or earlier, name is not supported + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ]) + module_args = { + 'use_rest': 'always', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_security_certificate_uuid_rest('name', 'type') is not None + assert_warning_was_raised('certificate name not found, retrying with common_name and type type.') + # not found, neither with name nor common_name + error = 'Error fetching security certificate info for name of type: type on cluster: not found.' 
+ assert error in expect_and_capture_ansible_exception(my_obj.get_security_certificate_uuid_rest, 'fail', 'name', 'type')['msg'] + # 9.7 + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_security_certificate_uuid_rest('name', 'type') is not None + assert_warning_was_raised('name is not supported in 9.6 or 9.7, using common_name name and type type.') + + +def test_get_security_certificate_uuid_rest_by_name_then_common_name_svm(): + ''' With SVM, retry at cluster scope if not found or error at SVM scope ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + # not found + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['generic_error']), + ('GET', 'security/certificates', SRR['zero_records']), + # with 9.7 or earlier, name is not supported + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ]) + module_args = { + 'use_rest': 'always', + 'vserver': 'svm_name' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_security_certificate_uuid_rest('name', 'type') is not None + assert_warning_was_raised('certificate name not found, retrying with common_name and type type.') + # not found, neither with name nor common_name + error = 'Error fetching security certificate info for name of type: type on vserver: svm_name: not found.' 
+ assert error in expect_and_capture_ansible_exception(my_obj.get_security_certificate_uuid_rest, 'fail', 'name', 'type')['msg'] + # 9.7 + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_security_certificate_uuid_rest('name', 'type') is not None + assert_warning_was_raised('name is not supported in 9.6 or 9.7, using common_name name and type type.') + + +def test_warn_when_onboard_exists_and_only_one_passphrase_present(): + ''' Warn if only one passphrase is present ''' + register_responses([ + # idempotency + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ]) + module_args = { + 'onboard': { + 'passphrase': 'passphrase_too_short', + }, + 'use_rest': 'always' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_warning_was_raised('passphrase is ignored') + module_args = { + 'onboard': { + 'from_passphrase': 'passphrase_too_short', + }, + 'use_rest': 'always' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert_warning_was_raised('from_passphrase is ignored') + + +def test_error_cannot_change_key_manager_type_rest(): + ''' Warn if only one passphrase is present ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_external_seckey_record']), + ]) + module_args = { + 'external': { + 'servers': ['0.1.2.3:5697', '1.2.3.4:5697'] + }, + 'use_rest': 'always' + } + error = 'Error, cannot modify existing configuraton: onboard key-manager is 
already installed, it needs to be deleted first.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args = { + 'onboard': { + 'from_passphrase': 'passphrase_too_short', + }, + 'use_rest': 'always' + } + error = 'Error, cannot modify existing configuraton: external key-manager is already installed, it needs to be deleted first.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_error_sync_repquires_passphrase_rest(): + ''' Warn if only one passphrase is present ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['one_onboard_seckey_record']), + ]) + module_args = { + 'onboard': { + 'synchronize': True + }, + 'use_rest': 'always' + } + error = 'Error: passphrase is required for synchronize.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_return_not_present_when_svm_not_found_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['error_svm_not_found']), + ]) + module_args = { + 'state': 'absent', + 'vserver': 'svm_name', + 'use_rest': 'always' + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_retry_on_create_error(dont_sleep): + """ when no key server is present, REST does not return a record """ + ''' Test successfully add key manager''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/key-managers', SRR['zero_records']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ('GET', 'security/certificates', SRR['one_security_certificate_record']), + ('POST', 'security/key-managers', SRR['error_already_present']), + ('DELETE', 'security/key-managers', SRR['success']), 
+ # we only retry once, erroring out + ('POST', 'security/key-managers', SRR['error_already_present']), + + ]) + module_args = { + 'external': { + 'servers': ['0.1.2.3:5696'], + 'client_certificate': 'client_certificate', + 'server_ca_certificates': ['server_ca_certificate'] + }, + 'vserver': 'svm_name', + 'use_rest': 'always' + } + error = 'Error creating key manager for cluster:' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_update_key_server_list(): + ''' Validate servers are added/removed ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + # add/remove + ('DELETE', 'security/key-managers/123/key-servers/s1', SRR['success']), + ('DELETE', 'security/key-managers/123/key-servers/s3', SRR['success']), + ('POST', 'security/key-managers/123/key-servers', SRR['success']), + ('POST', 'security/key-managers/123/key-servers', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + } + # no requested change + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + current = { + 'external': { + 'servers': [ + {'server': 's1'}, + {'server': 's2'}, + {'server': 's3'}, + ] + } + } + # idempotent + assert my_obj.update_key_server_list(current) is None + my_obj.parameters['external'] = { + 'servers': [ + {'server': 's1'}, + {'server': 's2'}, + {'server': 's3'}, + ] + } + assert my_obj.update_key_server_list(current) is None + # delete/add + my_obj.parameters['external'] = { + 'servers': [ + {'server': 's4'}, + {'server': 's2'}, + {'server': 's5'}, + ] + } + my_obj.uuid = '123' + assert my_obj.update_key_server_list(current) is None diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ssh.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ssh.py new file mode 100644 index 000000000..f7723db63 --- /dev/null +++ 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_security_ssh.py @@ -0,0 +1,164 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible, call_main +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_security_ssh import main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +SRR = rest_responses({ + 'ssh_security': (200, { + "records": [ + { + "ciphers": [ + "aes256_ctr", + "aes192_ctr", + "aes128_ctr" + ], + "max_authentication_retry_count": 0, + "svm": { + "name": "ansibleSVM", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + "mac_algorithms": ["hmac_sha1", "hmac_sha2_512_etm"], + "key_exchange_algorithms": [ + "diffie_hellman_group_exchange_sha256", + "diffie_hellman_group14_sha1" + ], + }], + "num_records": 1 + }, None), + 'ssh_security_no_svm': (200, { + "records": [ + { + "ciphers": [ + "aes256_ctr", + + ], + }], + "num_records": 1 + }, None), +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always', +} + + +def test_get_security_ssh_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ssh/svms', SRR['generic_error']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 
'security/ssh', SRR['generic_error']) + ]) + module_args = {"vserver": "AnsibleSVM"} + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = 'calling: security/ssh/svms: got Expected error.' + assert msg in error + error = call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] + + +def test_modify_security_ssh_algorithms_rest(): + ''' test modify algorithms ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ssh/svms', SRR['ssh_security']), + ('PATCH', 'security/ssh/svms/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['empty_good']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ssh', SRR['ssh_security']), + ('PATCH', 'security/ssh', SRR['empty_good']), + ]) + module_args = { + "vserver": "AnsibleSVM", + "ciphers": ["aes256_ctr", "aes192_ctr"], + "mac_algorithms": ["hmac_sha1", "hmac_sha2_512_etm"], + "key_exchange_algorithms": ["diffie_hellman_group_exchange_sha256"], + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args.pop('vserver') + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_security_ssh_retry_rest(): + ''' test modify maximum retry count ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ssh/svms', SRR['ssh_security']), + ('PATCH', 'security/ssh/svms/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['empty_good']), + ]) + module_args = { + "vserver": "AnsibleSVM", + "max_authentication_retry_count": 2, + } + assert call_main(my_main, DEFAULT_ARGS, module_args) + + +def test_error_modify_security_ssh_rest(): + ''' test modify algorithms ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ssh/svms', SRR['ssh_security']), + ('PATCH', 'security/ssh/svms/02c9e252-41be-11e9-81d5-00a0986138f7', SRR['generic_error']), + ]) + module_args = { + "vserver": "AnsibleSVM", + "ciphers": ["aes256_ctr", "aes192_ctr"], + "max_authentication_retry_count": 2, 
+ "mac_algorithms": ["hmac_sha1", "hmac_sha2_512_etm"], + "key_exchange_algorithms": ["diffie_hellman_group_exchange_sha256"], + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = 'calling: security/ssh/svms/02c9e252-41be-11e9-81d5-00a0986138f7: got Expected error.' + assert msg in error + + +def test_error_empty_security_ssh_rest(): + ''' Validation of input parameters ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + module_args = { + "ciphers": [] + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = 'Removing all SSH ciphers is not supported. SSH login would fail. ' + \ + 'There must be at least one ciphers associated with the SSH configuration.' + assert msg in error + + +def test_module_error_ontap_version(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + module_args = {'use_rest': 'always'} + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'Error: na_ontap_security_ssh only supports REST, and requires ONTAP 9.10.1 or later' in error + + +def test_module_error_no_svm_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/ssh/svms', SRR['ssh_security_no_svm']), + ]) + module_args = { + "vserver": "AnsibleSVM", + "ciphers": ["aes256_ctr", "aes192_ctr"] + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'Error: no uuid found for the SVM' in error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_policy.py new file mode 100644 index 000000000..c11c44059 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_policy.py @@ -0,0 +1,402 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + 
+''' unit test for ONTAP service policy Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_no_warnings, expect_and_capture_ansible_exception, call_main, create_module, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_service_policy import NetAppOntapServicePolicy as my_module, main as my_main + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always', + 'name': 'sp123', +} + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'one_sp_record': (200, { + "records": [{ + 'name': 'sp123', + 'uuid': 'uuid123', + 'svm': dict(name='vserver'), + 'services': ['data_core'], + 'scope': 'svm', + 'ipspace': dict(name='ipspace') + }], + 'num_records': 1 + }, None), + 'two_sp_records': (200, { + "records": [ + { + 'name': 'sp123', + }, + { + 'name': 'sp124', + }], + 'num_records': 2 + }, None), +}, False) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + module_args = { + 'hostname': '' + } + error = 'missing required arguments: name' + assert error == call_main(my_main, module_args, fail=True)['msg'] + + +def test_ensure_get_called(): + register_responses([ + ('GET', 'cluster', 
SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['one_sp_record']), + ]) + module_args = { + 'services': ['data_core'], + 'vserver': 'vserver', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is False + assert_no_warnings() + + +def test_ensure_create_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['zero_records']), + ('POST', 'network/ip/service-policies', SRR['empty_good']), + ]) + module_args = { + 'services': ['data_core'], + 'vserver': 'vserver', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + assert_no_warnings() + + +def test_ensure_create_called_cluster(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['zero_records']), + ('POST', 'network/ip/service-policies', SRR['empty_good']), + ]) + module_args = { + 'ipspace': 'ipspace', + 'services': ['data_core'] + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + assert_no_warnings() + + +def test_ensure_create_idempotent(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['one_sp_record']), + ]) + module_args = { + 'services': ['data_core'], + 'vserver': 'vserver', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is False + assert_no_warnings() + + +def test_ensure_modify_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['one_sp_record']), + ('PATCH', 'network/ip/service-policies/uuid123', SRR['empty_good']), + ]) + module_args = { + 'services': ['data_nfs'], + 'vserver': 'vserver', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + assert_no_warnings() + + +def test_ensure_modify_called_no_service(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 
'network/ip/service-policies', SRR['one_sp_record']), + ('PATCH', 'network/ip/service-policies/uuid123', SRR['empty_good']), + ]) + module_args = { + 'services': ['no_service'], + 'vserver': 'vserver', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + assert_no_warnings() + + +def test_ensure_delete_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['one_sp_record']), + ('DELETE', 'network/ip/service-policies/uuid123', SRR['empty_good']), + ]) + module_args = { + 'state': 'absent', + 'vserver': 'vserver', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is True + assert_no_warnings() + + +def test_ensure_delete_idempotent(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['zero_records']), + ]) + module_args = { + 'state': 'absent', + 'vserver': 'vserver', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] is False + assert_no_warnings() + + +def test_negative_extra_record(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['two_sp_records']), + ]) + module_args = { + 'services': ['data_nfs'], + 'vserver': 'vserver', + } + error = 'Error in get_service_policy: calling: network/ip/service-policies: unexpected response' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_ipspace_required_1(): + module_args = { + 'services': ['data_nfs'], + 'vserver': None, + } + error = "vserver is None but all of the following are missing: ipspace" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_ipspace_required_2(): + module_args = { + 'scope': 'cluster', + 'services': ['data_nfs'], + 'vserver': None, + } + error = "scope is cluster but all of the following are 
missing: ipspace" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_ipspace_required_3(): + module_args = { + 'services': ['data_nfs'], + } + error = "one of the following is required: ipspace, vserver" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_vserver_required_1(): + module_args = { + 'scope': 'svm', + 'services': ['data_nfs'], + } + error = "one of the following is required: ipspace, vserver" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_vserver_required_2(): + module_args = { + 'ipspace': None, + 'scope': 'svm', + 'services': ['data_nfs'], + } + error = "scope is svm but all of the following are missing: vserver" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_vserver_required_3(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'ipspace': None, + 'scope': 'svm', + 'services': ['data_nfs'], + 'vserver': None, + } + error = 'Error: vserver cannot be None when "scope: svm" is specified.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_vserver_not_required(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'ipspace': None, + 'scope': 'cluster', + 'services': ['data_nfs'], + 'vserver': 'vserver', + } + error = 'Error: vserver cannot be set when "scope: cluster" is specified. 
Got: vserver' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_no_service_not_alone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'scope': 'svm', + 'services': ['data_nfs', 'no_service'], + 'vserver': 'vserver', + } + error = "Error: no other service can be present when no_service is specified." + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_no_service_not_alone_with_cluster_scope(): + module_args = { + 'ipspace': 'ipspace', + 'scope': 'cluster', + 'services': ['data_nfs', 'no_service'], + 'vserver': 'vserver', + } + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + error = "Error: no other service can be present when no_service is specified." + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_extra_arg_in_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['one_sp_record']), + ]) + module_args = { + 'ipspace': 'ipspace', + 'scope': 'cluster', + 'services': ['data_nfs'], + } + error = "Error: attributes not supported in modify: {'scope': 'cluster'}" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_empty_body_in_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'scope': 'svm', + 'services': ['data_nfs'], + 'vserver': 'vserver', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + current = dict(uuid='') + modify = {} + error = 'Error: nothing to change - modify called with: {}' + assert error in expect_and_capture_ansible_exception(my_obj.modify_service_policy, 'fail', current, modify)['msg'] + assert_no_warnings() + + +def 
test_negative_create_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['zero_records']), + ('POST', 'network/ip/service-policies', SRR['generic_error']), + ]) + module_args = { + 'scope': 'svm', + 'services': ['data_nfs'], + 'vserver': 'vserver', + } + error = rest_error_message('Error in create_service_policy', 'network/ip/service-policies') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_delete_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['one_sp_record']), + ('DELETE', 'network/ip/service-policies/uuid123', SRR['generic_error']), + ]) + module_args = { + 'state': 'absent', + 'vserver': 'vserver', + } + error = rest_error_message('Error in delete_service_policy', 'network/ip/service-policies/uuid123') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_modify_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'network/ip/service-policies', SRR['one_sp_record']), + ('PATCH', 'network/ip/service-policies/uuid123', SRR['generic_error']), + ]) + module_args = { + 'services': ['data_nfs'], + 'vserver': 'vserver', + } + error = rest_error_message('Error in modify_service_policy', 'network/ip/service-policies/uuid123') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + + +def test_negative_unknown_services(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'services': ['data_nfs9'], + 'vserver': 'vserver', + } + error = 'Error: unknown service: data_nfs9. New services may need to be added to "additional_services".' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert_no_warnings() + module_args = { + 'services': ['data_nfs9', 'data_cifs', 'dummy'], + 'vserver': 'vserver', + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + for needle in ['Error: unknown services:', 'data_nfs9', 'dummy']: + assert needle in error + assert 'data_cifs' not in error + assert_no_warnings() diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_processor_network.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_processor_network.py new file mode 100644 index 000000000..c8c249810 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_service_processor_network.py @@ -0,0 +1,296 @@ +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible,\ + create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_service_processor_network \ + import NetAppOntapServiceProcessorNetwork as sp_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required 
netapp_lib') + + +def mock_args(enable=False, use_rest=False): + data = { + 'node': 'test-vsim1', + 'is_enabled': enable, + 'address_type': 'ipv4', + 'hostname': 'host', + 'username': 'admin', + 'password': 'password', + 'use_rest': 'never' + } + if enable is True: + data['is_enabled'] = enable + data['ip_address'] = '1.1.1.1' + data['gateway_ip_address'] = '2.2.2.2' + data['netmask'] = '255.255.248.0' + data['dhcp'] = 'none' + if use_rest: + data['use_rest'] = 'always' + return data + + +sp_enabled_info = { + 'num-records': 1, + 'attributes-list': { + 'service-processor-network-info': { + 'node': 'test-vsim1', + 'is-enabled': 'true', + 'address-type': 'ipv4', + 'dhcp': 'v4', + 'gateway-ip-address': '2.2.2.2', + 'netmask': '255.255.248.0', + 'ip-address': '1.1.1.1', + 'setup-status': 'succeeded' + } + } +} + +sp_disabled_info = { + 'num-records': 1, + 'attributes-list': { + 'service-processor-network-info': { + 'node-name': 'test-vsim1', + 'is-enabled': 'false', + 'address-type': 'ipv4', + 'setup-status': 'not_setup' + } + } +} + +sp_status_info = { + 'num-records': 1, + 'attributes-list': { + 'service-processor-network-info': { + 'node-name': 'test-vsim1', + 'is-enabled': 'false', + 'address-type': 'ipv4', + 'setup-status': 'in_progress' + } + } +} + +ZRR = zapi_responses({ + 'sp_enabled_info': build_zapi_response(sp_enabled_info), + 'sp_disabled_info': build_zapi_response(sp_disabled_info), + 'sp_status_info': build_zapi_response(sp_status_info) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "node", "address_type"] + error = create_module(sp_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_modify_error_on_disabled_sp(): + ''' a more interesting test ''' + register_responses([ + ('service-processor-network-get-iter', ZRR['sp_disabled_info']) + 
]) + error = 'Error: Cannot modify a service processor network if it is disabled in ZAPI' + assert error in create_and_apply(sp_module, mock_args(), {'ip_address': '1.1.1.1'}, 'error')['msg'] + + +def test_modify_error_on_disabe_dhcp_without_ip(): + ''' a more interesting test ''' + register_responses([ + ('service-processor-network-get-iter', ZRR['sp_enabled_info']) + ]) + error = 'Error: To disable dhcp, configure ip-address, netmask and gateway details manually.' + assert error in create_and_apply(sp_module, mock_args(enable=True), None, fail=True)['msg'] + + +def test_modify_error_of_params_disabled_false(): + ''' a more interesting test ''' + register_responses([ + ('service-processor-network-get-iter', ZRR['sp_enabled_info']) + ]) + error = 'Error: Cannot modify any other parameter for a service processor network if option "is_enabled" is set to false.' + assert error in create_and_apply(sp_module, mock_args(), {'ip_address': '2.1.1.1'}, 'error')['msg'] + + +def test_modify_sp(): + ''' a more interesting test ''' + register_responses([ + ('service-processor-network-get-iter', ZRR['sp_enabled_info']), + ('service-processor-network-modify', ZRR['success']) + ]) + assert create_and_apply(sp_module, mock_args(enable=True), {'ip_address': '3.3.3.3'})['changed'] + + +@patch('time.sleep') +def test_modify_sp_wait(sleep): + ''' a more interesting test ''' + register_responses([ + ('service-processor-network-get-iter', ZRR['sp_enabled_info']), + ('service-processor-network-modify', ZRR['success']), + ('service-processor-network-get-iter', ZRR['sp_enabled_info']) + ]) + args = {'ip_address': '3.3.3.3', 'wait_for_completion': True} + assert create_and_apply(sp_module, mock_args(enable=True), args)['changed'] + + +def test_non_existing_sp(): + register_responses([ + ('service-processor-network-get-iter', ZRR['no_records']) + ]) + error = 'Error No Service Processor for node: test-vsim1' + assert create_and_apply(sp_module, mock_args(), fail=True)['msg'] + + 
+@patch('time.sleep') +def test_wait_on_sp_status(sleep): + register_responses([ + ('service-processor-network-get-iter', ZRR['sp_enabled_info']), + ('service-processor-network-modify', ZRR['success']), + ('service-processor-network-get-iter', ZRR['sp_status_info']), + ('service-processor-network-get-iter', ZRR['sp_status_info']), + ('service-processor-network-get-iter', ZRR['sp_status_info']), + ('service-processor-network-get-iter', ZRR['sp_status_info']), + ('service-processor-network-get-iter', ZRR['sp_enabled_info']) + ]) + args = {'ip_address': '3.3.3.3', 'wait_for_completion': True} + assert create_and_apply(sp_module, mock_args(enable=True), args)['changed'] + + +def test_if_all_methods_catch_exception(): + ''' test error zapi - get/modify''' + register_responses([ + ('service-processor-network-get-iter', ZRR['error']), + ('service-processor-network-get-iter', ZRR['error']), + ('service-processor-network-modify', ZRR['error']) + ]) + sp_obj = create_module(sp_module, mock_args()) + + assert 'Error fetching service processor network info' in expect_and_capture_ansible_exception(sp_obj.get_service_processor_network, 'fail')['msg'] + assert 'Error fetching service processor network status' in expect_and_capture_ansible_exception(sp_obj.get_sp_network_status, 'fail')['msg'] + assert 'Error modifying service processor network' in expect_and_capture_ansible_exception(sp_obj.modify_service_processor_network, 'fail', {})['msg'] + + +SRR = rest_responses({ + 'sp_enabled_info': (200, {"records": [{ + 'name': 'ansdev-stor-1', + 'service_processor': { + 'dhcp_enabled': False, + 'firmware_version': '3.10', + 'ipv4_interface': { + 'address': '1.1.1.1', + 'gateway': '2.2.2.2', + 'netmask': '255.255.248.0' + }, + 'link_status': 'up', + 'state': 'online' + }, + 'uuid': '5dd7aed0'} + ]}, None), + 'sp_disabled_info': (200, {"records": [{ + 'name': 'ansdev-stor-1', + 'service_processor': { + 'firmware_version': '3.10', + 'link_status': 'up', + 'state': 'online' + }, + 'uuid': 
'5dd7aed0'} + ]}, None) +}) + + +def test_modify_sp_rest(): + ''' modify sp in rest ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'cluster/nodes', SRR['sp_enabled_info']), + ('PATCH', 'cluster/nodes/5dd7aed0', SRR['success']) + ]) + assert create_and_apply(sp_module, mock_args(enable=True, use_rest=True), {'ip_address': '3.3.3.3'})['changed'] + + +def test_non_existing_sp_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'cluster/nodes', SRR['empty_records']) + ]) + error = 'Error No Service Processor for node: test-vsim1' + assert create_and_apply(sp_module, mock_args(enable=True, use_rest=True), fail=True)['msg'] + + +def test_if_all_methods_catch_exception_rest(): + ''' test error zapi - get/modify''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'cluster/nodes', SRR['generic_error']), + ('PATCH', 'cluster/nodes/5dd7aed0', SRR['generic_error']) + ]) + sp_obj = create_module(sp_module, mock_args(use_rest=True)) + sp_obj.uuid = '5dd7aed0' + assert 'Error fetching service processor network info' in expect_and_capture_ansible_exception(sp_obj.get_service_processor_network, 'fail')['msg'] + assert 'Error modifying service processor network' in expect_and_capture_ansible_exception(sp_obj.modify_service_processor_network, 'fail', {})['msg'] + + +def test_disable_sp_rest(): + ''' disable not supported in REST ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'cluster/nodes', SRR['sp_enabled_info']) + ]) + error = 'Error: disable service processor network status not allowed in REST' + assert error in create_and_apply(sp_module, mock_args(enable=True, use_rest=True), {'is_enabled': False}, 'fail')['msg'] + + +def test_enable_sp_rest_without_ip_or_dhcp(): + ''' enable requires ip or dhcp in REST ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'cluster/nodes', SRR['sp_disabled_info']) + ]) + error = 'Error: 
enable service processor network requires dhcp or ip_address,netmask,gateway details in REST.' + assert error in create_and_apply(sp_module, mock_args(use_rest=True), {'is_enabled': True}, 'fail')['msg'] + + +@patch('time.sleep') +def test_wait_on_sp_status_rest(sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'cluster/nodes', SRR['sp_disabled_info']), + ('PATCH', 'cluster/nodes/5dd7aed0', SRR['success']), + ('GET', 'cluster/nodes', SRR['sp_disabled_info']), + ('GET', 'cluster/nodes', SRR['sp_disabled_info']), + ('GET', 'cluster/nodes', SRR['sp_enabled_info']) + ]) + args = {'ip_address': '1.1.1.1', 'wait_for_completion': True} + assert create_and_apply(sp_module, mock_args(enable=True, use_rest=True), args)['changed'] + + +def test_error_dhcp_for_address_type_ipv6(): + ''' dhcp cannot be disabled if manual interface options not set''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']) + ]) + error = 'Error: dhcp cannot be set for address_type: ipv6' + args = {'address_type': 'ipv6', 'dhcp': 'v4'} + assert error in create_module(sp_module, mock_args(use_rest=True), args, fail=True)['msg'] + + +def test_error_dhcp_enable_and_set_manual_options_rest(): + ''' dhcp enable and manual interface options set together''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']) + ]) + error = "Error: set dhcp v4 or all of 'ip_address, gateway_ip_address, netmask'." 
+ args = {'dhcp': 'v4'} + assert error in create_module(sp_module, mock_args(use_rest=True, enable=True), args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snaplock_clock.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snaplock_clock.py new file mode 100644 index 000000000..6177f2a29 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snaplock_clock.py @@ -0,0 +1,228 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP fpolicy ext engine Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snaplock_clock \ + import NetAppOntapSnaplockClock as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'snaplock_clock_set': + xml = self.build_snaplock_clock_info_set() + elif self.type == 'snaplock_clock_not_set': + xml = self.build_snaplock_clock_info_not_set() + elif self.type == 
'snaplock_clock_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_snaplock_clock_info_set(): + ''' build xml data for snaplock-get-node-compliance-clock ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'snaplock-node-compliance-clock': { + 'compliance-clock-info': { + 'formatted-snaplock-compliance-clock': 'Tue Mar 23 09:56:07 EDT 2021 -04:00' + } + } + } + xml.translate_struct(data) + return xml + + @staticmethod + def build_snaplock_clock_info_not_set(): + ''' build xml data for snaplock-get-node-compliance-clock ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'snaplock-node-compliance-clock': { + 'compliance-clock-info': { + 'formatted-snaplock-compliance-clock': 'ComplianceClock is not configured.' + } + } + } + xml.translate_struct(data) + return xml + + +def default_args(): + args = { + 'node': 'node1', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'snaplock_clock_set_record': (200, { + "records": [{ + 'node': 'node1', + 'time': 'Tue Mar 23 09:56:07 EDT 2021 -04:00' + }], + 'num_records': 1 + }, None), + 'snaplock_clock_not_set_record': (200, { + "records": [{ + 'node': 'node1', + 'time': 'ComplianceClock is not configured.' 
+ }], + 'num_records': 1 + }, None) + +} + + +def get_snaplock_clock_mock_object(cx_type='zapi', kind=None): + snaplock_clock_obj = my_module() + if cx_type == 'zapi': + if kind is None: + snaplock_clock_obj.server = MockONTAPConnection() + else: + snaplock_clock_obj.server = MockONTAPConnection(kind=kind) + return snaplock_clock_obj + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_ensure_get_called(patch_ansible): + ''' test get_snaplock_clock for non initialized clock''' + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + print('starting') + my_obj = my_module() + print('use_rest:', my_obj.use_rest) + my_obj.server = MockONTAPConnection(kind='snaplock_clock_not_set') + assert my_obj.get_snaplock_node_compliance_clock is not None + + +def test_rest_missing_arguments(patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' test for missing arguments ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snaplock_clock.NetAppOntapSnaplockClock.set_snaplock_node_compliance_clock') +def test_successful_initialize(self, patch_ansible): + ''' Initializing snaplock_clock and test idempotency ''' + args = dict(default_args()) + args['use_rest'] = 'never' + args['feature_flags'] = {'no_cserver_ems': True} + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection(kind='snaplock_clock_not_set') + with patch.object(my_module, 'set_snaplock_node_compliance_clock', wraps=my_obj.set_snaplock_node_compliance_clock) as mock_create: + with 
pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert exc.value.args[0]['changed'] + mock_create.assert_called_with() + # test idempotency + args = dict(default_args()) + args['use_rest'] = 'never' + args['feature_flags'] = {'no_cserver_ems': True} + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('snaplock_clock_set') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Create: ' + repr(exc.value)) + assert not exc.value.args[0]['changed'] + + +def test_if_all_methods_catch_exception(patch_ansible): + args = dict(default_args()) + args['use_rest'] = 'never' + set_module_args(args) + my_obj = my_module() + my_obj.server = MockONTAPConnection('snaplock_clock_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.set_snaplock_node_compliance_clock() + assert 'Error setting snaplock compliance clock for node ' in exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_initialize(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Initialize snaplock clock ''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['snaplock_clock_not_set_record'], # get + SRR['empty_good'], # post + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_initialize_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Initialize snaplock clock idempotent ''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + 
SRR['snaplock_clock_set_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror.py new file mode 100644 index 000000000..9ba179279 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror.py @@ -0,0 +1,1894 @@ +''' unit tests ONTAP Ansible module: na_ontap_snapmirror ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_no_warnings, assert_warning_was_raised, expect_and_capture_ansible_exception, call_main, create_module, patch_ansible, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror \ + import NetAppONTAPSnapmirror as my_module, main as my_main + +HAS_SF_COMMON = True +try: + from solidfire.common import ApiServerError +except ImportError: + HAS_SF_COMMON = False + +if not HAS_SF_COMMON: + pytestmark = pytest.mark.skip('skipping as missing required solidfire.common') + +if 
not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +DEFAULT_ARGS = { + "hostname": "10.193.189.206", + "username": "admin", + "password": "netapp123", + "https": "yes", + "validate_certs": "no", + "state": "present", + "initialize": "True", + "relationship_state": "active", + "source_path": "svmsrc3:volsrc1", + "destination_path": "svmdst3:voldst1", + "relationship_type": "extended_data_protection" +} + + +def sm_rest_info(state, healthy, transfer_state=None, destination_path=DEFAULT_ARGS['destination_path']): + record = { + 'uuid': 'b5ee4571-5429-11ec-9779-005056b39a06', + 'destination': { + 'path': destination_path + }, + 'policy': { + 'name': 'MirrorAndVault' + }, + 'state': state, + 'healthy': healthy, + } + if transfer_state: + record['transfer'] = {'state': transfer_state} + if transfer_state == 'transferring': + record['transfer']['uuid'] = 'xfer_uuid' + if healthy is False: + record['unhealthy_reason'] = 'this is why the relationship is not healthy.' + record['transfer_schedule'] = {'name': 'abc'} + + return { + 'records': [record], + 'num_records': 1 + } + + +sm_policies = { + # We query only on the policy name, as it can be at the vserver or cluster scope. + # So we can have ghost records from other SVMs. 
+ 'records': [ + { + 'type': 'sync', + 'svm': {'name': 'other'} + }, + { + 'type': 'async', + 'svm': {'name': 'svmdst3'} + }, + { + 'type': 'svm_invalid', + 'svm': {'name': 'bad_type'} + }, + { + 'type': 'system_invalid', + }, + ], + 'num_records': 4, +} + + +svm_peer_info = { + 'records': [{ + 'peer': { + 'svm': {'name': 'vserver'}, + 'cluster': {'name': 'cluster'}, + } + }] +} + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'sm_get_uninitialized': (200, sm_rest_info('uninitialized', True), None), + 'sm_get_uninitialized_xfering': (200, sm_rest_info('uninitialized', True, 'transferring'), None), + 'sm_get_mirrored': (200, sm_rest_info('snapmirrored', True, 'success'), None), + 'sm_get_restore': (200, sm_rest_info('snapmirrored', True, 'success', destination_path=DEFAULT_ARGS['source_path']), None), + 'sm_get_paused': (200, sm_rest_info('paused', True, 'success'), None), + 'sm_get_broken': (200, sm_rest_info('broken_off', True, 'success'), None), + 'sm_get_data_transferring': (200, sm_rest_info('transferring', True, 'transferring'), None), + 'sm_get_abort': (200, sm_rest_info('sm_get_abort', False, 'failed'), None), + 'sm_get_resync': (200, { + 'uuid': 'b5ee4571-5429-11ec-9779-005056b39a06', + 'description': 'PATCH /api/snapmirror/relationships/1c4467ca-5434-11ec-9779-005056b39a06', + 'state': 'success', + 'message': 'success', + 'code': 0, + }, None), + 'job_status': (201, { + 'job': { + 'uuid': '3a23a60e-542c-11ec-9779-005056b39a06', + '_links': { + 'self': { + 'href': '/api/cluster/jobs/3a23a60e-542c-11ec-9779-005056b39a06' + } + } + } + }, None), + 'sm_policies': (200, sm_policies, None), + 'svm_peer_info': (200, svm_peer_info, None), +}) + + +def sm_info(mirror_state, status, quiesce_status, relationship_type='extended_data_protection', source='ansible:volsrc1'): + + return { + 'num-records': 1, + 'status': quiesce_status, + 'attributes-list': { + 'snapmirror-info': { + 'mirror-state': mirror_state, + 'schedule': None, + 
'source-location': source, + 'relationship-status': status, + 'policy': 'ansible_policy', + 'relationship-type': relationship_type, + 'max-transfer-rate': 10000, + 'identity-preserve': 'true', + 'last-transfer-error': 'last_transfer_error', + 'is-healthy': 'true', + 'unhealthy-reason': 'unhealthy_reason', + }, + 'snapmirror-destination-info': { + 'destination-location': 'ansible' + } + } + } + + +# we only test for existence, contents do not matter +volume_info = { + 'num-records': 1, +} + + +vserver_peer_info = { + 'num-records': 1, + 'attributes-list': { + 'vserver-peer-info': { + 'remote-vserver-name': 'svmsrc3', + 'peer-cluster': 'cluster', + } + } +} + + +ZRR = zapi_responses({ + 'sm_info': build_zapi_response(sm_info(None, 'idle', 'passed')), + 'sm_info_broken_off': build_zapi_response(sm_info('broken_off', 'idle', 'passed')), + 'sm_info_snapmirrored': build_zapi_response(sm_info('snapmirrored', 'idle', 'passed')), + 'sm_info_snapmirrored_from_element': build_zapi_response(sm_info('snapmirrored', 'idle', 'passed', source='10.10.10.11:/lun/1000')), + 'sm_info_snapmirrored_to_element': build_zapi_response(sm_info('snapmirrored', 'idle', 'passed', source='svmsrc3:volsrc1')), + 'sm_info_snapmirrored_load_sharing': build_zapi_response(sm_info('snapmirrored', 'idle', 'passed', 'load_sharing')), + 'sm_info_snapmirrored_vault': build_zapi_response(sm_info('snapmirrored', 'idle', 'passed', 'vault')), + 'sm_info_snapmirrored_quiesced': build_zapi_response(sm_info('snapmirrored', 'quiesced', 'passed')), + 'sm_info_uninitialized': build_zapi_response(sm_info('uninitialized', 'idle', 'passed')), + 'sm_info_uninitialized_load_sharing': build_zapi_response(sm_info('uninitialized', 'idle', 'passed', 'load_sharing')), + 'volume_info': build_zapi_response(volume_info), + 'vserver_peer_info': build_zapi_response(vserver_peer_info) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + msg = "missing required arguments: 
hostname" + assert create_module(my_module, {}, fail=True)['msg'] == msg + + +def test_module_fail_unsuuported_rest_options(): + ''' required arguments are reported as errors ''' + module_args = { + "use_rest": "never", + "create_destination": {"enabled": True}, + } + errors = [ + 'Error: using any of', + 'create_destination', + 'requires ONTAP 9.7 or later and REST must be enabled - using ZAPI.' + ] + for error in errors: + assert error in create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +if netapp_utils.has_netapp_lib(): + zapi_create_responses = [ + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), # ONTAP to ONTAP + ('ZAPI', 'snapmirror-create', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-initialize', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check status + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ] +else: + zapi_create_responses = [] + + +def test_negative_zapi_unsupported_options(): + ''' ZAPI unsupported options ''' + register_responses([ + ]) + module_args = { + "use_rest": "never", + "identity_preservation": "full" + } + msg = "Error: The option identity_preservation is supported only with REST." 
+ assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +@patch('time.sleep') +def test_successful_create_with_source(dont_sleep): + ''' creating snapmirror and testing idempotency ''' + # earlier versions of pythons don't support *zapi_create_responses + responses = list(zapi_create_responses) + responses.extend([ + # idempotency + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # ONTAP to ONTAP + ('ZAPI', 'vserver-peer-get-iter', ZRR['vserver_peer_info']), # validate source svm + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # ONTAP to ONTAP, check for update + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + register_responses(responses) + module_args = { + "use_rest": "never", + "source_hostname": "10.10.10.10", + "schedule": "abc", + "identity_preserve": True, + "relationship_type": "data_protection", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args.pop('schedule') + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_successful_create_with_peer(dont_sleep): + ''' creating snapmirror and testing idempotency ''' + register_responses(zapi_create_responses) + module_args = { + "use_rest": "never", + "peer_options": {"hostname": "10.10.10.10"}, + "schedule": "abc", + "identity_preserve": True, + "relationship_type": "data_protection", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_negative_break(dont_sleep): + ''' breaking snapmirror to test quiesce time-delay failure ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'vserver-peer-get-iter', ZRR['vserver_peer_info']), # validate source svm + ('ZAPI', 'snapmirror-quiesce', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # 5 retries + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 
'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ]) + module_args = { + "use_rest": "never", + "source_hostname": "10.10.10.10", + "relationship_state": "broken", + "relationship_type": "data_protection", + } + msg = "Taking a long time to quiesce SnapMirror relationship, try again later" + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +@patch('time.sleep') +def test_successful_break(dont_sleep): + ''' breaking snapmirror and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'vserver-peer-get-iter', ZRR['vserver_peer_info']), # validate source svm + ('ZAPI', 'snapmirror-quiesce', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_quiesced']), + ('ZAPI', 'snapmirror-break', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + # idempotency + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_broken_off']), + ('ZAPI', 'vserver-peer-get-iter', ZRR['vserver_peer_info']), # validate source svm + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + module_args = { + "use_rest": "never", + "source_hostname": "10.10.10.10", + "relationship_state": "broken", + "relationship_type": "data_protection", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_create_without_initialize(): + ''' creating snapmirror and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), # ONTAP to ONTAP + ('ZAPI', 'snapmirror-create', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + # idempotency + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # ONTAP to ONTAP + ('ZAPI', 'vserver-peer-get-iter', ZRR['vserver_peer_info']), # validate source svm + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # 
ONTAP to ONTAP, check for update + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + module_args = { + "use_rest": "never", + "source_hostname": "10.10.10.10", + "schedule": "abc", + "relationship_type": "data_protection", + "initialize": False, + "policy": 'ansible_policy', + "max_transfer_rate": 10000, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args.pop('schedule') + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_set_source_peer(): + module_args = { + 'connection_type': 'ontap_elementsw' + } + error = 'Error: peer_options are required to identify ONTAP cluster with connection_type: ontap_elementsw' + assert error in create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args = { + 'connection_type': 'elementsw_ontap' + } + error = 'Error: peer_options are required to identify SolidFire cluster with connection_type: elementsw_ontap' + assert error in create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.create_sf_connection') +def test_set_element_connection(mock_create_sf_cx): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'peer_options': {'hostname': 'any'} + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + elementsw_helper, elem = my_obj.set_element_connection('source') + assert elementsw_helper is not None + assert elem is not None + elementsw_helper, elem = my_obj.set_element_connection('destination') + assert elementsw_helper is not None + assert elem is not None + + +@patch('time.sleep') +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.set_element_connection') +def test_successful_element_ontap_create(connection, dont_sleep): + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), # element to ONTAP + ('ZAPI', 
'snapmirror-create', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-initialize', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check status + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + # idempotency + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_from_element']), # element to ONTAP + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # element to ONTAP, check for update + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + mock_elem, mock_helper = Mock(), Mock() + connection.return_value = mock_helper, mock_elem + mock_elem.get_cluster_info.return_value.cluster_info.svip = '10.10.10.11' + module_args = { + "use_rest": "never", + "source_hostname": "10.10.10.10", + "connection_type": "elementsw_ontap", + "schedule": "abc", + "source_path": "10.10.10.11:/lun/1000", + "relationship_type": "data_protection", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args.pop('schedule') + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.set_element_connection') +def test_successful_ontap_element_create(connection, dont_sleep): + ''' check elementsw parameters for source ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # an existing relationship is required element to ONTAP + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), # ONTAP to element + ('ZAPI', 'snapmirror-create', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-initialize', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check status + # idempotency + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # an existing relationship is required element to ONTAP + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_to_element']), # ONTAP 
to element + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # ONTAP to element, check for update + ]) + mock_elem, mock_helper = Mock(), Mock() + connection.return_value = mock_helper, mock_elem + mock_elem.get_cluster_info.return_value.cluster_info.svip = '10.10.10.11' + module_args = { + "use_rest": "never", + "source_hostname": "10.10.10.10", + "connection_type": "ontap_elementsw", + "schedule": "abc", + "destination_path": "10.10.10.11:/lun/1000", + "relationship_type": "data_protection", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args.pop('schedule') + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_successful_delete(dont_sleep): + ''' deleting snapmirror and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'vserver-peer-get-iter', ZRR['vserver_peer_info']), # validate source svm + ('ZAPI', 'snapmirror-quiesce', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_quiesced']), + ('ZAPI', 'snapmirror-break', ZRR['success']), + ('ZAPI', 'snapmirror-get-destination-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-release', ZRR['success']), + ('ZAPI', 'snapmirror-destroy', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), # check health + # idempotency + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), # check health + ]) + module_args = { + "use_rest": "never", + "state": "absent", + "source_hostname": "10.10.10.10", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_successful_delete_without_source_hostname_check(dont_sleep): + ''' source cluster hostname is optional when source is unknown''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 
'vserver-peer-get-iter', ZRR['vserver_peer_info']), # validate source svm + ('ZAPI', 'snapmirror-quiesce', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_quiesced']), + ('ZAPI', 'snapmirror-break', ZRR['success']), + ('ZAPI', 'snapmirror-destroy', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), # check health + ]) + module_args = { + "use_rest": "never", + "state": "absent", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_successful_delete_with_error_on_break(dont_sleep): + ''' source cluster hostname is optional when source is unknown''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-quiesce', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_quiesced']), + ('ZAPI', 'snapmirror-break', ZRR['error']), + ('ZAPI', 'snapmirror-destroy', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + module_args = { + "use_rest": "never", + "state": "absent", + "validate_source_path": False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + print_warnings() + assert_warning_was_raised('Ignored error(s): Error breaking SnapMirror relationship: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose') + + +@patch('time.sleep') +def test_negative_delete_error_with_error_on_break(dont_sleep): + ''' source cluster hostname is optional when source is unknown''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-quiesce', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_quiesced']), + ('ZAPI', 'snapmirror-break', ZRR['error']), + ('ZAPI', 'snapmirror-destroy', ZRR['error']), + ]) + module_args = { + "use_rest": "never", + "state": "absent", + "validate_source_path": False + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert 'Previous error(s): Error breaking SnapMirror relationship: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + assert 'Error deleting SnapMirror:' in error + + +def test_negative_delete_with_destination_path_missing(): + ''' with misisng destination_path''' + register_responses([ + ]) + args = dict(DEFAULT_ARGS) + args.pop('destination_path') + module_args = { + "use_rest": "never", + "state": "absent", + "source_hostname": "source_host", + } + msg = "Missing parameters: Source path or Destination path" + assert call_main(my_main, args, module_args, fail=True)['msg'] == msg + + +def test_successful_delete_check_get_destination(): + register_responses([ + ('ZAPI', 'snapmirror-get-destination-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-destination-iter', ZRR['no_records']), + ]) + module_args = { + "use_rest": "never", + "state": "absent", + "source_hostname": "source_host", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.set_source_cluster_connection() is None + assert my_obj.get_destination() + assert my_obj.get_destination() is None + + +def test_snapmirror_release(): + register_responses([ + ('ZAPI', 'snapmirror-release', ZRR['success']), + ]) + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + 
"source_volume": "source_volume", + "source_vserver": "source_vserver", + "destination_volume": "destination_volume", + "destination_vserver": "destination_vserver", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.set_source_cluster_connection() is None + assert my_obj.snapmirror_release() is None + + +def test_snapmirror_resume(): + ''' resuming snapmirror ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_quiesced']), + ('ZAPI', 'snapmirror-resume', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # update reads mirror_state + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + # idempotency test + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # update reads mirror_state + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + module_args = { + "use_rest": "never", + "relationship_type": "data_protection", + "validate_source_path": False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_snapmirror_restore(): + ''' restore snapmirror ''' + register_responses([ + ('ZAPI', 'snapmirror-restore', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + # idempotency test - TODO + ('ZAPI', 'snapmirror-restore', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + module_args = { + "use_rest": "never", + "relationship_type": "restore", + "source_snapshot": "source_snapshot", + "clean_up_failure": True, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # TODO: should be idempotent! But we don't read the current state! 
+ assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_successful_abort(dont_sleep): + ''' aborting snapmirror and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-quiesce', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_quiesced']), + ('ZAPI', 'snapmirror-break', ZRR['success']), + ('ZAPI', 'snapmirror-destroy', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + # idempotency test + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + module_args = { + "use_rest": "never", + "state": "absent", + "validate_source_path": False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify(): + ''' modifying snapmirror and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-modify', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # update reads mirror_state + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + # idempotency test + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # update reads mirror_state + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + module_args = { + "use_rest": "never", + "relationship_type": "data_protection", + "policy": "ansible2", + "schedule": "abc2", + "max_transfer_rate": 2000, + "validate_source_path": False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args = { + "use_rest": "never", + "relationship_type": "data_protection", + "validate_source_path": False + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def 
test_successful_initialize(dont_sleep): + ''' initialize snapmirror and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_uninitialized']), + ('ZAPI', 'snapmirror-initialize', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check status + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # update reads mirror_state + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + # 2nd run + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_uninitialized_load_sharing']), + ('ZAPI', 'snapmirror-initialize-ls-set', ZRR['success']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check status + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # update reads mirror_state + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + module_args = { + "use_rest": "never", + "relationship_type": "data_protection", + "validate_source_path": False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args = { + "use_rest": "never", + "relationship_type": "load_sharing", + "validate_source_path": False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_update(): + ''' update snapmirror and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored']), # update reads mirror_state + ('ZAPI', 'snapmirror-update', ZRR['success']), # update + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + # 2nd run + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_load_sharing']), + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_load_sharing']), # update reads mirror_state + ('ZAPI', 'snapmirror-update-ls-set', ZRR['success']), # update + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info']), # check health + ]) + module_args = { + "use_rest": "never", + "relationship_type": "data_protection", + "validate_source_path": 
False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args = { + "use_rest": "never", + "relationship_type": "load_sharing", + "validate_source_path": False + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.set_element_connection') +def test_elementsw_no_source_path(connection): + ''' elementsw_volume_exists ''' + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['no_records']), + ]) + mock_elem, mock_helper = Mock(), Mock() + connection.return_value = mock_helper, mock_elem + mock_elem.get_cluster_info.return_value.cluster_info.svip = '10.11.12.13' + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + "source_username": "source_user", + "connection_type": "ontap_elementsw", + "destination_path": "10.11.12.13:/lun/1234" + } + error = 'Error: creating an ONTAP to ElementSW snapmirror relationship requires an established SnapMirror relation from ElementSW to ONTAP cluster' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_elementsw_volume_exists(): + ''' elementsw_volume_exists ''' + mock_helper = Mock() + mock_helper.volume_id_exists.side_effect = [1000, None] + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + "source_username": "source_user", + "source_path": "10.10.10.10:/lun/1000", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.check_if_elementsw_volume_exists('10.10.10.10:/lun/1000', mock_helper) is None + expect_and_capture_ansible_exception(my_obj.check_if_elementsw_volume_exists, 'fail', '10.10.10.11:/lun/1000', mock_helper) + mock_helper.volume_id_exists.side_effect = ApiServerError('function_name', {}) + error = 'Error fetching Volume details' + assert error in expect_and_capture_ansible_exception(my_obj.check_if_elementsw_volume_exists, 'fail', '1234', 
mock_helper)['msg'] + + +def test_elementsw_svip_exists(): + ''' svip_exists ''' + mock_elem = Mock() + mock_elem.get_cluster_info.return_value.cluster_info.svip = '10.10.10.10' + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + "source_username": "source_user", + # "source_password": "source_password", + "source_path": "10.10.10.10:/lun/1000", + # "source_volume": "source_volume", + # "source_vserver": "source_vserver", + # "destination_volume": "destination_volume", + # "destination_vserver": "destination_vserver", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.validate_elementsw_svip('10.10.10.10:/lun/1000', mock_elem) is None + + +def test_elementsw_svip_exists_negative(): + ''' svip_exists negative testing''' + mock_elem = Mock() + mock_elem.get_cluster_info.return_value.cluster_info.svip = '10.10.10.10' + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + "source_username": "source_user", + "source_path": "10.10.10.10:/lun/1000", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + expect_and_capture_ansible_exception(my_obj.validate_elementsw_svip, 'fail', '10.10.10.11:/lun/1000', mock_elem) + mock_elem.get_cluster_info.side_effect = ApiServerError('function_name', {}) + error = 'Error fetching SVIP' + assert error in expect_and_capture_ansible_exception(my_obj.validate_elementsw_svip, 'fail', 'svip', mock_elem)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror.NetAppONTAPSnapmirror.set_element_connection') +def test_check_elementsw_params_source(connection): + ''' check elementsw parameters for source ''' + mock_elem, mock_helper = Mock(), Mock() + connection.return_value = mock_helper, mock_elem + mock_elem.get_cluster_info.return_value.cluster_info.svip = '10.10.10.10' + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + "source_username": "source_user", + "source_path": 
"10.10.10.10:/lun/1000", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.check_elementsw_parameters('source') is None + + +def test_check_elementsw_params_negative(): + ''' check elementsw parameters for source negative testing ''' + args = dict(DEFAULT_ARGS) + del args['source_path'] + module_args = { + "use_rest": "never", + } + msg = 'Error: Missing required parameter source_path' + my_obj = create_module(my_module, args, module_args) + assert msg in expect_and_capture_ansible_exception(my_obj.check_elementsw_parameters, 'fail', 'source')['msg'] + + +def test_check_elementsw_params_invalid(): + ''' check elementsw parameters for source invalid testing ''' + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + "source_volume": "source_volume", + "source_vserver": "source_vserver", + "destination_volume": "destination_volume", + "destination_vserver": "destination_vserver", + } + msg = 'Error: invalid source_path' + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert msg in expect_and_capture_ansible_exception(my_obj.check_elementsw_parameters, 'fail', 'source')['msg'] + + +def test_elementsw_source_path_format(): + ''' test element_source_path_format_matches ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['volume_info']), + ]) + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + "source_volume": "source_volume", + "source_vserver": "source_vserver", + "destination_volume": "destination_volume", + "destination_vserver": "destination_vserver", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.check_if_remote_volume_exists() + assert my_obj.element_source_path_format_matches('1.1.1.1:dummy') is None + assert my_obj.element_source_path_format_matches('10.10.10.10:/lun/10') is not None + + +def test_remote_volume_exists(): + ''' test check_if_remote_volume_exists ''' + register_responses([ + ('ZAPI', 'volume-get-iter', 
ZRR['volume_info']), + ]) + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + "source_volume": "source_volume", + "source_vserver": "source_vserver", + "destination_volume": "destination_volume", + "destination_vserver": "destination_vserver", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.check_if_remote_volume_exists() + + +@patch('time.sleep') +def test_if_all_methods_catch_exception(dont_sleep): + module_args = { + "use_rest": "never", + "source_hostname": "source_host", + "source_volume": "source_volume", + "source_vserver": "source_vserver", + "destination_volume": "destination_volume", + "destination_vserver": "destination_vserver", + } + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_quiesced']), + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + my_obj.source_server = my_obj.server # for get_destination + tests = [ + (my_obj.check_if_remote_volume_exists, [('volume-get-iter', 'error')], 'Error fetching source volume details source_volume:'), + (my_obj.get_destination, [('snapmirror-get-destination-iter', 'error')], 'Error fetching snapmirror destinations info:'), + (my_obj.get_svm_peer, [('vserver-peer-get-iter', 'error')], 'Error fetching vserver peer info:'), + (my_obj.snapmirror_abort, [('snapmirror-abort', 'error')], 'Error aborting SnapMirror relationship:'), + (my_obj.snapmirror_break, [('snapmirror-quiesce', 'success'), ('snapmirror-get-iter', 'sm_info_snapmirrored_quiesced'), ('snapmirror-break', 'error')], + 'Error breaking SnapMirror relationship:'), + (my_obj.snapmirror_create, [('volume-get-iter', 'success')], 'Source volume does not exist. 
Please specify a volume that exists'), + (my_obj.snapmirror_create, [('volume-get-iter', 'volume_info'), ('snapmirror-create', 'error')], 'Error creating SnapMirror'), + (my_obj.snapmirror_delete, [('snapmirror-destroy', 'error')], 'Error deleting SnapMirror:'), + (my_obj.snapmirror_get, [('snapmirror-get-iter', 'error')], 'Error fetching snapmirror info:'), + (my_obj.snapmirror_initialize, [('snapmirror-get-iter', 'sm_info'), ('snapmirror-initialize', 'error')], 'Error initializing SnapMirror:'), + (my_obj.snapmirror_modify, [('snapmirror-modify', 'error')], 'Error modifying SnapMirror schedule or policy:'), + (my_obj.snapmirror_quiesce, [('snapmirror-quiesce', 'error')], 'Error quiescing SnapMirror:'), + (my_obj.snapmirror_release, [('snapmirror-release', 'error')], 'Error releasing SnapMirror relationship:'), + (my_obj.snapmirror_resume, [('snapmirror-resume', 'error')], 'Error resuming SnapMirror relationship:'), + (my_obj.snapmirror_restore, [('snapmirror-restore', 'error')], 'Error restoring SnapMirror relationship:'), + (my_obj.snapmirror_resync, [('snapmirror-resync', 'error')], 'Error resyncing SnapMirror relationship:'), + (my_obj.snapmirror_update, [('snapmirror-update', 'error')], 'Error updating SnapMirror:'), + ] + for (function, zapis, error) in tests: + calls = [('ZAPI', zapi[0], ZRR[zapi[1]]) for zapi in zapis] + register_responses(calls) + if function in (my_obj.get_svm_peer,): + assert error in expect_and_capture_ansible_exception(function, 'fail', 's_svm', 'd_svm')['msg'] + elif function in (my_obj.snapmirror_update, my_obj.snapmirror_modify): + assert error in expect_and_capture_ansible_exception(function, 'fail', {})['msg'] + else: + assert error in expect_and_capture_ansible_exception(function, 'fail')['msg'] + + +@patch('time.sleep') +def test_successful_rest_create(dont_sleep): + ''' creating snapmirror and testing idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'snapmirror/relationships', 
SRR['zero_records']), + ('POST', 'snapmirror/relationships', SRR['success']), + ('GET', 'snapmirror/relationships', SRR['sm_get_uninitialized']), + ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']), + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), # check initialized + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), # check health + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), # check health + ]) + module_args = { + "use_rest": "always", + "schedule": "abc", + "identity_preservation": "full" + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['update'] = False + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_rest_create(): + ''' creating snapmirror with unsupported REST options ''' + module_args = { + "use_rest": "always", + "identity_preserve": True, + "schedule": "abc", + "relationship_type": "data_protection", + } + msg = "REST API currently does not support 'identity_preserve, relationship_type: data_protection'" + assert create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_negative_rest_create_schedule_not_supported(): + ''' creating snapmirror with unsupported REST options ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + "use_rest": "always", + "schedule": "abc", + } + msg = "Error: Minimum version of ONTAP for schedule is (9, 11, 1). Current version: (9, 8, 0)."\ + " - With REST use the policy option to define a schedule." 
+ assert create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_negative_rest_create_identity_preservation_not_supported(): + ''' creating snapmirror with unsupported REST options ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + "use_rest": "always", + "identity_preservation": "full", + } + msg = "Error: Minimum version of ONTAP for identity_preservation is (9, 11, 1). Current version: (9, 8, 0)." + error = create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error == msg + + +def test_negative_rest_get_error(): + ''' creating snapmirror with API error ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'snapmirror/relationships', SRR['generic_error']), + ]) + module_args = { + "use_rest": "always", + } + msg = "Error getting SnapMirror svmdst3:voldst1: calling: snapmirror/relationships: got Expected error." + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_negative_rest_create_error(): + ''' creating snapmirror with API error ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'snapmirror/relationships', SRR['zero_records']), + ('POST', 'snapmirror/relationships', SRR['generic_error']), + ]) + module_args = { + "use_rest": "always", + } + msg = "Error creating SnapMirror: calling: snapmirror/relationships: got Expected error." 
+ assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +@patch('time.sleep') +def test_rest_snapmirror_initialize(dont_sleep): + ''' snapmirror initialize testing ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'snapmirror/relationships', SRR['sm_get_uninitialized_xfering']), + ('GET', 'snapmirror/relationships', SRR['sm_get_uninitialized_xfering']), + ('GET', 'snapmirror/relationships', SRR['sm_get_uninitialized']), + ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']), # Inside SM init patch response + ('GET', 'snapmirror/relationships', SRR['sm_get_data_transferring']), # get to check status after initialize + ('GET', 'snapmirror/relationships', SRR['sm_get_data_transferring']), # get to check status after initialize + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), # get to check status after initialize + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), # check for update + ('POST', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06/transfers', SRR['success']), # update + ('GET', 'snapmirror/relationships', SRR['sm_get_uninitialized']), # check_health calls sm_get + ]) + module_args = { + "use_rest": "always", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_snapmirror_update(): + ''' snapmirror initialize testing ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), # first sm_get + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), # apply update calls again sm_get + ('POST', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06/transfers', SRR['success']), # sm update + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), # check_health calls sm_get + ]) + module_args = { + "use_rest": "always", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + 
@patch('time.sleep')
def test_rest_sm_break_success_no_data_transfer(dont_sleep):
    ''' breaking the snapmirror relationship when no data is transferring '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # apply first sm_get with no data transfer
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # SM quiesce response to pause
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # sm quiesce api fn calls again sm_get
        # sm quiesce validate the state which calls sm_get
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),
        # sm quiesce validate the state which calls sm_get after wait
        ('GET', 'snapmirror/relationships', SRR['sm_get_paused']),
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm break response
        ('GET', 'snapmirror/relationships', SRR['sm_get_paused']),      # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
        "relationship_state": "broken",
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_rest_sm_break_success_no_data_transfer_idempotency():
    ''' breaking the snapmirror relationship when no data is transferring - idempotency '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_broken']),      # apply first sm_get with no data transfer
        ('GET', 'snapmirror/relationships', SRR['sm_get_broken']),      # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
        "relationship_state": "broken",
    }
    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_rest_sm_break_fails_if_uninit():
    ''' testing snapmirror break fails if sm state uninitialized '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        # apply first sm_get with state uninitialized
        ('GET', 'snapmirror/relationships', SRR['sm_get_uninitialized']),
    ])
    module_args = {
        "use_rest": "always",
        "relationship_state": "broken",
    }
    msg = "SnapMirror relationship cannot be broken if mirror state is uninitialized"
    assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


def test_rest_sm_break_fails_if_load_sharing_or_vault():
    ''' testing snapmirror break fails for load_sharing or vault types (ZAPI only) '''
    register_responses([
        ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_load_sharing']),
        ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored_vault']),
    ])
    module_args = {
        "use_rest": "never",
        "relationship_state": "broken",
        "relationship_type": "load_sharing",
        "validate_source_path": False
    }
    msg = "SnapMirror break is not allowed in a load_sharing or vault relationship"
    assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg
    module_args['relationship_type'] = 'vault'
    assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


@patch('time.sleep')
def test_rest_snapmirror_quiesce_fail_when_state_not_paused(dont_sleep):
    ''' quiesce times out when the relationship never reaches the paused state '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # apply first sm_get with no data transfer
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # SM quiesce response
        # SM quiesce validate the state which calls sm_get after wait
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # first fail
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # second fail
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # third fail
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # fourth fail
    ])
    module_args = {
        "use_rest": "always",
        "relationship_state": "broken",
        "validate_source_path": False
    }
    msg = "Taking a long time to quiesce SnapMirror relationship, try again later"
    assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


def test_rest_snapmirror_break_fails_if_data_is_transferring():
    ''' testing snapmirror break fails when data is transferring '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        # apply first sm_get with data transfer
        ('GET', 'snapmirror/relationships', SRR['sm_get_data_transferring']),
    ])
    module_args = {
        "use_rest": "always",
        "relationship_state": "broken",
    }
    msg = "snapmirror data are transferring"
    assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


@patch('time.sleep')
def test_rest_resync_when_state_is_broken(dont_sleep):
    ''' resync when snapmirror state is broken and relationship_state active '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_broken']),      # apply first sm_get with state broken_off
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm resync response
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # check for idle
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_rest_resume_when_state_quiesced():
    ''' resume when snapmirror state is quiesced (paused) and relationship_state active '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_paused']),      # apply first sm_get with state quiesced
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm resync response
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # sm update calls sm_get
        ('POST', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06/transfers', SRR['success']),    # sm update response
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


@patch('time.sleep')
def test_rest_snapmirror_delete(dont_sleep):
    ''' snapmirror delete '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # apply first sm_get with no data transfer
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm quiesce response
        # sm quiesce validate the state which calls sm_get
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),
        # sm quiesce validate the state which calls sm_get after wait with 0 iter
        ('GET', 'snapmirror/relationships', SRR['sm_get_paused']),
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm break response
        ('DELETE', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),    # sm delete response
        ('GET', 'snapmirror/relationships', SRR['zero_records']),       # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
        "state": "absent",
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


@patch('time.sleep')
def test_rest_snapmirror_delete_with_error_on_break(dont_sleep):
    ''' snapmirror delete: a break failure is only a warning, delete still proceeds '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # apply first sm_get with no data transfer
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm quiesce response
        # sm quiesce validate the state which calls sm_get
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),
        # sm quiesce validate the state which calls sm_get after wait with 0 iter
        ('GET', 'snapmirror/relationships', SRR['sm_get_paused']),
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['generic_error']),   # sm break response
        ('DELETE', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),    # sm delete response
        ('GET', 'snapmirror/relationships', SRR['zero_records']),       # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
        "state": "absent",
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    print_warnings()
    assert_warning_was_raised("Ignored error(s): Error patching SnapMirror: {'state': 'broken_off'}: "
                              "calling: snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06: got Expected error.")


@patch('time.sleep')
def test_rest_snapmirror_delete_with_error_on_break_and_delete(dont_sleep):
    ''' snapmirror delete: break error is reported alongside the delete error '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # apply first sm_get with no data transfer
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm quiesce response
        # sm quiesce validate the state which calls sm_get
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),
        # sm quiesce validate the state which calls sm_get after wait with 0 iter
        ('GET', 'snapmirror/relationships', SRR['sm_get_paused']),
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['generic_error']),   # sm break response
        ('DELETE', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['generic_error']),  # sm delete response
    ])
    module_args = {
        "use_rest": "always",
        "state": "absent",
    }
    error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
    print_warnings()
    assert "Previous error(s): Error patching SnapMirror: {'state': 'broken_off'}" in error
    assert "Error deleting SnapMirror: calling: snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06: got Expected error" in error


@patch('time.sleep')
def test_rest_snapmirror_delete_calls_abort(dont_sleep):
    ''' snapmirror delete calls abort when transfer state is in transferring'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        # apply first sm_get with data transfer
        ('GET', 'snapmirror/relationships', SRR['sm_get_data_transferring']),
        # abort
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06/transfers/xfer_uuid', SRR['empty_good']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_abort']),       # wait_for_status calls again sm_get
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm quiesce response
        # sm quiesce validate the state which calls sm_get
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),
        # sm quiesce validate the state which calls sm_get after wait with 0 iter
        ('GET', 'snapmirror/relationships', SRR['sm_get_paused']),
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm break response
        ('DELETE', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),    # sm delete response
        ('GET', 'snapmirror/relationships', SRR['zero_records']),       # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
        "state": "absent",
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_rest_snapmirror_modify():
    ''' snapmirror modify'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_11_1']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # apply first sm_get
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm modify response
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # sm update calls sm_get to check mirror state
        ('POST', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06/transfers', SRR['success']),    # sm update response
        ('GET', 'snapmirror/relationships', SRR['zero_records']),       # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
        "policy": "Asynchronous",
        "schedule": "abcdef",
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_rest_snapmirror_modify_warning():
    ''' passing the raw module options as modify raises a warning for unexpected keys '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_11_1']),
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # apply first sm_get
        ('PATCH', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06', SRR['success']),     # sm modify response
    ])
    module_args = {
        "use_rest": "always",
        "policy": "Asynchronous",
        "schedule": "abcdef",
    }
    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
    assert my_obj.snapmirror_mod_init_resync_break_quiesce_resume_rest(modify=module_args) is None
    print_warnings()
    assert_warning_was_raised('Unexpected key in modify: use_rest, value: always')


def test_rest_snapmirror_restore():
    ''' snapmirror restore '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        # ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),  # apply first sm_get
        ('POST', 'snapmirror/relationships', SRR['success']),           # first post response
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # After first post call to get relationship uuid
        ('POST', 'snapmirror/relationships/b5ee4571-5429-11ec-9779-005056b39a06/transfers', SRR['success']),    # second post response
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
        "relationship_type": "restore",
        "source_snapshot": "source_snapshot",
        "clean_up_failure": False,
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_rest_error_snapmirror_create_and_initialize_not_found():
    ''' snapmirror create with create_destination fails when the policy is not found '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['zero_records']),       # apply first sm_get
        ('GET', 'snapmirror/policies', SRR['zero_records']),            # policy not found
    ])
    module_args = {
        "use_rest": "always",
        "create_destination": {"enabled": True},
        "policy": "sm_policy"
    }
    error = 'Error: cannot find policy sm_policy for vserver svmdst3'
    assert error == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']


def test_rest_error_snapmirror_create_and_initialize_bad_type():
    ''' snapmirror create with create_destination fails on unexpected policy type '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['zero_records']),       # apply first sm_get
        ('GET', 'snapmirror/policies', SRR['sm_policies']),             # policy
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['zero_records']),       # apply first sm_get
        ('GET', 'snapmirror/policies', SRR['sm_policies']),             # policy
    ])
    module_args = {
        "use_rest": "always",
        "create_destination": {"enabled": True},
        "policy": "sm_policy",
        "destination_vserver": "bad_type",
        "source_vserver": "any"
    }
    error = 'Error: unexpected type: svm_invalid for policy sm_policy for vserver bad_type'
    assert error == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
    module_args['destination_vserver'] = 'cluster_scope_only'
    error = 'Error: unexpected type: system_invalid for policy sm_policy for vserver cluster_scope_only'
    assert error == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']


def test_rest_errors():
    ''' generic REST errors '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        # set_initialization_state
        ('GET', 'snapmirror/policies', SRR['generic_error']),
        # snapmirror_restore_rest
        ('POST', 'snapmirror/relationships', SRR['generic_error']),
        # snapmirror_restore_rest
        ('POST', 'snapmirror/relationships', SRR['success']),
        ('POST', 'snapmirror/relationships/1234/transfers', SRR['generic_error']),
        # snapmirror_mod_init_resync_break_quiesce_resume_rest
        ('PATCH', 'snapmirror/relationships/1234', SRR['generic_error']),
        # snapmirror_update_rest
        ('POST', 'snapmirror/relationships/1234/transfers', SRR['generic_error']),
        # snapmirror_abort_rest
        ('PATCH', 'snapmirror/relationships/1234/transfers/5678', SRR['generic_error']),
    ])
    module_args = {
        "use_rest": "always",
        "policy": "policy"
    }
    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
    error = rest_error_message("Error fetching SnapMirror policy", 'snapmirror/policies')
    assert error in expect_and_capture_ansible_exception(my_obj.set_initialization_state, 'fail')['msg']
    my_obj.parameters['uuid'] = '1234'
    my_obj.parameters['transfer_uuid'] = '5678'
    error = rest_error_message("Error restoring SnapMirror", 'snapmirror/relationships')
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_restore_rest, 'fail')['msg']
    error = rest_error_message("Error restoring SnapMirror Transfer", 'snapmirror/relationships/1234/transfers')
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_restore_rest, 'fail')['msg']
    my_obj.na_helper.changed = True
    # a no-op modify resets changed to False
    assert my_obj.snapmirror_mod_init_resync_break_quiesce_resume_rest() is None
    assert not my_obj.na_helper.changed
    error = rest_error_message("Error patching SnapMirror: {'state': 'broken_off'}", 'snapmirror/relationships/1234')
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_mod_init_resync_break_quiesce_resume_rest, 'fail', 'broken_off')['msg']
    error = rest_error_message('Error updating SnapMirror relationship', 'snapmirror/relationships/1234/transfers')
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_update_rest, 'fail')['msg']
    error = rest_error_message('Error aborting SnapMirror', 'snapmirror/relationships/1234/transfers/5678')
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_abort_rest, 'fail')['msg']


def test_rest_error_no_uuid():
    ''' REST helpers fail cleanly when the relationship UUID cannot be found '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        # snapmirror_restore_rest
        ('POST', 'snapmirror/relationships', SRR['success']),
        ('GET', 'snapmirror/relationships', SRR['zero_records']),
        # snapmirror_mod_init_resync_break_quiesce_resume_rest
        ('GET', 'snapmirror/relationships', SRR['zero_records']),
        # snapmirror_update_rest
        ('GET', 'snapmirror/relationships', SRR['zero_records']),
        # others, no call
    ])
    module_args = {
        "use_rest": "always",
    }
    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
    error = 'Error restoring SnapMirror: unable to get UUID for the SnapMirror relationship.'
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_restore_rest, 'fail')['msg']
    error = 'Error in updating SnapMirror relationship: unable to get UUID for the SnapMirror relationship.'
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_mod_init_resync_break_quiesce_resume_rest, 'fail')['msg']
    error = 'Error in updating SnapMirror relationship: unable to get UUID for the SnapMirror relationship.'
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_update_rest, 'fail')['msg']
    error = 'Error in aborting SnapMirror: unable to get either uuid: None or transfer_uuid: None.'
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_abort_rest, 'fail')['msg']
    error = 'Error in deleting SnapMirror: None, unable to get UUID for the SnapMirror relationship.'
    assert error in expect_and_capture_ansible_exception(my_obj.snapmirror_delete_rest, 'fail')['msg']


@patch('time.sleep')
def test_rest_snapmirror_create_and_initialize(dont_sleep):
    ''' snapmirror create with create_destination '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'snapmirror/relationships', SRR['zero_records']),       # apply first sm_get
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
        ('GET', 'storage/volumes', SRR['one_record']),
        ('GET', 'snapmirror/policies', SRR['sm_policies']),             # policy
        ('POST', 'snapmirror/relationships', SRR['success']),           # first post response
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # check status
        ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']),    # check_health calls sm_get
    ])
    module_args = {
        "use_rest": "always",
        "create_destination": {"enabled": True},
        "policy": "sm_policy",
        # force a call to check_if_remote_volume_exists
        "peer_options": {"hostname": "10.10.10.10"},
        "source_volume": "source_volume",
        "source_vserver": "source_vserver",
        "destination_volume": "destination_volume",
        "destination_vserver": "svmdst3"

    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_set_new_style():
    # validate the old options are set properly using new endpoints
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_8_0']),
    ])
    args = dict(DEFAULT_ARGS)
    args.pop('source_path')
    args.pop('destination_path')
    module_args = {
        "use_rest": "always",
        "source_endpoint": {
            "cluster": "source_cluster",
            "consistency_group_volumes": "source_consistency_group_volumes",
            "path": "source_path",
            "svm": "source_svm",
        },
        "destination_endpoint": {
            "cluster": "destination_cluster",
            "consistency_group_volumes": "destination_consistency_group_volumes",
            "path": "destination_path",
            "svm": "destination_svm",
        },
    }
    my_obj = create_module(my_module, args, module_args)
    assert my_obj.set_new_style() is None
    # NOTE(review): this test continues beyond this chunk; the final line below is
    # the start of an assertion whose remainder follows on the next chunk.
    assert
my_obj.new_style + assert my_obj.parameters['destination_vserver'] == 'destination_svm' + assert my_obj.set_initialization_state() == 'in_sync' + + +def test_negative_set_new_style(): + # validate the old options are set properly using new endpoints + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster', SRR['is_rest_97']), + ]) + args = dict(DEFAULT_ARGS) + args.pop('source_path') + args.pop('destination_path') + module_args = { + "use_rest": "always", + "source_endpoint": { + "cluster": "source_cluster", + "consistency_group_volumes": "source_consistency_group_volumes", + "path": "source_path", + "svm": "source_svm", + }, + "destination_endpoint": { + "cluster": "destination_cluster", + "consistency_group_volumes": "destination_consistency_group_volumes", + "path": "destination_path", + "svm": "destination_svm", + }, + } + # errors on source_endpoint + my_obj = create_module(my_module, args, module_args) + error = expect_and_capture_ansible_exception(my_obj.set_new_style, 'fail')['msg'] + assert "Error: using any of ['cluster', 'ipspace'] requires ONTAP 9.7 or later and REST must be enabled" in error + assert "ONTAP version: 9.6.0 - using REST" in error + my_obj = create_module(my_module, args, module_args) + error = expect_and_capture_ansible_exception(my_obj.set_new_style, 'fail')['msg'] + assert "Error: using consistency_group_volumes requires ONTAP 9.8 or later and REST must be enabled" in error + assert "ONTAP version: 9.7.0 - using REST" in error + # errors on destination_endpoint + module_args['source_endpoint'].pop('cluster') + my_obj = create_module(my_module, args, module_args) + error = expect_and_capture_ansible_exception(my_obj.set_new_style, 'fail')['msg'] + assert "Error: using any of ['cluster', 'ipspace'] requires ONTAP 9.7 or later and REST must be enabled" in error + assert "ONTAP version: 9.6.0 
- using REST" in error + module_args['source_endpoint'].pop('consistency_group_volumes') + my_obj = create_module(my_module, args, module_args) + error = expect_and_capture_ansible_exception(my_obj.set_new_style, 'fail')['msg'] + assert "Error: using consistency_group_volumes requires ONTAP 9.8 or later and REST must be enabled" in error + assert "ONTAP version: 9.7.0 - using REST" in error + module_args.pop('source_endpoint') + module_args.pop('destination_endpoint') + my_obj = create_module(my_module, args, module_args) + error = expect_and_capture_ansible_exception(my_obj.set_new_style, 'fail')['msg'] + assert error == 'Missing parameters: Source endpoint or Destination endpoint' + + +def test_check_parameters_new_style(): + # validate the old options are set properly using new endpoints + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + args = dict(DEFAULT_ARGS) + args.pop('source_path') + args.pop('destination_path') + module_args = { + "use_rest": "always", + "source_endpoint": { + "cluster": "source_cluster", + "consistency_group_volumes": "source_consistency_group_volumes", + "path": "source_path", + "svm": "source_svm", + }, + "destination_endpoint": { + "cluster": "destination_cluster", + "consistency_group_volumes": "destination_consistency_group_volumes", + "path": "destination_path", + "svm": "destination_svm", + }, + } + my_obj = create_module(my_module, args, module_args) + assert my_obj.check_parameters() is None + assert my_obj.new_style + assert my_obj.parameters['destination_vserver'] == 'destination_svm' + + +def test_negative_check_parameters_new_style(): + # validate version checks + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'cluster', SRR['is_rest_97']), + ]) + args = dict(DEFAULT_ARGS) + args.pop('source_path') + args.pop('destination_path') + module_args = { + "use_rest": "always", + "source_endpoint": { + "cluster": "source_cluster", + "consistency_group_volumes": 
"source_consistency_group_volumes", + "path": "source_path", + "svm": "source_svm", + }, + "destination_endpoint": { + "cluster": "destination_cluster", + "consistency_group_volumes": "destination_consistency_group_volumes", + "path": "destination_path", + "svm": "destination_svm", + }, + "create_destination": {"enabled": True} + } + # errors on source_endpoint + error = 'Minimum version of ONTAP for create_destination is (9, 7).' + assert error in create_module(my_module, args, module_args, fail=True)['msg'] + my_obj = create_module(my_module, args, module_args) + error = expect_and_capture_ansible_exception(my_obj.check_parameters, 'fail')['msg'] + assert "Error: using consistency_group_volumes requires ONTAP 9.8 or later and REST must be enabled" in error + assert "ONTAP version: 9.7.0 - using REST" in error + module_args['destination_endpoint'].pop('path') + error = create_module(my_module, args, module_args, fail=True)['msg'] + assert "missing required arguments: path found in destination_endpoint" in error + + +def test_check_parameters_old_style(): + # validate the old options are set properly using new endpoints + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'cluster', SRR['is_rest_96']), + ]) + # using paths + module_args = { + "use_rest": "always", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.check_parameters() is None + assert not my_obj.new_style + # using volume and vserver, paths are constructed + args = dict(DEFAULT_ARGS) + args.pop('source_path') + args.pop('destination_path') + module_args = { + "use_rest": "always", + "source_volume": "source_vol", + "source_vserver": "source_svm", + "destination_volume": "dest_vol", + "destination_vserver": "dest_svm", + } + my_obj = create_module(my_module, args, module_args) + assert my_obj.check_parameters() is None + assert not my_obj.new_style + assert 
my_obj.parameters['source_path'] == "source_svm:source_vol" + assert my_obj.parameters['destination_path'] == "dest_svm:dest_vol" + # vserver DR + module_args = { + "use_rest": "always", + "source_vserver": "source_svm", + "destination_vserver": "dest_svm", + } + my_obj = create_module(my_module, args, module_args) + assert my_obj.check_parameters() is None + assert not my_obj.new_style + assert my_obj.parameters['source_path'] == "source_svm:" + assert my_obj.parameters['destination_path'] == "dest_svm:" + body, dummy = my_obj.get_create_body() + assert body["source"] == {"path": "source_svm:"} + module_args = { + "use_rest": "always", + "source_volume": "source_vol", + "source_vserver": "source_svm", + "destination_volume": "dest_vol", + "destination_vserver": "dest_svm", + } + my_obj = create_module(my_module, args, module_args) + my_obj.parameters.pop("source_vserver") + error = 'Missing parameters: source vserver or destination vserver or both' + assert error in expect_and_capture_ansible_exception(my_obj.check_parameters, 'fail')['msg'] + + +def test_validate_source_path(): + # validate source path when vserver local name is different + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peers', SRR['zero_records']), + ('GET', 'svm/peers', SRR['svm_peer_info']), + ('GET', 'svm/peers', SRR['svm_peer_info']), + # error + ('GET', 'svm/peers', SRR['generic_error']), + # warnings + ]) + # using paths + module_args = { + "use_rest": "always", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + current = None + assert my_obj.validate_source_path(current) is None + current = {} + assert my_obj.validate_source_path(current) is None + current = {'source_path': 'svmsrc3:volsrc1'} + assert my_obj.validate_source_path(current) is None + current = {'source_path': 'svmsrc3:volsrc1'} + assert my_obj.validate_source_path(current) is None + current = {'source_path': 'vserver:volume'} + error = 'Error: another relationship is present 
for the same destination with source_path: "vserver:volume" '\ + '(vserver:volume on cluster cluster). Desired: svmsrc3:volsrc1 on None' + assert error in expect_and_capture_ansible_exception(my_obj.validate_source_path, 'fail', current)['msg'] + current = {'source_path': 'vserver:volume1'} + my_obj.parameters['connection_type'] = 'other' + error = 'Error: another relationship is present for the same destination with source_path: "vserver:volume1".'\ + ' Desired: svmsrc3:volsrc1 on None' + assert error in expect_and_capture_ansible_exception(my_obj.validate_source_path, 'fail', current)['msg'] + my_obj.parameters['connection_type'] = 'ontap_ontap' + current = {'source_path': 'vserver:volume'} + error = rest_error_message('Error retrieving SVM peer', 'svm/peers') + assert error in expect_and_capture_ansible_exception(my_obj.validate_source_path, 'fail', current)['msg'] + current = {'source_path': 'vserver/volume'} + assert my_obj.validate_source_path(current) is None + assert_warning_was_raised('Unexpected source path: vserver/volume, skipping validation.') + my_obj.parameters['destination_endpoint'] = {'path': 'vserver/volume'} + current = {'source_path': 'vserver:volume'} + assert my_obj.validate_source_path(current) is None + assert_warning_was_raised('Unexpected destination path: vserver/volume, skipping validation.') + + +@patch('time.sleep') +def test_wait_for_idle_status(dont_sleep): + # validate wait time and time-out + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'snapmirror/relationships', SRR['zero_records']), + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'snapmirror/relationships', SRR['zero_records']), + ('GET', 'snapmirror/relationships', SRR['sm_get_mirrored']), + # time-out + ('GET', 'snapmirror/relationships', SRR['zero_records']), + ('GET', 'snapmirror/relationships', SRR['zero_records']), + ]) + # using paths + module_args = { + "use_rest": "always", + 
"transferring_time_out": 0, + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.wait_for_idle_status() is None + assert my_obj.wait_for_idle_status() is not None + module_args = { + "use_rest": "always", + "transferring_time_out": 60, + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.wait_for_idle_status() is not None + assert my_obj.wait_for_idle_status() is None + assert_warning_was_raised('SnapMirror relationship is still transferring after 60 seconds.') + + +def test_dp_to_xdp(): + # with ZAPI, DP is transformed to XDP to match ONTAP behavior + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored']), + ]) + # using paths + module_args = { + "use_rest": "never", + "relationship_type": 'data_protection', + "validate_source_path": False + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_actions() is not None + assert my_obj.parameters['relationship_type'] == 'extended_data_protection' + + +def test_cannot_change_rtype(): + # with ZAPI, can't change relationship_type + register_responses([ + ('ZAPI', 'snapmirror-get-iter', ZRR['sm_info_snapmirrored']), + ]) + # using paths + module_args = { + "use_rest": "never", + "relationship_type": 'load_sharing', + "validate_source_path": False + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = 'Error: cannot modify relationship_type from extended_data_protection to load_sharing.' + assert error in expect_and_capture_ansible_exception(my_obj.get_actions, 'fail', )['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_NETAPP_LIB', False) +def test_module_fail_when_netapp_lib_missing(): + ''' required lib missing ''' + module_args = { + 'use_rest': 'never', + } + assert 'Error: the python NetApp-Lib module is required. 
Import error: None' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_check_health(): + # validate source path when vserver local name is different + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'snapmirror/relationships', SRR['zero_records']), + ('GET', 'snapmirror/relationships', SRR['sm_get_abort']), + ]) + module_args = { + "use_rest": "always", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.check_health() is None + assert_no_warnings() + assert my_obj.check_health() is None + assert_warning_was_raised('SnapMirror relationship exists but is not healthy. ' + 'Unhealthy reason: this is why the relationship is not healthy. ' + 'Last transfer error: this is why the relationship is not healthy.') + + +def test_negative_check_if_remote_volume_exists_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'storage/volumes', SRR['zero_records']), + ('GET', 'storage/volumes', SRR['generic_error']), + ]) + module_args = { + "use_rest": "always", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = 'REST is not supported on Source' + assert error in expect_and_capture_ansible_exception(my_obj.check_if_remote_volume_exists_rest, 'fail')['msg'] + my_obj.src_use_rest = True + assert not my_obj.check_if_remote_volume_exists_rest() + my_obj.parameters['peer_options'] = {} + netapp_utils.setup_host_options_from_module_params(my_obj.parameters['peer_options'], my_obj.module, netapp_utils.na_ontap_host_argument_spec_peer().keys()) + my_obj.parameters['source_volume'] = 'volume' + my_obj.parameters['source_vserver'] = 'vserver' + assert my_obj.set_source_cluster_connection() is None + assert not my_obj.check_if_remote_volume_exists_rest() + error = rest_error_message('Error fetching source volume', 'storage/volumes') + assert error in 
expect_and_capture_ansible_exception(my_obj.check_if_remote_volume_exists_rest, 'fail')['msg'] + + +def test_snapmirror_release_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + module_args = { + "use_rest": "always", + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.snapmirror_release() is None + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_negative_set_source_cluster_connection(mock_netapp_lib): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + module_args = { + "use_rest": "never", + "source_volume": "source_volume", + "source_vserver": "source_vserver", + "destination_volume": "destination_volume", + "destination_vserver": "destination_vserver", + "relationship_type": "vault", + "peer_options": { + "use_rest": "always", + "hostname": "source_host", + } + } + mock_netapp_lib.side_effect = [True, False] + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = "REST API currently does not support 'relationship_type: vault'" + assert error in expect_and_capture_ansible_exception(my_obj.set_source_cluster_connection, 'fail')['msg'] + my_obj.parameters['peer_options']['use_rest'] = 'auto' + error = "Error: the python NetApp-Lib module is required. 
Import error: None" + assert error in expect_and_capture_ansible_exception(my_obj.set_source_cluster_connection, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py new file mode 100644 index 000000000..23a1e9c64 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapmirror_policy.py @@ -0,0 +1,1269 @@ +# (c) 2019-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_snapmirror_policy ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_error, build_zapi_response, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + call_main, create_module, patch_ansible, expect_and_capture_ansible_exception, create_and_apply + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapmirror_policy import NetAppOntapSnapMirrorPolicy as my_module, main as my_main + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # common responses + 'is_rest': 
(200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'success': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'get_snapmirror_policy_async': (200, { + 'svm': {'name': 'ansible'}, + 'name': 'ansible', + 'uuid': 'abcdef12-3456-7890-abcd-ef1234567890', + 'comment': 'created by ansible', + 'type': 'async', + 'snapmirror_label': [], + 'keep': [], + 'schedule': [], + 'prefix': [], + 'network_compression_enabled': True, + 'identity_preservation': 'exclude_network_config' + }, None), + 'get_snapmirror_policy_async_with_options': (200, { + 'svm': {'name': 'ansible'}, + 'name': 'ansible', + 'uuid': 'abcdef12-3456-7890-abcd-ef1234567890', + 'comment': 'created by ansible', + 'type': 'async', + 'snapmirror_label': [], + 'keep': [], + 'schedule': [], + 'prefix': [], + 'copy_latest_source_snapshot': True, + 'network_compression_enabled': True, + 'identity_preservation': 'exclude_network_config' + }, None), + 'get_snapmirror_policy_sync': (200, { + 'svm': {'name': 'ansible'}, + 'name': 'ansible', + 'uuid': 'abcdef12-3456-7890-abcd-ef1234567890', + 'comment': 'created by ansible', + 'type': 'sync', + 'snapmirror_label': [], + 'keep': [], + 'schedule': [], + 'prefix': [], + 'network_compression_enabled': False + }, None), + 'get_snapmirror_policy_async_with_rules': (200, { + 'svm': {'name': 'ansible'}, + 'name': 'ansible', + 'uuid': 'abcdef12-3456-7890-abcd-ef1234567890', + 'comment': 'created by ansible', + 'type': 'async', + 'retention': [ + { + 'label': 'daily', + 'count': 7, + 'creation_schedule': {'name': ''}, + 'prefix': '', + }, + { + 'label': 'weekly', + 'count': 5, + 'creation_schedule': {'name': 'weekly'}, + 'prefix': 'weekly', + }, + { + 'label': 'monthly', + 'count': 12, + 'creation_schedule': {'name': 'monthly'}, + 'prefix': 'monthly', + }, + ], + 'network_compression_enabled': False + }, None), + 
'get_snapmirror_policy_async_with_rules_dash': (200, { + 'svm': {'name': 'ansible'}, + 'name': 'ansible', + 'uuid': 'abcdef12-3456-7890-abcd-ef1234567890', + 'comment': 'created by ansible', + 'type': 'async', + 'retention': [ + { + 'label': 'daily', + 'count': 7, + 'creation_schedule': {'name': ''}, + 'prefix': '', + }, + { + 'label': 'weekly', + 'count': 5, + 'creation_schedule': {'name': 'weekly'}, + 'prefix': 'weekly', + }, + { + 'label': 'monthly', + 'count': 12, + 'creation_schedule': {'name': '-'}, + 'prefix': '-', + }, + ], + 'network_compression_enabled': False + }, None), + 'get_snapmirror_policy_async_with_create_snapshot_on_source': (200, { + 'svm': {'name': 'ansible'}, + 'name': 'ansible', + 'uuid': 'abcdef12-3456-7890-abcd-ef1234567890', + 'comment': 'created by ansible', + 'type': 'async', + 'retention': [ + { + 'label': 'daily', + 'count': 7, + 'creation_schedule': {'name': ''}, + 'prefix': '', + }, + ], + 'create_snapshot_on_source': False, + 'is_network_compression_enabled': True, + 'transfer_schedule': {'name': 'yearly'}, + }, None), + 'get_snapmirror_policy_sync_with_sync_type': (200, { + 'svm': {'name': 'ansible'}, + 'name': 'ansible', + 'uuid': 'abcdef12-3456-7890-abcd-ef1234567890', + 'comment': 'created by ansible', + 'type': 'sync', + 'sync_type': 'automated_failover', + # does not make sense, but does not hurt + 'copy_all_source_snapshots': False + }, None), +}) + + +snapmirror_policy_info = { + 'comment': 'created by ansible', + 'policy-name': 'ansible', + 'type': 'async_mirror', + 'tries': '8', + 'transfer-priority': 'normal', + 'restart': 'always', + 'is-network-compression-enabled': 'false', + 'ignore-atime': 'false', + 'vserver-name': 'ansible', + 'common-snapshot-schedule': 'monthly' +} + +snapmirror_policy_rules = { + 'snapmirror-policy-rules': [ + {'info': { + 'snapmirror-label': 'daily', + 'keep': 7, + 'schedule': '', + 'prefix': '', + }}, + {'info': { + 'snapmirror-label': 'weekly', + 'keep': 5, + 'schedule': 'weekly', + 
'prefix': 'weekly', + }}, + {'info': { + 'snapmirror-label': 'monthly', + 'keep': 12, + 'schedule': 'monthly', + 'prefix': 'monthly', + }}, + {'info': { + 'snapmirror-label': 'sm_created', + 'keep': 12, + 'schedule': 'monthly', + 'prefix': 'monthly', + }}, + ] +} + + +def get_snapmirror_policy_info(with_rules=False): + info = dict(snapmirror_policy_info) + if with_rules: + info.update(snapmirror_policy_rules) + return {'attributes-list': {'snapmirror-policy-info': info}} + + +ZRR = zapi_responses({ + 'snapmirror-policy-info': build_zapi_response(get_snapmirror_policy_info()), + 'snapmirror-policy-info-with-rules': build_zapi_response(get_snapmirror_policy_info(True)), + 'error_13001': build_zapi_error(13001, 'policy not found'), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'use_rest': 'use_rest', + 'policy_name': 'ansible', + 'vserver': 'ansible', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + args = dict(DEFAULT_ARGS) + args.pop('policy_name') + error = 'missing required arguments: policy_name' + assert error in call_main(my_main, args, fail=True)['msg'] + + +def test_ensure_get_called(): + ''' test get_snapmirror_policy for non-existent snapmirror policy''' + register_responses([ + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_snapmirror_policy() is None + + +def test_ensure_get_called_existing(): + ''' test get_snapmirror_policy for existing snapmirror policy''' + register_responses([ + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['snapmirror-policy-info']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_snapmirror_policy() + + +def test_successful_create(): + ''' creating snapmirror 
policy without rules and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['no_records']), + ('ZAPI', 'snapmirror-policy-create', ZRR['success']), + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['snapmirror-policy-info']), + ]) + module_args = { + 'use_rest': 'never', + 'transfer_priority': 'normal' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_create_with_rest(): + ''' creating snapmirror policy without rules via REST and testing idempotency ''' + register_responses([ + # default is async + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ('POST', 'snapmirror/policies', SRR['success']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async']), + # explicitly async + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ('POST', 'snapmirror/policies', SRR['success']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async_with_options']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async_with_options']), + # sync + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ('POST', 'snapmirror/policies', SRR['success']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_sync']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_sync']), + ]) + module_args = { + 
'use_rest': 'always', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['policy_type'] = 'async_mirror' + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['policy_type'] = 'sync_mirror' + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_create_with_rules(): + ''' creating snapmirror policy with rules and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['error_13001']), + ('ZAPI', 'snapmirror-policy-create', ZRR['success']), + ('ZAPI', 'snapmirror-policy-add-rule', ZRR['success']), + ('ZAPI', 'snapmirror-policy-add-rule', ZRR['success']), + ('ZAPI', 'snapmirror-policy-add-rule', ZRR['success']), + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['snapmirror-policy-info-with-rules']), + ]) + module_args = { + 'use_rest': 'never', + 'snapmirror_label': ['daily', 'weekly', 'monthly'], + 'keep': [7, 5, 12], + 'schedule': ['', 'weekly', 'monthly'], + 'prefix': ['', 'weekly', 'monthly'] + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_create_with_rules_via_rest(): + ''' creating snapmirror policy with rules via rest and testing idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ('POST', 'snapmirror/policies', SRR['success']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async']), + ('PATCH', 'snapmirror/policies/abcdef12-3456-7890-abcd-ef1234567890', SRR['success']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 
'snapmirror/policies', SRR['get_snapmirror_policy_async_with_rules']), + ]) + module_args = { + 'use_rest': 'always', + 'snapmirror_label': ['daily', 'weekly', 'monthly'], + 'keep': [7, 5, 12], + 'schedule': ['', 'weekly', 'monthly'], + 'prefix': ['', 'weekly', 'monthly'] + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_delete(): + ''' deleting snapmirror policy and testing idempotency ''' + register_responses([ + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['snapmirror-policy-info']), + ('ZAPI', 'snapmirror-policy-delete', ZRR['success']), + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + 'state': 'absent' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_delete_with_rest(): + ''' deleting snapmirror policy via REST and testing idempotency ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async_with_rules_dash']), + ('DELETE', 'snapmirror/policies/abcdef12-3456-7890-abcd-ef1234567890', SRR['success']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async_with_rules']), + ('DELETE', 'snapmirror/policies/abcdef12-3456-7890-abcd-ef1234567890', SRR['success']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ]) + module_args = { + 'state': 'absent', + 'use_rest': 'always', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, 
module_args)['changed'] + + +def test_successful_modify(): + ''' modifying snapmirror policy without rules. idempotency was tested in create ''' + register_responses([ + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['snapmirror-policy-info']), + ('ZAPI', 'snapmirror-policy-modify', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'comment': 'old comment', + 'ignore_atime': True, + 'is_network_compression_enabled': True, + 'owner': 'cluster_admin', + 'restart': 'default', + 'tries': '7'} + + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify_with_rest(): + ''' modifying snapmirror policy without rules via REST. Idempotency was tested in create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async']), + ('PATCH', 'snapmirror/policies/abcdef12-3456-7890-abcd-ef1234567890', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + 'comment': 'old comment', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify_with_rules(): + ''' modifying snapmirror policy with rules. Idempotency was tested in create ''' + register_responses([ + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['snapmirror-policy-info']), + ('ZAPI', 'snapmirror-policy-add-rule', ZRR['success']), + ('ZAPI', 'snapmirror-policy-add-rule', ZRR['success']), + ('ZAPI', 'snapmirror-policy-add-rule', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'snapmirror_label': ['daily', 'weekly', 'monthly'], + 'keep': [7, 5, 12], + 'schedule': ['', 'weekly', 'monthly'], + 'prefix': ['', 'weekly', 'monthly'] + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify_with_rules_via_rest(): + ''' modifying snapmirror policy with rules via rest. 
Idempotency was tested in create ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async']), + ('PATCH', 'snapmirror/policies/abcdef12-3456-7890-abcd-ef1234567890', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + 'snapmirror_label': ['daily', 'weekly', 'monthly'], + 'keep': [7, 5, 12], + 'schedule': ['', 'weekly', 'monthly'], + 'prefix': ['', 'weekly', 'monthly'] + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('ZAPI', 'snapmirror-policy-get-iter', ZRR['error']), + ('ZAPI', 'snapmirror-policy-create', ZRR['error']), + ('ZAPI', 'snapmirror-policy-delete', ZRR['error']), + ('ZAPI', 'snapmirror-policy-modify', ZRR['error']), + ('ZAPI', 'snapmirror-policy-remove-rule', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'common_snapshot_schedule': 'sched', + 'policy_type': 'sync_mirror', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = zapi_error_message('Error getting snapmirror policy ansible') + assert error in expect_and_capture_ansible_exception(my_obj.get_snapmirror_policy, 'fail')['msg'] + error = zapi_error_message('Error creating snapmirror policy ansible') + assert error in expect_and_capture_ansible_exception(my_obj.create_snapmirror_policy, 'fail')['msg'] + error = zapi_error_message('Error deleting snapmirror policy ansible') + assert error in expect_and_capture_ansible_exception(my_obj.delete_snapmirror_policy, 'fail')['msg'] + error = zapi_error_message('Error modifying snapmirror policy ansible') + assert error in expect_and_capture_ansible_exception(my_obj.modify_snapmirror_policy, 'fail')['msg'] + module_args = { + 'use_rest': 'never', + 'common_snapshot_schedule': 'sched', + 'policy_type': 'sync_mirror', + 'snapmirror_label': ['lbl1'], + 'keep': [24], + } + current = { + 
'snapmirror_label': ['lbl2'], + 'keep': [24], + 'prefix': [''], + 'schedule': ['weekly'], + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = zapi_error_message('Error modifying snapmirror policy rule ansible') + assert error in expect_and_capture_ansible_exception(my_obj.modify_snapmirror_policy_rules, 'fail', current)['msg'] + + +def test_if_all_methods_catch_exception_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['generic_error']), + ('POST', 'snapmirror/policies', SRR['generic_error']), + ('DELETE', 'snapmirror/policies/uuid', SRR['generic_error']), + ('PATCH', 'snapmirror/policies/uuid', SRR['generic_error']), + # modifying rules + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('PATCH', 'snapmirror/policies/uuid', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always', + 'policy_type': 'sync_mirror', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = rest_error_message('Error getting snapmirror policy', 'snapmirror/policies') + assert error in expect_and_capture_ansible_exception(my_obj.get_snapmirror_policy_rest, 'fail')['msg'] + error = rest_error_message('Error creating snapmirror policy', 'snapmirror/policies') + assert error in expect_and_capture_ansible_exception(my_obj.create_snapmirror_policy, 'fail')['msg'] + error = rest_error_message('Error deleting snapmirror policy', 'snapmirror/policies/uuid') + assert error in expect_and_capture_ansible_exception(my_obj.delete_snapmirror_policy, 'fail', 'uuid')['msg'] + error = rest_error_message('Error modifying snapmirror policy', 'snapmirror/policies/uuid') + assert error in expect_and_capture_ansible_exception(my_obj.modify_snapmirror_policy, 'fail', 'uuid', {'key': 'value'})['msg'] + module_args = { + 'use_rest': 'always', + 'policy_type': 'sync_mirror', + 'snapmirror_label': ['lbl1'], + 
'keep': [24], + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = rest_error_message('Error modifying snapmirror policy rules', 'snapmirror/policies/uuid') + assert error in expect_and_capture_ansible_exception(my_obj.modify_snapmirror_policy_rules, 'fail', None, 'uuid')['msg'] + + +def test_create_snapmirror_policy_retention_obj_for_rest(): + ''' test create_snapmirror_policy_retention_obj_for_rest ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + # Test no rules + assert my_obj.create_snapmirror_policy_retention_obj_for_rest() == [] + + # Test one rule + rules = [{'snapmirror_label': 'daily', 'keep': 7}] + retention_obj = [{'label': 'daily', 'count': '7'}] + assert my_obj.create_snapmirror_policy_retention_obj_for_rest(rules) == retention_obj + + # Test two rules, with a prefix + rules = [{'snapmirror_label': 'daily', 'keep': 7}, + {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly'}] + retention_obj = [{'label': 'daily', 'count': '7'}, + {'label': 'weekly', 'count': '5', 'prefix': 'weekly'}] + assert my_obj.create_snapmirror_policy_retention_obj_for_rest(rules) == retention_obj + + # Test three rules, with a prefix & schedule + rules = [{'snapmirror_label': 'daily', 'keep': 7}, + {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv'}, + {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}] + retention_obj = [{'label': 'daily', 'count': '7'}, + {'label': 'weekly', 'count': '5', 'prefix': 'weekly_sv'}, + {'label': 'monthly', 'count': '12', 'prefix': 'monthly_sv', 'creation_schedule': {'name': 'monthly'}}] + assert my_obj.create_snapmirror_policy_retention_obj_for_rest(rules) == retention_obj + + +def test_identify_snapmirror_policy_rules_with_schedule(): + ''' test identify_snapmirror_policy_rules_with_schedule ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + } + 
my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + # Test no rules + assert my_obj.identify_snapmirror_policy_rules_with_schedule() == ([], []) + + # Test one non-schedule rule identified + rules = [{'snapmirror_label': 'daily', 'keep': 7}] + schedule_rules = [] + non_schedule_rules = [{'snapmirror_label': 'daily', 'keep': 7}] + assert my_obj.identify_snapmirror_policy_rules_with_schedule(rules) == (schedule_rules, non_schedule_rules) + + # Test one schedule and two non-schedule rules identified + rules = [{'snapmirror_label': 'daily', 'keep': 7}, + {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv'}, + {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}] + schedule_rules = [{'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}] + non_schedule_rules = [{'snapmirror_label': 'daily', 'keep': 7}, + {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv'}] + assert my_obj.identify_snapmirror_policy_rules_with_schedule(rules) == (schedule_rules, non_schedule_rules) + + # Test three schedule & zero non-schedule rules identified + rules = [{'snapmirror_label': 'daily', 'keep': 7, 'schedule': 'daily'}, + {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv', 'schedule': 'weekly'}, + {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}] + schedule_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'schedule': 'daily'}, + {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly_sv', 'schedule': 'weekly'}, + {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly_sv', 'schedule': 'monthly'}] + non_schedule_rules = [] + assert my_obj.identify_snapmirror_policy_rules_with_schedule(rules) == (schedule_rules, non_schedule_rules) + + +def test_identify_new_snapmirror_policy_rules(): + ''' test identify_new_snapmirror_policy_rules ''' + register_responses([ + ]) + + # Test with no rules in parameters. 
new_rules should always be []. + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + current = None + new_rules = [] + assert my_obj.identify_new_snapmirror_policy_rules(current) == new_rules + + current = {'snapmirror_label': ['daily'], 'keep': [7], 'prefix': [''], 'schedule': ['']} + new_rules = [] + assert my_obj.identify_new_snapmirror_policy_rules(current) == new_rules + + # Test with rules in parameters. + module_args = { + 'use_rest': 'never', + 'snapmirror_label': ['daily', 'weekly', 'monthly'], + 'keep': [7, 5, 12], + 'schedule': ['', 'weekly', 'monthly'], + 'prefix': ['', 'weekly', 'monthly'] + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + # Test three new rules identified when no rules currently exist + current = None + new_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, + {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'}, + {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}] + assert my_obj.identify_new_snapmirror_policy_rules(current) == new_rules + + # Test two new rules identified and one rule already exists + current = {'snapmirror_label': ['daily'], 'keep': [7], 'prefix': [''], 'schedule': ['']} + new_rules = [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'}, + {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}] + assert my_obj.identify_new_snapmirror_policy_rules(current) == new_rules + + # Test one new rule identified and two rules already exist + current = {'snapmirror_label': ['daily', 'monthly'], + 'keep': [7, 12], + 'prefix': ['', 'monthly'], + 'schedule': ['', 'monthly']} + new_rules = [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'}] + assert my_obj.identify_new_snapmirror_policy_rules(current) == new_rules + + # Test no new rules identified as all 
rules already exist + current = {'snapmirror_label': ['daily', 'monthly', 'weekly'], + 'keep': [7, 12, 5], + 'prefix': ['', 'monthly', 'weekly'], + 'schedule': ['', 'monthly', 'weekly']} + new_rules = [] + assert my_obj.identify_new_snapmirror_policy_rules(current) == new_rules + + +def test_identify_obsolete_snapmirror_policy_rules(): + ''' test identify_obsolete_snapmirror_policy_rules ''' + register_responses([ + ]) + + # Test with no rules in parameters. obsolete_rules should always be []. + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + current = None + obsolete_rules = [] + assert my_obj.identify_obsolete_snapmirror_policy_rules(current) == obsolete_rules + + current = {'snapmirror_label': ['daily'], 'keep': [7], 'prefix': [''], 'schedule': ['']} + obsolete_rules = [] + assert my_obj.identify_obsolete_snapmirror_policy_rules(current) == obsolete_rules + + # Test removing all rules. obsolete_rules should equal current. + module_args = { + 'use_rest': 'never', + 'snapmirror_label': [] + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + current = {'snapmirror_label': ['monthly', 'weekly', 'hourly', 'daily', 'yearly'], + 'keep': [12, 5, 24, 7, 7], + 'prefix': ['monthly', 'weekly', '', '', 'yearly'], + 'schedule': ['monthly', 'weekly', '', '', 'yearly']} + obsolete_rules = [{'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}, + {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'}, + {'snapmirror_label': 'hourly', 'keep': 24, 'prefix': '', 'schedule': ''}, + {'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''}, + {'snapmirror_label': 'yearly', 'keep': 7, 'prefix': 'yearly', 'schedule': 'yearly'}] + assert my_obj.identify_obsolete_snapmirror_policy_rules(current) == obsolete_rules + + # Test with rules in parameters. 
+ module_args = { + 'use_rest': 'never', + 'snapmirror_label': ['daily', 'weekly', 'monthly'], + 'keep': [7, 5, 12], + 'schedule': ['', 'weekly', 'monthly'], + 'prefix': ['', 'weekly', 'monthly'] + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + + # Test no rules exist, thus no obsolete rules + current = None + obsolete_rules = [] + assert my_obj.identify_obsolete_snapmirror_policy_rules(current) == obsolete_rules + + # Test new rules and one obsolete rule identified + current = {'snapmirror_label': ['hourly'], 'keep': [24], 'prefix': [''], 'schedule': ['']} + obsolete_rules = [{'snapmirror_label': 'hourly', 'keep': 24, 'prefix': '', 'schedule': ''}] + assert my_obj.identify_obsolete_snapmirror_policy_rules(current) == obsolete_rules + + # Test new rules, with one retained and one obsolete rule identified + current = {'snapmirror_label': ['hourly', 'daily'], + 'keep': [24, 7], + 'prefix': ['', ''], + 'schedule': ['', '']} + obsolete_rules = [{'snapmirror_label': 'hourly', 'keep': 24, 'prefix': '', 'schedule': ''}] + assert my_obj.identify_obsolete_snapmirror_policy_rules(current) == obsolete_rules + + # Test new rules and two obsolete rules identified + current = {'snapmirror_label': ['monthly', 'weekly', 'hourly', 'daily', 'yearly'], + 'keep': [12, 5, 24, 7, 7], + 'prefix': ['monthly', 'weekly', '', '', 'yearly'], + 'schedule': ['monthly', 'weekly', '', '', 'yearly']} + obsolete_rules = [{'snapmirror_label': 'hourly', 'keep': 24, 'prefix': '', 'schedule': ''}, + {'snapmirror_label': 'yearly', 'keep': 7, 'prefix': 'yearly', 'schedule': 'yearly'}] + assert my_obj.identify_obsolete_snapmirror_policy_rules(current) == obsolete_rules + + +def test_identify_modified_snapmirror_policy_rules(): + ''' test identify_modified_snapmirror_policy_rules ''' + register_responses([ + + ]) + + # Test with no rules in parameters. modified_rules & unmodified_rules should always be []. 
+    module_args = {
+        'use_rest': 'never',
+    }
+    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
+
+    current = None
+    modified_rules, unmodified_rules = [], []
+    assert my_obj.identify_modified_snapmirror_policy_rules(current) == (modified_rules, unmodified_rules)
+
+    current = {'snapmirror_label': ['daily'], 'keep': [14], 'prefix': ['daily'], 'schedule': ['daily']}
+    modified_rules, unmodified_rules = [], []
+    assert my_obj.identify_modified_snapmirror_policy_rules(current) == (modified_rules, unmodified_rules)
+
+    # Test removing all rules. modified_rules & unmodified_rules should be [].
+    module_args = {
+        'use_rest': 'never',
+        'snapmirror_label': []
+    }
+    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
+    current = {'snapmirror_label': ['monthly', 'weekly', 'hourly', 'daily', 'yearly'],
+               'keep': [12, 5, 24, 7, 7],
+               'prefix': ['monthly', 'weekly', '', '', 'yearly'],
+               'schedule': ['monthly', 'weekly', '', '', 'yearly']}
+    modified_rules, unmodified_rules = [], []
+    assert my_obj.identify_modified_snapmirror_policy_rules(current) == (modified_rules, unmodified_rules)
+
+    # Test with rules in parameters.
+    module_args = {
+        'use_rest': 'never',
+        'snapmirror_label': ['daily', 'weekly', 'monthly'],
+        'keep': [7, 5, 12],
+        'schedule': ['', 'weekly', 'monthly'],
+        'prefix': ['', 'weekly', 'monthly']
+    }
+    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
+
+    # Test no rules exist, thus no modified & unmodified rules
+    current = None
+    modified_rules, unmodified_rules = [], []
+    assert my_obj.identify_modified_snapmirror_policy_rules(current) == (modified_rules, unmodified_rules)
+
+    # Test new rules don't exist, thus no modified & unmodified rules
+    current = {'snapmirror_label': ['hourly'], 'keep': [24], 'prefix': [''], 'schedule': ['']}
+    modified_rules, unmodified_rules = [], []
+    assert my_obj.identify_modified_snapmirror_policy_rules(current) == (modified_rules, unmodified_rules)
+
+    # Test daily & monthly modified, weekly unmodified
+    current = {'snapmirror_label': ['hourly', 'daily', 'weekly', 'monthly'],
+               'keep': [24, 14, 5, 6],
+               'prefix': ['', 'daily', 'weekly', 'monthly'],
+               'schedule': ['', 'daily', 'weekly', 'monthly']}
+    modified_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''},
+                      {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}]
+    unmodified_rules = [{'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'}]
+    assert my_obj.identify_modified_snapmirror_policy_rules(current) == (modified_rules, unmodified_rules)
+
+    # Test all rules modified
+    current = {'snapmirror_label': ['daily', 'weekly', 'monthly'],
+               'keep': [14, 10, 6],
+               'prefix': ['', '', ''],
+               'schedule': ['daily', 'weekly', 'monthly']}
+    modified_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''},
+                      {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'},
+                      {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}]
+    unmodified_rules = []
+    assert my_obj.identify_modified_snapmirror_policy_rules(current) == (modified_rules, unmodified_rules)
+
+    # Test all rules unmodified
+    current = {'snapmirror_label': ['daily', 'weekly', 'monthly'],
+               'keep': [7, 5, 12],
+               'prefix': ['', 'weekly', 'monthly'],
+               'schedule': ['', 'weekly', 'monthly']}
+    modified_rules = []
+    unmodified_rules = [{'snapmirror_label': 'daily', 'keep': 7, 'prefix': '', 'schedule': ''},
+                        {'snapmirror_label': 'weekly', 'keep': 5, 'prefix': 'weekly', 'schedule': 'weekly'},
+                        {'snapmirror_label': 'monthly', 'keep': 12, 'prefix': 'monthly', 'schedule': 'monthly'}]
+    assert my_obj.identify_modified_snapmirror_policy_rules(current) == (modified_rules, unmodified_rules)
+
+
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.HAS_NETAPP_LIB', False)
+def test_module_fail_when_netapp_lib_missing():
+    ''' required lib missing '''
+    module_args = {
+        'use_rest': 'never',
+    }
+    assert 'Error: the python NetApp-Lib module is required. Import error: None' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']
+
+
+def test_validate_parameters():
+    ''' test test_validate_parameters '''
+    register_responses([
+    ])
+
+    args = dict(DEFAULT_ARGS)
+    args.pop('vserver')
+    module_args = {
+        'use_rest': 'never',
+    }
+    error = 'Error: vserver is a required parameter when using ZAPI.'
+    assert error in create_module(my_module, args, module_args, fail=True)['msg']
+
+    module_args = {
+        'use_rest': 'never',
+        'snapmirror_label': list(range(11)),
+    }
+    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
+    error = 'Error: A SnapMirror Policy can have up to a maximum of'
+    assert error in expect_and_capture_ansible_exception(my_obj.validate_parameters, 'fail')['msg']
+
+    module_args = {
+        'use_rest': 'never',
+        'snapmirror_label': list(range(10)),
+    }
+    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
+    error = "Error: Missing 'keep' parameter.
When specifying the 'snapmirror_label' parameter, the 'keep' parameter must also be supplied" + assert error in expect_and_capture_ansible_exception(my_obj.validate_parameters, 'fail')['msg'] + + module_args = { + 'use_rest': 'never', + 'snapmirror_label': list(range(10)), + 'keep': list(range(9)), + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = "Error: Each 'snapmirror_label' value must have an accompanying 'keep' value" + assert error in expect_and_capture_ansible_exception(my_obj.validate_parameters, 'fail')['msg'] + + module_args = { + 'use_rest': 'never', + 'snapmirror_label': list(range(10)), + 'keep': list(range(11)), + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = "Error: Each 'keep' value must have an accompanying 'snapmirror_label' value" + assert error in expect_and_capture_ansible_exception(my_obj.validate_parameters, 'fail')['msg'] + + module_args = { + 'use_rest': 'never', + 'keep': list(range(11)), + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = "Error: Missing 'snapmirror_label' parameter. When specifying the 'keep' parameter, the 'snapmirror_label' parameter must also be supplied" + assert error in expect_and_capture_ansible_exception(my_obj.validate_parameters, 'fail')['msg'] + + module_args = { + 'use_rest': 'never', + 'snapmirror_label': list(range(10)), + 'keep': list(range(10)), + 'prefix': list(range(10)), + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = "Error: Missing 'schedule' parameter. When specifying the 'prefix' parameter, the 'schedule' parameter must also be supplied" + assert error in expect_and_capture_ansible_exception(my_obj.validate_parameters, 'fail')['msg'] + + module_args = { + 'use_rest': 'never', + 'identity_preservation': 'full', + } + error = 'Error: identity_preservation option is not supported with ZAPI. It can only be used with REST.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + module_args = { + 'use_rest': 'never', + 'copy_all_source_snapshots': True, + } + error = 'Error: copy_all_source_snapshots option is not supported with ZAPI. It can only be used with REST.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_validate_parameters_rest(): + ''' test test_validate_parameters ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ('POST', 'snapmirror/policies', SRR['success']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async']), + # copy_all_source_snapshots + ('GET', 'cluster', SRR['is_rest_9_10_1']), + # copy_latest_source_snapshot + ('GET', 'cluster', SRR['is_rest_9_11_1']), + # create_snapshot_on_source + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ]) + + module_args = { + 'use_rest': 'always', + 'policy_type': 'sync_mirror', + 'is_network_compression_enabled': True + } + error = 'Error: input parameter network_compression_enabled is not valid for SnapMirror policy type sync' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + module_args = { + 'use_rest': 'always', + 'policy_type': 'sync_mirror', + 'identity_preservation': 'full' + } + error = 'Error: identity_preservation is only supported with async (async) policy_type, got: sync' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + module_args = { + 'use_rest': 'always', + 'policy_type': 'async_mirror', + 'is_network_compression_enabled': True, + 'identity_preservation': 
'full' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + module_args = { + 'use_rest': 'always', + 'policy_type': 'async_mirror', + 'copy_all_source_snapshots': False, + } + error = 'Error: the property copy_all_source_snapshots can only be set to true when present' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + module_args = { + 'use_rest': 'always', + 'policy_type': 'async_mirror', + 'copy_latest_source_snapshot': False, + } + error = 'Error: the property copy_latest_source_snapshot can only be set to true when present' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + module_args = { + 'use_rest': 'always', + 'policy_type': 'vault', + 'create_snapshot_on_source': True, + } + error = 'Error: the property create_snapshot_on_source can only be set to false when present' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_errors_in_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ('POST', 'snapmirror/policies', SRR['success']), + ('GET', 'snapmirror/policies', SRR['zero_records']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_sync']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async']), + ]) + module_args = { + 'use_rest': 'always', + } + error = 'Error: policy ansible not present after create.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + # change in policy type + module_args = { + 'use_rest': 'always', + 'policy_type': 'async_mirror', + } + error = 'Error: The policy property policy_type cannot be modified from sync to async' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args = { + 'use_rest': 'always', + 'policy_type': 'sync_mirror', + } + error = 'Error: The policy property policy_type cannot be modified from async to sync' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_errors_in_create_with_copy_snapshots(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + module_args = { + 'use_rest': 'always', + 'copy_all_source_snapshots': True, + 'policy_type': 'sync_mirror' + } + msg = 'Error: option copy_all_source_snapshots is not supported with policy type sync_mirror.' + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg in error + + +def test_errors_in_create_with_copy_latest_snapshots(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ]) + module_args = { + 'use_rest': 'always', + 'copy_latest_source_snapshot': True, + 'policy_type': 'async', + 'snapmirror_label': ["daily", "weekly"], + } + msg = 'Error: Retention properties cannot be specified along with copy_all_source_snapshots or copy_latest_source_snapshot properties' + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg in error + + +def test_errors_in_create_snapshot_on_source(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ]) + module_args = { + 'use_rest': 'always', + 'create_snapshot_on_source': False, + 'policy_type': 'sync_mirror', + 'snapmirror_label': ["daily", "weekly"], + 'keep': ["7", "2"], + } + msg = 'Error: option create_snapshot_on_source is not supported with policy type 
sync_mirror.' + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg in error + + module_args = { + 'use_rest': 'always', + 'create_snapshot_on_source': False, + 'policy_type': 'async', + 'snapmirror_label': ["daily", "weekly"], + } + msg = 'Error: The properties snapmirror_label and keep must be specified with' + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg in error + + +def test_async_create_snapshot_on_source(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_12_1']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['empty_records']), + ('POST', 'snapmirror/policies', SRR['success']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_async_with_create_snapshot_on_source']), + ]) + module_args = { + 'use_rest': 'always', + 'create_snapshot_on_source': False, + 'policy_type': 'vault', + 'snapmirror_label': ["daily", "weekly"], + 'keep': ["7", "2"], + 'prefix': ["p1", "p2"], + 'schedule': ["daily", "weekly"], + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_get_snapmirror_policy_sync_with_sync_type(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'snapmirror/policies', SRR['empty_records']), + ('POST', 'snapmirror/policies', SRR['success']), + ('GET', 'snapmirror/policies', SRR['get_snapmirror_policy_sync_with_sync_type']), + ]) + module_args = { + 'use_rest': 'always', + 'policy_type': 'sync_mirror', + 'sync_type': 'automated_failover' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_set_scope(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['zero_records']), + # first test + ('GET', 'svm/svms', SRR['zero_records']), + ('GET', 'svm/svms', SRR['one_vserver_record']), + ('GET', 'svm/svms', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 
'always', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + # vserver not found + assert my_obj.set_scope() == 'cluster' + # vserver found + assert my_obj.set_scope() == 'svm' + # API error + error = rest_error_message('Error getting vserver ansible info', 'svm/svms') + assert error in expect_and_capture_ansible_exception(my_obj.set_scope, 'fail')['msg'] + # no vserver + my_obj.parameters.pop('vserver') + assert my_obj.set_scope() == 'cluster' + + +def check_mapping(my_obj, policy_type, expected_policy_type, copy_latest_source_snapshot, copy_all_source_snapshots, create_snapshot_on_source, retention): + my_obj.parameters['policy_type'] = policy_type + if copy_latest_source_snapshot is None: + my_obj.parameters.pop('copy_latest_source_snapshot', None) + else: + my_obj.parameters['copy_latest_source_snapshot'] = copy_latest_source_snapshot + if copy_all_source_snapshots is None: + my_obj.parameters.pop('copy_all_source_snapshots', None) + else: + my_obj.parameters['copy_all_source_snapshots'] = copy_all_source_snapshots + if create_snapshot_on_source is None: + my_obj.parameters.pop('create_snapshot_on_source', None) + else: + my_obj.parameters['create_snapshot_on_source'] = create_snapshot_on_source + if retention is None: + my_obj.parameters.pop('snapmirror_label', None) + my_obj.parameters.pop('keep', None) + my_obj.parameters.pop('prefix', None) + my_obj.parameters.pop('schedule', None) + else: + for key, value in retention.items(): + my_obj.parameters[key] = value + my_obj.validate_policy_type() + assert my_obj.parameters['policy_type'] == expected_policy_type + + +def check_options(my_obj, copy_latest_source_snapshot, copy_all_source_snapshots, create_snapshot_on_source): + if copy_latest_source_snapshot is None: + assert 'copy_latest_source_snapshot' not in my_obj.parameters + else: + assert my_obj.parameters['copy_latest_source_snapshot'] == copy_latest_source_snapshot + if copy_all_source_snapshots is None: + assert 
'copy_all_source_snapshots' not in my_obj.parameters + else: + assert my_obj.parameters['copy_all_source_snapshots'] == copy_all_source_snapshots + if create_snapshot_on_source is None: + assert 'create_snapshot_on_source' not in my_obj.parameters + else: + assert my_obj.parameters['create_snapshot_on_source'] == create_snapshot_on_source + + +def test_validate_policy_type(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['zero_records']), + # first test + ]) + module_args = { + 'use_rest': 'always', + } + retention = { + 'snapmirror_label': ["daily", "weekly"], + 'keep': ["7", "2"] + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + check_mapping(my_obj, 'async', 'async', None, None, None, None) + check_options(my_obj, None, None, None) + check_mapping(my_obj, 'mirror_vault', 'async', None, None, None, None) + check_options(my_obj, None, None, None) + check_mapping(my_obj, 'vault', 'async', None, None, None, retention) + check_options(my_obj, None, None, False) + check_mapping(my_obj, 'async_mirror', 'async', None, None, None, None) + check_options(my_obj, True, None, None) + check_mapping(my_obj, 'sync', 'sync', None, None, None, None) + check_options(my_obj, None, None, None) + check_mapping(my_obj, 'sync_mirror', 'sync', None, None, None, None) + check_options(my_obj, None, None, None) + check_mapping(my_obj, 'strict_sync_mirror', 'sync', None, None, None, None) + check_options(my_obj, None, None, None) + + my_obj.parameters['policy_type'] = 'async' + my_obj.parameters['sync_type'] = 'strict_sync' + error = "Error: 'sync_type' is only applicable for sync policy_type" + assert error in expect_and_capture_ansible_exception(my_obj.validate_policy_type, 'fail')['msg'] + + module_args = { + 'use_rest': 'never', + 'policy_type': 'sync' + } + error = 'Error: The policy types async and sync are not supported in ZAPI.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_build_body_for_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['zero_records']), + # first test + ]) + module_args = { + 'use_rest': 'always', + 'snapmirror_label': ["daily", "weekly"], + 'keep': ["7", "2"], + 'copy_all_source_snapshots': True + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + body = my_obj.build_body_for_create() + assert 'copy_all_source_snapshots' in body + + +def test_modify_snapmirror_policy_rules_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'svm/svms', SRR['zero_records']), + # first test + ]) + module_args = { + 'use_rest': 'always', + 'snapmirror_label': ["daily", "weekly"], + 'keep': ["7", "2"], + 'copy_all_source_snapshots': True + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.modify_snapmirror_policy_rules_rest('uuid', [], ['umod'], [], []) is None diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot.py new file mode 100644 index 000000000..f7c49eaad --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot.py @@ -0,0 +1,363 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_nvme_snapshot''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + call_main, 
create_and_apply, create_module, expect_and_capture_ansible_exception, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot \ + import NetAppOntapSnapshot as my_module, main as my_main + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +SRR = rest_responses({ + 'volume_uuid': (200, + {'records': [{"uuid": "test_uuid"}], 'num_records': 1}, None, + ), + 'snapshot_record': (200, + {'records': [{"volume": {"uuid": "d9cd4ec5-c96d-11eb-9271-005056b3ef5a", + "name": "ansible_vol"}, + "uuid": "343b5227-8c6b-4e79-a133-304bbf7537ce", + "svm": {"uuid": "b663d6f0-c96d-11eb-9271-005056b3ef5a", + "name": "ansible"}, + "name": "ss1", + "create_time": "2021-06-10T17:24:41-04:00", + "comment": "123", + "expiry_time": "2022-02-04T14:00:00-05:00", + "snapmirror_label": "321", }], 'num_records': 1}, None), + 'create_response': (200, {'job': {'uuid': 'd0b3eefe-cd59-11eb-a170-005056b338cd', + '_links': { + 'self': {'href': '/api/cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd'}}}}, + None), + 'job_response': (200, {'uuid': 'e43a40db-cd61-11eb-a170-005056b338cd', + 'description': 'PATCH /api/storage/volumes/d9cd4ec5-c96d-11eb-9271-005056b3ef5a/' + 'snapshots/da995362-cd61-11eb-a170-005056b338cd', + 'state': 'success', + 'message': 'success', + 'code': 0, + 'start_time': '2021-06-14T18:43:08-04:00', + 'end_time': '2021-06-14T18:43:08-04:00', + 'svm': {'name': 'ansible', 'uuid': 'b663d6f0-c96d-11eb-9271-005056b3ef5a', + '_links': {'self': {'href': '/api/svm/svms/b663d6f0-c96d-11eb-9271-005056b3ef5a'}}}, + '_links': {'self': {'href': 
'/api/cluster/jobs/e43a40db-cd61-11eb-a170-005056b338cd'}}}, + None) +}, allow_override=False) + + +snapshot_info = { + 'num-records': 1, + 'attributes-list': { + 'snapshot-info': { + 'comment': 'new comment', + 'name': 'ansible', + 'snapmirror-label': 'label12' + } + } +} + +ZRR = zapi_responses({ + 'get_snapshot': build_zapi_response(snapshot_info) +}) + + +DEFAULT_ARGS = { + 'state': 'present', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'vserver', + 'comment': 'test comment', + 'snapshot': 'test_snapshot', + 'snapmirror_label': 'test_label', + 'volume': 'test_vol' +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + error = create_module(my_module, fail=True)['msg'] + assert 'missing required arguments:' in error + for arg in ('hostname', 'snapshot', 'volume', 'vserver'): + assert arg in error + + +def test_ensure_get_called(): + ''' test get_snapshot() for non-existent snapshot''' + register_responses([ + ('snapshot-get-iter', ZRR['empty']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_snapshot() is None + + +def test_ensure_get_called_existing(): + ''' test get_snapshot() for existing snapshot''' + register_responses([ + ('snapshot-get-iter', ZRR['get_snapshot']), + ]) + module_args = { + 'use_rest': 'never', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_snapshot() + + +def test_successful_create(): + ''' creating snapshot and testing idempotency ''' + register_responses([ + ('snapshot-get-iter', ZRR['empty']), + ('snapshot-create', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'async_bool': True + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify(): + ''' modifying snapshot and testing idempotency ''' + register_responses([ + ('snapshot-get-iter', 
ZRR['get_snapshot']), + ('snapshot-modify-iter', ZRR['success']), + ('snapshot-get-iter', ZRR['get_snapshot']), + ]) + module_args = { + 'use_rest': 'never', + 'comment': 'adding comment', + 'snapmirror_label': 'label22', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + module_args = { + 'use_rest': 'never', + 'comment': 'new comment', + 'snapmirror_label': 'label12', + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_rename(): + ''' modifying snapshot and testing idempotency ''' + register_responses([ + ('snapshot-get-iter', ZRR['empty']), + ('snapshot-get-iter', ZRR['get_snapshot']), + ('snapshot-rename', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'from_name': 'from_snapshot', + 'comment': 'new comment', + 'snapmirror_label': 'label12', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_delete(): + ''' deleting snapshot and testing idempotency ''' + register_responses([ + ('snapshot-get-iter', ZRR['get_snapshot']), + ('snapshot-delete', ZRR['success']), + ('snapshot-get-iter', ZRR['empty']), + ]) + module_args = { + 'use_rest': 'never', + 'state': 'absent', + 'ignore_owners': True, + 'snapshot_instance_uuid': 'uuid', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('snapshot-get-iter', ZRR['error']), + ('snapshot-create', ZRR['error']), + ('snapshot-delete', ZRR['error']), + ('snapshot-modify-iter', ZRR['error']), + ('snapshot-rename', ZRR['error']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), # get version + ('GET', 'storage/volumes/None/snapshots', SRR['generic_error']), + ('POST', 'storage/volumes/None/snapshots', SRR['generic_error']), + ('DELETE', 'storage/volumes/None/snapshots/None', SRR['generic_error']), + 
('PATCH', 'storage/volumes/None/snapshots/None', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['generic_error']) + ]) + module_args = { + 'use_rest': 'never', + 'from_name': 'from_snapshot'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert 'Error fetching snapshot' in expect_and_capture_ansible_exception(my_obj.get_snapshot, 'fail')['msg'] + assert 'Error creating snapshot test_snapshot:' in expect_and_capture_ansible_exception(my_obj.create_snapshot, 'fail')['msg'] + assert 'Error deleting snapshot test_snapshot:' in expect_and_capture_ansible_exception(my_obj.delete_snapshot, 'fail')['msg'] + assert 'Error modifying snapshot test_snapshot:' in expect_and_capture_ansible_exception(my_obj.modify_snapshot, 'fail')['msg'] + assert 'Error renaming snapshot from_snapshot to test_snapshot:' in expect_and_capture_ansible_exception(my_obj.rename_snapshot, 'fail')['msg'] + module_args = {'use_rest': 'always'} + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert 'Error fetching snapshot' in expect_and_capture_ansible_exception(my_obj.get_snapshot, 'fail')['msg'] + assert 'Error when creating snapshot:' in expect_and_capture_ansible_exception(my_obj.create_snapshot, 'fail')['msg'] + assert 'Error when deleting snapshot:' in expect_and_capture_ansible_exception(my_obj.delete_snapshot, 'fail')['msg'] + assert 'Error when modifying snapshot:' in expect_and_capture_ansible_exception(my_obj.modify_snapshot, 'fail')['msg'] + assert 'Error getting volume info:' in expect_and_capture_ansible_exception(my_obj.get_volume_uuid, 'fail')['msg'] + + +def test_module_fail_rest_ONTAP96(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']) # get version + ]) + module_args = {'use_rest': 'always'} + msg = 'Error: Minimum version of ONTAP for snapmirror_label is (9, 7)' + assert msg in create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_successfully_create(): + register_responses([ + ('GET', 
'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volumes', SRR['volume_uuid']), + ('GET', 'storage/volumes/test_uuid/snapshots', SRR['empty_records']), + ('POST', 'storage/volumes/test_uuid/snapshots', SRR['create_response']), + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_response']), + ]) + module_args = { + 'use_rest': 'always', + 'expiry_time': 'expiry' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_error_create_no_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volumes', SRR['empty_records']), + ]) + module_args = {'use_rest': 'always'} + msg = 'Error: volume test_vol not found for vserver vserver.' + assert msg == create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_successfully_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volumes', SRR['volume_uuid']), + ('GET', 'storage/volumes/test_uuid/snapshots', SRR['snapshot_record']), + ('PATCH', 'storage/volumes/test_uuid/snapshots/343b5227-8c6b-4e79-a133-304bbf7537ce', SRR['create_response']), # modify + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_response']), + ]) + module_args = { + 'use_rest': 'always', + 'comment': 'new comment', + 'expiry_time': 'expiry' + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_rename(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volumes', SRR['volume_uuid']), + ('GET', 'storage/volumes/test_uuid/snapshots', SRR['empty_records']), + ('GET', 'storage/volumes/test_uuid/snapshots', SRR['snapshot_record']), + ('PATCH', 'storage/volumes/test_uuid/snapshots/343b5227-8c6b-4e79-a133-304bbf7537ce', SRR['create_response']), # modify + ('GET', 'cluster/jobs/d0b3eefe-cd59-11eb-a170-005056b338cd', SRR['job_response']), + ]) + module_args = { + 'use_rest': 
'always', + 'from_name': 'old_snapshot'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_error_rename_from_not_found(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volumes', SRR['volume_uuid']), + ('GET', 'storage/volumes/test_uuid/snapshots', SRR['empty_records']), + ('GET', 'storage/volumes/test_uuid/snapshots', SRR['empty_records']), + ]) + module_args = { + 'use_rest': 'always', + 'from_name': 'old_snapshot'} + msg = 'Error renaming snapshot: test_snapshot - no snapshot with from_name: old_snapshot.' + assert msg == create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_successfully_delete(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volumes', SRR['volume_uuid']), + ('GET', 'storage/volumes/test_uuid/snapshots', SRR['snapshot_record']), + ('DELETE', 'storage/volumes/test_uuid/snapshots/343b5227-8c6b-4e79-a133-304bbf7537ce', SRR['success']), + ]) + module_args = { + 'use_rest': 'always', + 'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_error_delete(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volumes', SRR['volume_uuid']), + ('GET', 'storage/volumes/test_uuid/snapshots', SRR['snapshot_record']), + ('DELETE', 'storage/volumes/test_uuid/snapshots/343b5227-8c6b-4e79-a133-304bbf7537ce', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always', + 'state': 'absent'} + msg = 'Error when deleting snapshot: calling: storage/volumes/test_uuid/snapshots/343b5227-8c6b-4e79-a133-304bbf7537ce: got Expected error.' 
+ assert msg == create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_call_main(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/volumes', SRR['volume_uuid']), + ('GET', 'storage/volumes/test_uuid/snapshots', SRR['snapshot_record']), + ('DELETE', 'storage/volumes/test_uuid/snapshots/343b5227-8c6b-4e79-a133-304bbf7537ce', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always', + 'state': 'absent'} + msg = 'Error when deleting snapshot: calling: storage/volumes/test_uuid/snapshots/343b5227-8c6b-4e79-a133-304bbf7537ce: got Expected error.' + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_unsupported_options(): + module_args = { + 'use_rest': 'always', + 'ignore_owners': True} + error = "REST API currently does not support 'ignore_owners'" + assert error == create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args = { + 'use_rest': 'never', + 'expiry_time': 'any'} + error = "expiry_time is currently only supported with REST on Ontap 9.6 or higher" + assert error == create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_missing_netapp_lib(mock_has_netapp_lib): + module_args = { + 'use_rest': 'never', + } + mock_has_netapp_lib.return_value = False + msg = 'Error: the python NetApp-Lib module is required. 
Import error: None' + assert msg == create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py new file mode 100644 index 000000000..84d928f19 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy.py @@ -0,0 +1,658 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_snapshot_policy''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy \ + import NetAppOntapSnapshotPolicy as my_module + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'policy': + xml = self.build_snapshot_policy_info() + elif self.type == 'snapshot_policy_info_policy_disabled': + xml = 
self.build_snapshot_policy_info_policy_disabled() + elif self.type == 'snapshot_policy_info_comment_modified': + xml = self.build_snapshot_policy_info_comment_modified() + elif self.type == 'snapshot_policy_info_schedules_added': + xml = self.build_snapshot_policy_info_schedules_added() + elif self.type == 'snapshot_policy_info_schedules_deleted': + xml = self.build_snapshot_policy_info_schedules_deleted() + elif self.type == 'snapshot_policy_info_modified_schedule_counts': + xml = self.build_snapshot_policy_info_modified_schedule_counts() + elif self.type == 'policy_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + def asup_log_for_cserver(self): + ''' mock autosupport log''' + return None + + @staticmethod + def build_snapshot_policy_info(): + ''' build xml data for snapshot-policy-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = {'num-records': 1, + 'attributes-list': { + 'snapshot-policy-info': { + 'comment': 'new comment', + 'enabled': 'true', + 'policy': 'ansible', + 'snapshot-policy-schedules': { + 'snapshot-schedule-info': { + 'count': 100, + 'schedule': 'hourly', + 'prefix': 'hourly', + 'snapmirror-label': '' + } + }, + 'vserver-name': 'hostname' + } + }} + xml.translate_struct(data) + return xml + + @staticmethod + def build_snapshot_policy_info_comment_modified(): + ''' build xml data for snapshot-policy-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = {'num-records': 1, + 'attributes-list': { + 'snapshot-policy-info': { + 'comment': 'modified comment', + 'enabled': 'true', + 'policy': 'ansible', + 'snapshot-policy-schedules': { + 'snapshot-schedule-info': { + 'count': 100, + 'schedule': 'hourly', + 'prefix': 'hourly', + 'snapmirror-label': '' + } + }, + 'vserver-name': 'hostname' + } + }} + xml.translate_struct(data) + return xml + + @staticmethod + def build_snapshot_policy_info_policy_disabled(): + ''' build xml data for 
snapshot-policy-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = {'num-records': 1, + 'attributes-list': { + 'snapshot-policy-info': { + 'comment': 'new comment', + 'enabled': 'false', + 'policy': 'ansible', + 'snapshot-policy-schedules': { + 'snapshot-schedule-info': { + 'count': 100, + 'schedule': 'hourly', + 'prefix': 'hourly', + 'snapmirror-label': '' + } + }, + 'vserver-name': 'hostname' + } + }} + xml.translate_struct(data) + return xml + + @staticmethod + def build_snapshot_policy_info_schedules_added(): + ''' build xml data for snapshot-policy-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = {'num-records': 1, + 'attributes-list': { + 'snapshot-policy-info': { + 'comment': 'new comment', + 'enabled': 'true', + 'policy': 'ansible', + 'snapshot-policy-schedules': [ + { + 'snapshot-schedule-info': { + 'count': 100, + 'schedule': 'hourly', + 'prefix': 'hourly', + 'snapmirror-label': '' + } + }, + { + 'snapshot-schedule-info': { + 'count': 5, + 'schedule': 'daily', + 'prefix': 'daily', + 'snapmirror-label': 'daily' + } + }, + { + 'snapshot-schedule-info': { + 'count': 10, + 'schedule': 'weekly', + 'prefix': 'weekly', + 'snapmirror-label': '' + } + } + ], + 'vserver-name': 'hostname' + } + }} + xml.translate_struct(data) + return xml + + @staticmethod + def build_snapshot_policy_info_schedules_deleted(): + ''' build xml data for snapshot-policy-info ''' + xml = netapp_utils.zapi.NaElement('xml') + data = {'num-records': 1, + 'attributes-list': { + 'snapshot-policy-info': { + 'comment': 'new comment', + 'enabled': 'true', + 'policy': 'ansible', + 'snapshot-policy-schedules': [ + { + 'snapshot-schedule-info': { + 'schedule': 'daily', + 'prefix': 'daily', + 'count': 5, + 'snapmirror-label': 'daily' + } + } + ], + 'vserver-name': 'hostname' + } + }} + xml.translate_struct(data) + return xml + + @staticmethod + def build_snapshot_policy_info_modified_schedule_counts(): + ''' build xml data for snapshot-policy-info ''' + xml = 
netapp_utils.zapi.NaElement('xml') + data = {'num-records': 1, + 'attributes-list': { + 'snapshot-policy-info': { + 'comment': 'new comment', + 'enabled': 'true', + 'policy': 'ansible', + 'snapshot-policy-schedules': [ + { + 'snapshot-schedule-info': { + 'count': 10, + 'schedule': 'hourly', + 'prefix': 'hourly', + 'snapmirror-label': '' + } + }, + { + 'snapshot-schedule-info': { + 'count': 50, + 'schedule': 'daily', + 'prefix': 'daily', + 'snapmirror-label': 'daily' + } + }, + { + 'snapshot-schedule-info': { + 'count': 100, + 'schedule': 'weekly', + 'prefix': 'weekly', + 'snapmirror-label': '' + } + } + ], + 'vserver-name': 'hostname' + } + }} + xml.translate_struct(data) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.10.10.10' + username = 'admin' + password = '1234' + name = 'ansible' + enabled = True + count = 100 + schedule = 'hourly' + prefix = 'hourly' + comment = 'new comment' + else: + hostname = 'hostname' + username = 'username' + password = 'password' + name = 'ansible' + enabled = True + count = 100 + schedule = 'hourly' + prefix = 'hourly' + comment = 'new comment' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'name': name, + 'enabled': enabled, + 'count': count, + 'schedule': schedule, + 'prefix': prefix, + 'comment': comment, + 'use_rest': 'never' + }) + + def set_default_current(self): + default_args = self.set_default_args() + return dict({ + 'name': default_args['name'], + 'enabled': default_args['enabled'], + 'count': [default_args['count']], + 'schedule': [default_args['schedule']], + 'snapmirror_label': [''], + 'prefix': [default_args['prefix']], + 'comment': default_args['comment'], + 'vserver': default_args['hostname'] + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments 
are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_get_called(self): + ''' test get_snapshot_policy() for non-existent snapshot policy''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + assert my_obj.get_snapshot_policy() is None + + def test_ensure_get_called_existing(self): + ''' test get_snapshot_policy() for existing snapshot policy''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = MockONTAPConnection(kind='policy') + assert my_obj.get_snapshot_policy() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.create_snapshot_policy') + def test_successful_create(self, create_snapshot): + ''' creating snapshot policy and testing idempotency ''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + create_snapshot.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy') + def test_successful_modify_comment(self, modify_snapshot): + ''' modifying snapshot policy comment and testing idempotency ''' + data = self.set_default_args() + data['comment'] = 'modified comment' + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) 
+ if not self.onbox: + my_obj.server = MockONTAPConnection('policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + current = self.set_default_current() + modify_snapshot.assert_called_with(current) + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('snapshot_policy_info_comment_modified') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy') + def test_successful_disable_policy(self, modify_snapshot): + ''' disabling snapshot policy and testing idempotency ''' + data = self.set_default_args() + data['enabled'] = False + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + current = self.set_default_current() + modify_snapshot.assert_called_with(current) + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('snapshot_policy_info_policy_disabled') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy') + def test_successful_enable_policy(self, modify_snapshot): + ''' enabling snapshot policy and testing idempotency ''' + data = self.set_default_args() + data['enabled'] = True + set_module_args(data) + my_obj = my_module() + 
my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('snapshot_policy_info_policy_disabled') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + current = self.set_default_current() + current['enabled'] = False + modify_snapshot.assert_called_with(current) + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy') + def test_successful_modify_schedules_add(self, modify_snapshot): + ''' adding snapshot policy schedules and testing idempotency ''' + data = self.set_default_args() + data['schedule'] = ['hourly', 'daily', 'weekly'] + data['prefix'] = ['hourly', 'daily', 'weekly'] + data['count'] = [100, 5, 10] + data['snapmirror_label'] = ['', 'daily', ''] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + current = self.set_default_current() + modify_snapshot.assert_called_with(current) + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('snapshot_policy_info_schedules_added') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + 
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy') + def test_successful_modify_schedules_delete(self, modify_snapshot): + ''' deleting snapshot policy schedules and testing idempotency ''' + data = self.set_default_args() + data['schedule'] = ['daily'] + data['prefix'] = ['daily'] + data['count'] = [5] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + current = self.set_default_current() + modify_snapshot.assert_called_with(current) + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('snapshot_policy_info_schedules_deleted') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.modify_snapshot_policy') + def test_successful_modify_schedules(self, modify_snapshot): + ''' modifying snapshot policy schedule counts and testing idempotency ''' + data = self.set_default_args() + data['schedule'] = ['hourly', 'daily', 'weekly'] + data['count'] = [10, 50, 100] + data['prefix'] = ['hourly', 'daily', 'weekly'] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + current = self.set_default_current() + modify_snapshot.assert_called_with(current) + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + 
my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('snapshot_policy_info_modified_schedule_counts') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy.NetAppOntapSnapshotPolicy.delete_snapshot_policy') + def test_successful_delete(self, delete_snapshot): + ''' deleting snapshot policy and testing idempotency ''' + data = self.set_default_args() + data['state'] = 'absent' + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('policy') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + delete_snapshot.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_valid_schedule_count(self): + ''' validate when schedule has same number of elements ''' + data = self.set_default_args() + data['schedule'] = ['hourly', 'daily', 'weekly', 'monthly', '5min'] + data['prefix'] = ['hourly', 'daily', 'weekly', 'monthly', '5min'] + data['count'] = [1, 2, 3, 4, 5] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + my_obj.create_snapshot_policy() + create_xml = my_obj.server.xml_in + assert data['count'][2] == int(create_xml['count3']) + assert data['schedule'][4] == create_xml['schedule5'] + + def test_valid_schedule_count_with_snapmirror_labels(self): + ''' validate when schedule has same number of elements with snapmirror labels ''' + data = 
self.set_default_args() + data['schedule'] = ['hourly', 'daily', 'weekly', 'monthly', '5min'] + data['prefix'] = ['hourly', 'daily', 'weekly', 'monthly', '5min'] + data['count'] = [1, 2, 3, 4, 5] + data['snapmirror_label'] = ['hourly', 'daily', 'weekly', 'monthly', '5min'] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + my_obj.create_snapshot_policy() + create_xml = my_obj.server.xml_in + assert data['count'][2] == int(create_xml['count3']) + assert data['schedule'][4] == create_xml['schedule5'] + assert data['snapmirror_label'][3] == create_xml['snapmirror-label4'] + + def test_invalid_params(self): + ''' validate error when schedule does not have same number of elements ''' + data = self.set_default_args() + data['schedule'] = ['s1', 's2'] + data['prefix'] = ['s1', 's2'] + data['count'] = [1, 2, 3] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_snapshot_policy() + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can have up to a maximum of 5 schedules, with a count ' \ + 'representing the maximum number of Snapshot copies for each schedule' + assert exc.value.args[0]['msg'] == msg + + def test_invalid_schedule_count(self): + ''' validate error when schedule has more than 5 elements ''' + data = self.set_default_args() + data['schedule'] = ['s1', 's2', 's3', 's4', 's5', 's6'] + data['count'] = [1, 2, 3, 4, 5, 6] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_snapshot_policy() + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can have up to a maximum of 5 schedules, with a count 
' \ + 'representing the maximum number of Snapshot copies for each schedule' + assert exc.value.args[0]['msg'] == msg + + def test_invalid_schedule_count_less_than_one(self): + ''' validate error when schedule has less than 1 element ''' + data = self.set_default_args() + data['schedule'] = [] + data['count'] = [] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_snapshot_policy() + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can have up to a maximum of 5 schedules, with a count ' \ + 'representing the maximum number of Snapshot copies for each schedule' + assert exc.value.args[0]['msg'] == msg + + def test_invalid_schedule_count_is_none(self): + ''' validate error when schedule is None ''' + data = self.set_default_args() + data['schedule'] = None + data['count'] = None + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_snapshot_policy() + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can have up to a maximum of 5 schedules, with a count ' \ + 'representing the maximum number of Snapshot copies for each schedule' + assert exc.value.args[0]['msg'] == msg + + def test_invalid_schedule_count_with_snapmirror_labels(self): + ''' validate error when schedule with snapmirror labels does not have same number of elements ''' + data = self.set_default_args() + data['schedule'] = ['s1', 's2', 's3'] + data['count'] = [1, 2, 3] + data['snapmirror_label'] = ['sm1', 'sm2'] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleFailJson) as exc: + 
my_obj.create_snapshot_policy() + msg = 'Error: Each Snapshot Policy schedule must have an accompanying SnapMirror Label' + assert exc.value.args[0]['msg'] == msg + + def test_invalid_schedule_count_with_prefixes(self): + ''' validate error when schedule with prefixes does not have same number of elements ''' + data = self.set_default_args() + data['schedule'] = ['s1', 's2', 's3'] + data['count'] = [1, 2, 3] + data['prefix'] = ['s1', 's2'] + set_module_args(data) + my_obj = my_module() + my_obj.asup_log_for_cserver = Mock(return_value=None) + if not self.onbox: + my_obj.server = self.server + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_snapshot_policy() + msg = 'Error: Each Snapshot Policy schedule must have an accompanying prefix' + assert exc.value.args[0]['msg'] == msg + + def test_if_all_methods_catch_exception(self): + module_args = {} + module_args.update(self.set_default_args()) + set_module_args(module_args) + my_obj = my_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('policy_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.create_snapshot_policy() + assert 'Error creating snapshot policy ansible:' in exc.value.args[0]['msg'] + with pytest.raises(AnsibleFailJson) as exc: + my_obj.delete_snapshot_policy() + assert 'Error deleting snapshot policy ansible:' in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy_rest.py new file mode 100644 index 000000000..b79507759 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snapshot_policy_rest.py @@ -0,0 +1,481 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_snapshot_policy """ + +from __future__ import (absolute_import, division, 
print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception, AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snapshot_policy \ + import NetAppOntapSnapshotPolicy as my_module + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +SRR = rest_responses({ + 'snapshot_record': (200, {"records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "comment": "modified comment", + "enabled": True, + "name": "policy_name", + "copies": [ + { + "count": 10, + "schedule": { + "name": "hourly" + }, + "prefix": 'hourly', + "snapmirror_label": '' + }, + { + "count": 30, + "schedule": { + "name": "weekly" + }, + "prefix": 'weekly', + "snapmirror_label": '' + } + ], + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + } + ], + "num_records": 1 + }, None), + 'schedule_record': (200, {"records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "comment": "modified comment", + "enabled": 'true', + "name": "policy_name", + "count": 10, + "prefix": "hourly", + "snapmirror_label": '', + "schedule": { + "name": "hourly", + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa" + }, + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + }, + { + 
"svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "ansibleSVM" + }, + "comment": "modified comment", + "enabled": 'true', + "name": "policy_name", + "count": 30, + "prefix": "weekly", + "snapmirror_label": '', + "schedule": { + "name": "weekly", + "uuid": "671aa46e-11ad-11ec-a267-005056b30dsa" + }, + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + } + ], "num_records": 2}, None), +}) + + +ARGS_REST = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'policy_name', + 'vserver': 'ansibleSVM', + 'enabled': True, + 'count': [10, 30], + 'schedule': "hourly,weekly", + 'comment': 'modified comment', + 'use_rest': 'always' +} + +ARGS_REST_no_SVM = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'policy_name', + 'enabled': True, + 'count': [10, 30], + 'schedule': "hourly,weekly", + 'comment': 'modified comment', + 'use_rest': 'always' +} + + +def test_error_get_snapshot_policy_rest(): + ''' Test get error with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on fetching snapshot policy:' in error + + +def test_error_get_snapshot_schedule_rest(): + ''' Test get error with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('PATCH', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_good']), + ('GET', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['generic_error']) + ]) + module_args = { + 'enabled': False, + 'comment': 'testing policy', + 'name': 'policy2' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on fetching snapshot schedule:' in error + + +def test_module_error_ontap_version(): + 
register_responses([ + ('GET', 'cluster', SRR['is_rest_96']) + ]) + module_args = {'use_rest': 'always'} + msg = create_module(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error: REST requires ONTAP 9.8 or later for snapshot schedules.' == msg + + +def test_create_snapshot_polciy_rest(): + ''' Test create with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['empty_records']), + ('POST', 'storage/snapshot-policies', SRR['empty_good']), + ]) + assert create_and_apply(my_module, ARGS_REST) + + +def test_create_snapshot_polciy_with_snapmirror_label_rest(): + ''' Test create with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['empty_records']), + ('POST', 'storage/snapshot-policies', SRR['empty_good']), + ]) + module_args = { + "snapmirror_label": ['hourly', 'weekly'] + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_create_snapshot_polciy_with_prefix_rest(): + ''' Test create with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['empty_records']), + ('POST', 'storage/snapshot-policies', SRR['empty_good']), + ]) + module_args = { + "prefix": ['', ''] + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_error_create_snapshot_polciy_rest(): + ''' Test error create with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['empty_records']), + ('POST', 'storage/snapshot-policies', SRR['generic_error']), + ]) + error = create_and_apply(my_module, ARGS_REST, fail=True)['msg'] + assert 'Error on creating snapshot policy:' in error + + +def test_delete_snapshot_policy_rest(): + ''' Test delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', 
SRR['snapshot_record']), + ('DELETE', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_good']), + ]) + module_args = { + 'state': 'absent' + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_error_delete_snapshot_policy_rest(): + ''' Test error delete with rest API''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('DELETE', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['generic_error']), + ]) + module_args = { + 'state': 'absent' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on deleting snapshot policy:' in error + + +def test_modify_snapshot_policy_rest(): + ''' Test modify comment, rename and disable policy with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('PATCH', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_good']), + ('GET', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['schedule_record']) + ]) + module_args = { + 'enabled': False, + 'comment': 'testing policy', + 'name': 'policy2' + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_error_modify_snapshot_policy_rest(): + ''' Neagtive test - modify snapshot policy with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('PATCH', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['generic_error']), + ]) + module_args = { + 'enabled': 'no' + } + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert 'Error on modifying snapshot policy:' in error + + +def test_modify_snapshot_schedule_rest(): + ''' Test modify snapshot schedule and disable policy with rest API ''' 
+ register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('PATCH', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412', SRR['empty_good']), + ('GET', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['schedule_record']), + ('PATCH', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules/671aa46e-11ad-11ec-a267-005056b30dsa', SRR['empty_good']) + ]) + module_args = { + "enabled": False, + "count": ['10', '20'], + "schedule": ['hourly', 'weekly'] + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_modify_snapshot_schedule_count_label_rest(): + ''' Test modify snapmirror_label and count with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('GET', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['schedule_record']), + ('PATCH', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules/671aa46e-11ad-11ec-a267-005056b30dsa', SRR['empty_good']) + ]) + module_args = { + "snapmirror_label": ['', 'weekly'], + "count": [10, 20], + "schedule": ['hourly', 'weekly'] + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_modify_snapshot_schedule_count_rest(): + ''' Test modify snapshot count with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('GET', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['schedule_record']), + ('PATCH', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules/671aa46e-11ad-11ec-a267-005056b30dsa', SRR['empty_good']) + ]) + module_args = { + "count": "10,40", + "schedule": ['hourly', 'weekly'], + "snapmirror_label": ['', ''] + } + assert create_and_apply(my_module, ARGS_REST, 
module_args) + + +def test_modify_snapshot_count_rest(): + ''' Test modify snapshot count, snapmirror_label and prefix with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('GET', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['schedule_record']), + ('PATCH', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']) + ]) + module_args = { + "count": "20,30", + "schedule": ['hourly', 'weekly'], + "snapmirror_label": ['hourly', ''], + "prefix": ['', 'weekly'] + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_delete_snapshot_schedule_rest(): + ''' Test delete snapshot schedule with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('GET', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['schedule_record']), + ('DELETE', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']) + ]) + module_args = { + "count": 30, + "schedule": ['weekly'] + } + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_delete_all_snapshot_schedule_rest(): + ''' Validate deleting all snapshot schedule with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']) + ]) + module_args = { + "count": [], + "schedule": [] + } + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can have up to a maximum of 5 schedules, with a count ' \ + 'representing the maximum number of Snapshot copies for each schedule' + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert msg in error + + +def 
test_add_snapshot_schedule_rest(): + ''' Test modify by adding schedule to a snapshot with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']), + ('GET', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['schedule_record']), + ('POST', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['empty_good']), + ('POST', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['success']), + ('POST', 'storage/snapshot-policies/1cd8a442-86d1-11e0-ae1c-123478563412/schedules', SRR['success']) + ]) + module_args = { + "count": "10,30,20,1,2", + "schedule": ['hourly', 'weekly', 'daily', 'monthly', '5min'], + "snapmirror_label": ['', '', '', '', '']} + assert create_and_apply(my_module, ARGS_REST, module_args) + + +def test_add_max_snapshot_schedule_rest(): + ''' Test modify by adding more than maximum number of schedule to a snapshot with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'storage/snapshot-policies', SRR['snapshot_record']) + ]) + module_args = { + "count": "10,30,20,1,2,3", + "schedule": ['hourly', 'weekly', 'daily', 'monthly', '5min', '10min'], + "snapmirror_label": ['', '', '', '', '', '']} + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can have up to a maximum of 5 schedules, with a count ' \ + 'representing the maximum number of Snapshot copies for each schedule' + error = create_and_apply(my_module, ARGS_REST, module_args, fail=True)['msg'] + assert msg in error + + +def test_invalid_count_rest(): + ''' Test invalid count for a schedule with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + current = { + 'schedule': 'weekly', + 'count': []} + my_module_object = create_module(my_module, ARGS_REST, current) + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can 
have up to a maximum of 5 schedules, with a count ' \ + 'representing the maximum number of Snapshot copies for each schedule' + assert msg in expect_and_capture_ansible_exception(my_module_object.validate_parameters, 'fail')['msg'] + + +def test_validate_schedule_count_with_snapmirror_labels_rest(): + ''' validate when schedule has same number of elements with snapmirror labels with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + current = { + 'schedule': ['hourly', 'daily', 'weekly', 'monthly', '5min'], + 'snapmirror_label': ['', '', ''], + 'count': [1, 2, 3, 4, 5]} + my_module_object = create_module(my_module, ARGS_REST, current) + msg = "Error: Each Snapshot Policy schedule must have an accompanying SnapMirror Label" + assert msg in expect_and_capture_ansible_exception(my_module_object.validate_parameters, 'fail')['msg'] + + +def test_validate_schedule_count_with_prefix_rest(): + ''' validate when schedule has same number of elements with prefix with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + current = { + 'schedule': ['hourly', 'daily', 'weekly', 'monthly', '5min'], + 'prefix': ['hourly', 'daily', 'weekly'], + 'count': [1, 2, 3, 4, 5]} + my_module_object = create_module(my_module, ARGS_REST, current) + msg = "Error: Each Snapshot Policy schedule must have an accompanying prefix" + assert msg in expect_and_capture_ansible_exception(my_module_object.validate_parameters, 'fail')['msg'] + + +def test_validate_schedule_count_max_rest(): + ''' Validate maximum number of snapshot schedule and count with REST API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + current = { + 'schedule': ['hourly', 'daily', 'weekly', 'monthly', '5min', '10min'], + 'count': [1, 2, 3, 4, 5, 6]} + my_module_object = create_module(my_module, ARGS_REST, current) + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can have up to a maximum of 5 schedules, with a 
count ' \ + 'representing the maximum number of Snapshot copies for each schedule' + assert msg in expect_and_capture_ansible_exception(my_module_object.validate_parameters, 'fail')['msg'] + + +def test_invalid_count_number_rest(): + ''' validate when schedule has same number of elements with count with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + current = { + 'schedule': ['hourly', 'daily', 'weekly'], + 'count': [1, 2, 3, 4, 5, 6] + } + my_module_object = create_module(my_module, ARGS_REST, current) + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can have up to a maximum of 5 schedules, with a count ' \ + 'representing the maximum number of Snapshot copies for each schedule' + assert msg in expect_and_capture_ansible_exception(my_module_object.validate_parameters, 'fail')['msg'] + + +def test_invalid_schedule_count_rest(): + ''' validate invalid number of schedule and count with rest API ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + current = { + 'schedule': [], + 'count': []} + my_module_object = create_module(my_module, ARGS_REST, current) + msg = 'Error: A Snapshot policy must have at least 1 ' \ + 'schedule and can have up to a maximum of 5 schedules, with a count ' \ + 'representing the maximum number of Snapshot copies for each schedule' + assert msg in expect_and_capture_ansible_exception(my_module_object.validate_parameters, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp.py new file mode 100644 index 000000000..24d8c5da4 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp.py @@ -0,0 +1,158 @@ +# (c) 2018-2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP snmp Ansible module ''' + 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import assert_no_warnings, set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snmp \ + import NetAppONTAPSnmp as my_module, main as uut_main # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def default_args(): + args = { + 'state': 'present', + 'hostname': '10.10.10.10', + 'username': 'admin', + 'https': 'true', + 'validate_certs': 'false', + 'password': 'password', + 'use_rest': 'always' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_6': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'community_user_record': (200, { + 'records': [{ + "name": "snmpv3user2", + "authentication_method": "community", + 'engine_id': "80000315058e02057c0fb8e911bc9f005056bb942e" + }], + 'num_records': 1 + }, None), + 'snmp_user_record': (200, { + 'records': [{ 
+ "name": "snmpv3user3", + "authentication_method": "usm", + 'engine_id': "80000315058e02057c0fb8e911bc9f005056bb942e" + }], + 'num_records': 1 + }, None), +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(dict(hostname='')) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + msg = 'missing required arguments: community_name' + assert msg == exc.value.args[0]['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_get_community_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['community_name'] = 'snmpv3user2' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['community_user_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_create_community_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['community_name'] = 'snmpv3user2' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['zero_record'], # get + SRR['empty_good'], # create + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_community_called(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + 
args['community_name'] = 'snmpv3user2' + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['community_user_record'], # get + SRR['community_user_record'], + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is True + assert_no_warnings() + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_ensure_delete_community_idempotent(mock_request, patch_ansible): + ''' test get''' + args = dict(default_args()) + args['community_name'] = 'snmpv3user2' + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest_9_8'], # get version + SRR['zero_record'], # get + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: %s' % exc.value.args[0]) + assert exc.value.args[0]['changed'] is False + assert_no_warnings() diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py new file mode 100644 index 000000000..43b9624bb --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_snmp_traphosts.py @@ -0,0 +1,153 @@ +# (c) 2020-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_snmp_traphosts """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks 
import set_module_args,\ + patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception, AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_snmp_traphosts \ + import NetAppONTAPSnmpTraphosts as traphost_module # module under test + +# REST API canned responses when mocking send_request +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'snmp_record': ( + 200, + { + "records": [ + { + "host": "example.com", + } + ], + "num_records": 1 + }, None + ), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + + +ARGS_REST = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always', + 'host': 'example.com' +} + + +def test_rest_error_get(): + '''Test error rest get''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'support/snmp/traphosts', SRR['generic_error']), + ]) + error = create_and_apply(traphost_module, ARGS_REST, fail=True)['msg'] + msg = "Error on fetching snmp traphosts info:" + assert msg in error + + +def test_rest_create(): + '''Test create snmp traphost''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'support/snmp/traphosts', SRR['empty_records']), + ('POST', 'support/snmp/traphosts', SRR['empty_good']), + ]) + assert create_and_apply(traphost_module, ARGS_REST) + + +def test_rest_error_create(): + '''Test error create snmp traphost''' + register_responses([ + ('GET', 'cluster', 
SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'support/snmp/traphosts', SRR['empty_records']), + ('POST', 'support/snmp/traphosts', SRR['generic_error']), + ]) + error = create_and_apply(traphost_module, ARGS_REST, fail=True)['msg'] + msg = "Error creating traphost:" + assert msg in error + + +def test_rest_delete(): + '''Test delete snmp traphost''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'support/snmp/traphosts', SRR['snmp_record']), + ('DELETE', 'support/snmp/traphosts/example.com', SRR['empty_good']), + ]) + module_args = { + 'state': 'absent' + } + assert create_and_apply(traphost_module, ARGS_REST, module_args) + + +def test_rest_error_delete(): + '''Test error delete snmp traphost''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'support/snmp/traphosts', SRR['snmp_record']), + ('DELETE', 'support/snmp/traphosts/example.com', SRR['generic_error']), + ]) + module_args = { + 'state': 'absent' + } + error = create_and_apply(traphost_module, ARGS_REST, module_args, fail=True)['msg'] + msg = "Error deleting traphost:" + assert msg in error + + +def test_create_idempotent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'support/snmp/traphosts', SRR['snmp_record']) + ]) + module_args = { + 'state': 'present' + } + assert not create_and_apply(traphost_module, ARGS_REST, module_args)['changed'] + + +def test_delete_idempotent_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'support/snmp/traphosts', SRR['empty_records']) + ]) + module_args = { + 'state': 'absent' + } + assert not create_and_apply(traphost_module, ARGS_REST, module_args)['changed'] + + +def test_ontap_version_rest(): + ''' Test ONTAP version ''' + 
register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'cluster', SRR['is_rest_96']), + ]) + module_args = {'use_rest': 'always'} + error = create_module(traphost_module, ARGS_REST, module_args, fail=True)['msg'] + msg = "Error: na_ontap_snmp_traphosts only supports REST, and requires ONTAP 9.7.0 or later." + assert msg in error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_software_update.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_software_update.py new file mode 100644 index 000000000..40bf3e851 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_software_update.py @@ -0,0 +1,1124 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_software_update ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_warning_was_raised, expect_and_capture_ansible_exception, call_main, create_module, create_and_apply, patch_ansible, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import JOB_GET_API, rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_error, build_zapi_response, zapi_error_message, zapi_responses + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_software_update \ + import 
NetAppONTAPSoftwareUpdate as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def cluster_image_info(mixed=False): + version1 = 'Fattire__9.3.0' + version2 = version1 + if mixed: + version2 += '.1' + return { + 'num-records': 1, + # composite response, attributes-list for cluster-image-get-iter and attributes for cluster-image-get + 'attributes-list': [ + {'cluster-image-info': { + 'node-id': 'node4test', + 'current-version': version1}}, + {'cluster-image-info': { + 'node-id': 'node4test', + 'current-version': version2}}, + ], + 'attributes': { + 'cluster-image-info': { + 'node-id': 'node4test', + 'current-version': version1 + }}, + } + + +def software_update_info(status): + if status == 'async_pkg_get_phase_complete': + overall_status = 'completed' + elif status == 'async_pkg_get_phase_running': + overall_status = 'in_progress' + else: + overall_status = status + + return { + 'num-records': 1, + # 'attributes-list': {'cluster-image-info': {'node-id': node}}, + 'progress-status': status, + 'progress-details': 'some_details', + 'failure-reason': 'failure_reason', + 'attributes': { + 'ndu-progress-info': { + 'overall-status': overall_status, + 'completed-node-count': '0', + 'validation-reports': [{ + 'validation-report-info': { + 'one_check': 'one', + 'two_check': 'two' + }}]}}, + } + + +cluster_image_validation_report_list = { + 'cluster-image-validation-report-list': [ + {'cluster-image-validation-report-list-info': { + 'required-action': { + 'required-action-info': { + 'action': 'some_action', + 'advice': 'some_advice', + 'error': 'some_error', + } + }, + 'ndu-check': 'ndu_ck', + 'ndu-status': 'ndu_st', + }}, + {'cluster-image-validation-report-list-info': { + 'required-action': { + 
'required-action-info': { + 'action': 'other_action', + 'advice': 'other_advice', + 'error': 'other_error', + } + }, + 'ndu-check': 'ndu_ck', + 'ndu-status': 'ndu_st', + }}, + ], +} + + +cluster_image_package_local_info = { + 'attributes-list': [ + {'cluster-image-package-local-info': { + 'package-version': 'Fattire__9.3.0', + + }}, + {'cluster-image-package-local-info': { + 'package-version': 'Fattire__9.3.1', + + }}, + ], +} + + +ZRR = zapi_responses({ + 'cluster_image_info': build_zapi_response(cluster_image_info()), + 'cluster_image_info_mixed': build_zapi_response(cluster_image_info(True)), + 'software_update_info_running': build_zapi_response(software_update_info('async_pkg_get_phase_running')), + 'software_update_info_complete': build_zapi_response(software_update_info('async_pkg_get_phase_complete')), + 'software_update_info_error': build_zapi_response(software_update_info('error')), + 'cluster_image_validation_report_list': build_zapi_response(cluster_image_validation_report_list), + 'cluster_image_package_local_info': build_zapi_response(cluster_image_package_local_info, 2), + 'error_18408': build_zapi_error(18408, 'pkg exists!') +}) + + +def cluster_software_node_info(mixed=False): + version1 = 'Fattire__9.3.0' + version2 = 'GEN_MAJ_min_2' if mixed else version1 + return { + 'nodes': [ + {'name': 'node1', 'version': version1}, + {'name': 'node2', 'version': version2}, + ] + } + + +def cluster_software_state_info(state): + # state: in_progress, completed, ... + return { + 'state': state + } + + +cluster_software_validation_results = { + "validation_results": [{ + "action": { + "message": "Use NFS hard mounts, if possible." + }, + "issue": { + "message": "Cluster HA is not configured in the cluster." 
+ }, + "status": "warning", + "update_check": "nfs_mounts" + }], +} + + +def cluster_software_download_info(state): + return { + "message": "message", + "state": state, + } + + +SRR = rest_responses({ + 'cluster_software_node_info': (200, cluster_software_node_info(), None), + 'cluster_software_node_info_mixed': (200, cluster_software_node_info(True), None), + 'cluster_software_validation_results': (200, cluster_software_validation_results, None), + 'cluster_software_state_completed': (200, cluster_software_state_info('completed'), None), + 'cluster_software_state_in_progress': (200, cluster_software_state_info('in_progress'), None), + 'cluster_software_state_in_error': (200, cluster_software_state_info('in_error'), None), + 'cluster_software_download_state_success': (200, cluster_software_download_info('success'), None), + 'cluster_software_download_state_running': (200, cluster_software_download_info('running'), None), + 'cluster_software_package_info_ft': (200, {'records': [{'version': 'Fattire__9.3.0'}]}, None), + 'cluster_software_package_info_pte': (200, {'records': [{'version': 'PlinyTheElder'}]}, None), + 'error_image_already_exists': (200, {}, 'Package image with the same name already exists'), + 'error_download_in_progress': (200, {}, 'Software get operation already in progress'), +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'package_version': 'Fattire__9.3.0', + 'package_url': 'abc.com', + 'https': 'true', + 'stabilize_minutes': 10 +} + + +@patch('time.sleep') +def test_ensure_apply_for_update_called(dont_sleep): + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', 
ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_complete']), + ('ZAPI', 'cluster-image-update', ZRR['success']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['software_update_info_complete']), + ('ZAPI', 'cluster-image-package-delete', ZRR['success']), + ]) + module_args = { + "use_rest": "never", + "package_version": "PlinyTheElder", + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_ensure_apply_for_update_called_node(dont_sleep): + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_complete']), + ('ZAPI', 'cluster-image-update', ZRR['success']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['software_update_info_complete']), + ('ZAPI', 'cluster-image-package-delete', ZRR['success']), + ]) + module_args = { + "use_rest": "never", + "nodes": ["node_abc"], + "package_version": "PlinyTheElder", + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_ensure_apply_for_update_called_idempotent(dont_sleep): + # image already installed + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + + ]) + module_args = { + "use_rest": "never", + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_ensure_apply_for_update_called_idempotent_node(dont_sleep): + # image already installed + register_responses([ 
+ ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get', ZRR['cluster_image_info']), + + ]) + module_args = { + "use_rest": "never", + "nodes": ["node_abc"], + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_ensure_apply_for_update_called_with_validation(dont_sleep): + # for validation before update + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_complete']), + ('ZAPI', 'cluster-image-validate', ZRR['success']), + ('ZAPI', 'cluster-image-update', ZRR['success']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['software_update_info_complete']), + ('ZAPI', 'cluster-image-package-delete', ZRR['success']), + ]) + module_args = { + "use_rest": "never", + "package_version": "PlinyTheElder", + "validate_after_download": True, + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_negative_download_error(dont_sleep): + ''' downloading software - error while downloading the image - first request ''' + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['error']), + ]) + module_args = { + "use_rest": "never", + "package_version": "PlinyTheElder", + } + error = zapi_error_message('Error downloading cluster image package for abc.com') + assert error in create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + 
+@patch('time.sleep') +def test_negative_download_progress_error(dont_sleep): + ''' downloading software - error while downloading the image - progress error ''' + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_error']), + ]) + module_args = { + "use_rest": "never", + "package_version": "PlinyTheElder", + } + error = 'Error downloading package: failure_reason' + assert error in create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_negative_download_progress_error_no_status(dont_sleep): + ''' downloading software - error while downloading the image - progress error ''' + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['success']), # retrying if status cannot be found + ('ZAPI', 'cluster-image-get-download-progress', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_error']), + ]) + module_args = { + "use_rest": "never", + "package_version": "PlinyTheElder", + } + error = 'Error downloading package: failure_reason' + assert error in create_and_apply(my_module, DEFAULT_ARGS, module_args, 
fail=True)['msg'] + + +@patch('time.sleep') +def test_negative_download_progress_error_fetching_status(dont_sleep): + ''' downloading software - error while downloading the image - progress error ''' + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['error']), + ]) + module_args = { + "use_rest": "never", + "package_version": "PlinyTheElder", + } + error = zapi_error_message('Error fetching cluster image package download progress for abc.com') + assert error in create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_negative_update_error_zapi(dont_sleep): + ''' updating software - error while updating the image ''' + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_complete']), + ('ZAPI', 'cluster-image-update', ZRR['error']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['error']), # additional error details + ('ZAPI', 'cluster-image-validate', ZRR['error']), # additional error details + ]) + module_args = { + "use_rest": "never", + "package_version": "PlinyTheElder", + } + error = zapi_error_message('Error updating cluster image for PlinyTheElder') + assert error in create_and_apply(my_module, 
DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_negative_update_error(dont_sleep): + ''' updating software - error while updating the image ''' + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_complete']), + ('ZAPI', 'cluster-image-update', ZRR['success']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['software_update_info_error']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['software_update_info_error']), + ]) + module_args = { + "use_rest": "never", + "package_version": "PlinyTheElder", + } + error = 'Error updating image using ZAPI: overall_status: error.' 
+ assert error in create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_negative_update_error_timeout(dont_sleep): + ''' updating software - error while updating the image ''' + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_running']), + ('ZAPI', 'cluster-image-get-download-progress', ZRR['software_update_info_complete']), + ('ZAPI', 'cluster-image-update', ZRR['success']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['software_update_info_error']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['software_update_info_running']), + ]) + module_args = { + "use_rest": "never", + "package_version": "PlinyTheElder", + } + error = 'Timeout error updating image using ZAPI: overall_status: in_progress. Should the timeout value be increased?'\ + ' Current value is 1800 seconds. The software update continues in background.' + assert error in create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + module_args = { + "use_rest": "never" + } + assert 'Error: the python NetApp-Lib module is required. 
Import error: None' == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_fail_with_http(): + args = dict(DEFAULT_ARGS) + args.pop('https') + assert 'Error: https parameter must be True' == call_main(my_main, args, fail=True)['msg'] + + +def test_is_update_required(): + ''' update is required if nodes have different images, or version does not match ''' + register_responses([ + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info_mixed']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info_mixed']), + ]) + module_args = { + "use_rest": "never" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert not my_obj.is_update_required() + assert my_obj.is_update_required() + my_obj.parameters["package_version"] = "PlinyTheElder" + assert my_obj.is_update_required() + assert my_obj.is_update_required() + + +def test_cluster_image_validate(): + ''' check error, then check that reports are read correctly ''' + register_responses([ + ('ZAPI', 'cluster-image-validate', ZRR['error']), + ('ZAPI', 'cluster-image-validate', ZRR['cluster_image_validation_report_list']), + ]) + module_args = { + "use_rest": "never" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.cluster_image_validate() == zapi_error_message('Error running cluster image validate') + reports = my_obj.cluster_image_validate() + assert 'required_action' in reports[0] + assert 'action' in reports[0]['required_action'] + assert reports[0]['required_action']['action'] == 'some_action' + assert reports[1]['required_action']['action'] == 'other_action' + + +def test_cluster_image_zapi_errors(): + ''' ZAPi error on delete ''' + register_responses([ + ('ZAPI', 'cluster-image-get-iter', ZRR['error']), + ('ZAPI', 'cluster-image-get', ZRR['error']), + ('ZAPI', 'cluster-image-package-delete', 
ZRR['error']), + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['error']), + ]) + module_args = { + "use_rest": "never" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert expect_and_capture_ansible_exception(my_obj.cluster_image_get_versions, 'fail')['msg'] ==\ + zapi_error_message('Error fetching cluster image details: Fattire__9.3.0') + assert expect_and_capture_ansible_exception(my_obj.cluster_image_get_for_node, 'fail', 'node')['msg'] ==\ + zapi_error_message('Error fetching cluster image details for node') + assert expect_and_capture_ansible_exception(my_obj.cluster_image_package_delete, 'fail')['msg'] ==\ + zapi_error_message('Error deleting cluster image package for Fattire__9.3.0') + assert expect_and_capture_ansible_exception(my_obj.cluster_image_packages_get_zapi, 'fail')['msg'] ==\ + zapi_error_message('Error getting list of local packages') + + +def test_cluster_image_get_for_node_none_none(): + ''' empty response on get ''' + register_responses([ + ('ZAPI', 'cluster-image-get', ZRR['success']), + ]) + module_args = { + "use_rest": "never" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.cluster_image_get_for_node('node') == (None, None) + + +def test_cluster_image_package_download(): + ''' ZAPI error on download - package already exists''' + register_responses([ + ('ZAPI', 'cluster-image-package-download', ZRR['error']), + ('ZAPI', 'cluster-image-package-download', ZRR['error_18408']), + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['cluster_image_package_local_info']), + ('ZAPI', 'cluster-image-package-download', ZRR['success']), + ]) + module_args = { + "use_rest": "never" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert expect_and_capture_ansible_exception(my_obj.cluster_image_package_download, 'fail')['msg'] ==\ + zapi_error_message('Error downloading cluster image package for abc.com') + assert my_obj.cluster_image_package_download() + assert not 
my_obj.cluster_image_package_download() + + +def test_cluster_image_update_progress_get_error(): + ''' ZAPI error on progress get ''' + register_responses([ + ('ZAPI', 'cluster-image-update-progress-info', ZRR['error']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['error']), + ('ZAPI', 'cluster-image-update-progress-info', ZRR['error']), + ]) + module_args = { + "use_rest": "never" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert expect_and_capture_ansible_exception(my_obj.cluster_image_update_progress_get, 'fail', ignore_connection_error=False)['msg'] ==\ + zapi_error_message('Error fetching cluster image update progress details') + assert my_obj.cluster_image_update_progress_get() == {} + assert my_obj.cluster_image_update_progress_get(ignore_connection_error=True) == {} + + +def test_delete_package_zapi(): + # deleting a package + register_responses([ + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['cluster_image_package_local_info']), + ('ZAPI', 'cluster-image-package-delete', ZRR['success']), + # idempotency + ('ZAPI', 'cluster-image-package-local-get-iter', ZRR['no_records']), + ]) + module_args = { + "use_rest": "never", + "state": "absent", + "package_version": "Fattire__9.3.0", + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +# REST tests + +@patch('time.sleep') +def test_rest_ensure_apply_for_update_called(dont_sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['success_with_job_uuid']), + ('GET', JOB_GET_API, SRR['job_generic_response_running']), + ('GET', JOB_GET_API, SRR['job_generic_response_running']), + ('GET', JOB_GET_API, SRR['generic_error']), + ('GET', JOB_GET_API, 
SRR['job_generic_response_success']), + ('PATCH', 'cluster/software', SRR['success_with_job_uuid']), + ('GET', JOB_GET_API, SRR['job_generic_response_running']), + ('GET', JOB_GET_API, SRR['generic_error']), + ('GET', JOB_GET_API, SRR['job_generic_response_running']), + ('GET', JOB_GET_API, SRR['job_generic_response_success']), + ('GET', 'cluster/software', SRR['cluster_software_state_in_progress']), + ('GET', 'cluster/software', SRR['cluster_software_state_in_progress']), + ('GET', 'cluster/software', SRR['cluster_software_state_in_progress']), + ('GET', 'cluster/software', SRR['cluster_software_state_completed']), + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + ('DELETE', 'cluster/software/packages/PlinyTheElder', SRR['success_with_job_uuid']), + ('GET', JOB_GET_API, SRR['job_generic_response_running']), + ('GET', JOB_GET_API, SRR['job_generic_response_running']), + ('GET', JOB_GET_API, SRR['job_generic_response_success']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_ensure_apply_for_update_called_idempotent(dont_sleep): + # image already installed + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + + ]) + module_args = { + "use_rest": "always", + } + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_ensure_apply_for_update_called_with_validation(dont_sleep): + # for validation before update + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['success']), + ('PATCH', 'cluster/software', 
SRR['success']), + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + ('PATCH', 'cluster/software', SRR['success']), + ('GET', 'cluster/software', SRR['cluster_software_state_in_progress']), + ('GET', 'cluster/software', SRR['cluster_software_state_in_progress']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['cluster_software_state_in_progress']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['cluster_software_state_completed']), + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + ('DELETE', 'cluster/software/packages/PlinyTheElder', SRR['success']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + "validate_after_download": True, + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_download_idempotent_package_already_exist_pre(dont_sleep): + ''' downloading software - package already present before attempting download ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['cluster_software_package_info_pte']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + "download_only": True, + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_download_idempotent_package_already_exist_post(dont_sleep): + ''' downloading software - package already present when attempting download ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['error_image_already_exists']), + ('GET', 
'cluster/software/packages', SRR['cluster_software_package_info_pte']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + "download_only": True, + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_download_already_in_progress(dont_sleep): + ''' downloading software - package already present when attempting download ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['error_download_in_progress']), + ('GET', 'cluster/software/download', SRR['cluster_software_download_state_running']), + ('GET', 'cluster/software/download', SRR['generic_error']), + ('GET', 'cluster/software/download', SRR['generic_error']), + ('GET', 'cluster/software/download', SRR['cluster_software_download_state_running']), + ('GET', 'cluster/software/download', SRR['cluster_software_download_state_success']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + "download_only": True, + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_negative_download_package_already_exist(dont_sleep): + ''' downloading software - error while downloading the image - first request ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['error_image_already_exists']), + ('GET', 'cluster/software/packages', SRR['cluster_software_package_info_ft']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + "download_only": True, + } + error = 'Error: another package with the same file name exists: found: Fattire__9.3.0' + 
assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_rest_negative_download_error(dont_sleep): + ''' downloading software - error while downloading the image - first request ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['generic_error']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + } + error = rest_error_message('Error downloading software', 'cluster/software/download', ' - current versions:') + assert error in create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_rest_negative_download_progress_error(dont_sleep): + ''' downloading software - error while downloading the image - progress error ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['success_with_job_uuid']), + ('GET', JOB_GET_API, SRR['job_generic_response_running']), + ('GET', JOB_GET_API, SRR['job_generic_response_running']), + ('GET', JOB_GET_API, SRR['job_generic_response_failure']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + } + error = 'Error downloading software: job reported error: job reported failure, received' + assert error in create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_rest_negative_update_error_sync(dont_sleep): + ''' updating software - error while updating the image ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', 
SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['success']), + ('PATCH', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + # second error on validate results + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['success']), + ('PATCH', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + } + error = rest_error_message('Error updating software', 'cluster/software') + msg = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error in msg + assert 'validation results:' in msg + assert "'issue': {'message': 'Cluster HA is not configured in the cluster.'}" in msg + # seconnd error on validate results + msg = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error in msg + assert 'validation results:' in msg + assert 'validation results: Error fetching software information for validation_results:' in msg + + +@patch('time.sleep') +def test_rest_negative_update_error_waiting_for_state(dont_sleep): + ''' updating software - error while updating the image ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['success']), + ('PATCH', 'cluster/software', SRR['success']), + ('GET', 'cluster/software', SRR['cluster_software_state_in_progress']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', 
SRR['generic_error']), + # over 20 consecutive errors + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['success']), + ('PATCH', 'cluster/software', SRR['success']), + ('GET', 'cluster/software', SRR['cluster_software_state_in_progress']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + "timeout": 240 + } + error = rest_error_message('Error: unable to read image update state, using timeout 240. 
' + 'Last error: Error fetching software information for state', 'cluster/software') + msg = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error in msg + assert 'All errors:' in msg + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + "timeout": 1800 + } + # stop after 20 errors + error = rest_error_message('Error: unable to read image update state, using timeout 1800. ' + 'Last error: Error fetching software information for state', 'cluster/software') + msg = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error in msg + assert 'All errors:' in msg + + +@patch('time.sleep') +def test_rest_negative_update_error_job_errors(dont_sleep): + ''' updating software - error while updating the image ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['success']), + ('PATCH', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + # second error on validate results + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('POST', 'cluster/software/download', SRR['success']), + ('PATCH', 'cluster/software', SRR['generic_error']), + ('GET', 'cluster/software', SRR['generic_error']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + } + error = rest_error_message('Error updating software', 'cluster/software') + msg = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error in msg + assert 'validation results:' in msg + assert "'issue': {'message': 'Cluster HA is not configured in the cluster.'}" in msg + # seconnd error on validate results + msg = 
create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error in msg + assert 'validation results:' in msg + assert 'validation results: Error fetching software information for validation_results:' in msg + + +def test_rest_is_update_required(): + ''' update is required if nodes have different images, or version does not match ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('GET', 'cluster/software', SRR['cluster_software_node_info_mixed']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('GET', 'cluster/software', SRR['cluster_software_node_info_mixed']), + ]) + module_args = { + "use_rest": "always" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert not my_obj.is_update_required() + assert my_obj.is_update_required() + my_obj.parameters["package_version"] = "PlinyTheElder" + assert my_obj.is_update_required() + assert my_obj.is_update_required() + + +@patch('time.sleep') +def test_rest_cluster_image_validate(dont_sleep): + ''' check error, then check that reports are read correctly ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('PATCH', 'cluster/software', SRR['generic_error']), + ('PATCH', 'cluster/software', SRR['success']), + ('GET', 'cluster/software', SRR['zero_records']), # retried as validation_results is not present - empty record + ('GET', 'cluster/software', SRR['cluster_software_node_info']), # retried as validation_results is not present - other keys + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + ]) + module_args = { + "use_rest": "always" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.cluster_image_validate() == rest_error_message('Error validating software', 'cluster/software') + reports = my_obj.cluster_image_validate() + assert 'action' in reports[0] + assert 'issue' in reports[0] + + +def 
test_rest_cluster_image_errors(): + ''' REST error on get and delete ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software', SRR['generic_error']), + ('DELETE', 'cluster/software/packages/Fattire__9.3.0', SRR['generic_error']), + ]) + module_args = { + "use_rest": "always" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert expect_and_capture_ansible_exception(my_obj.cluster_image_get_versions, 'fail')['msg'] ==\ + rest_error_message('Error fetching software information for nodes', 'cluster/software') + assert expect_and_capture_ansible_exception(my_obj.cluster_image_package_delete, 'fail')['msg'] ==\ + rest_error_message('Error deleting cluster software package for Fattire__9.3.0', 'cluster/software/packages/Fattire__9.3.0') + + +def test_rest_cluster_image_get_for_node_versions(): + ''' getting nodes versions ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ('GET', 'cluster/software', SRR['cluster_software_node_info']), + ]) + module_args = { + "use_rest": "always" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.cluster_image_get_rest('versions') == [('node1', 'Fattire__9.3.0'), ('node2', 'Fattire__9.3.0')] + my_obj.parameters['nodes'] = ['node1'] + assert my_obj.cluster_image_get_rest('versions') == [('node1', 'Fattire__9.3.0')] + my_obj.parameters['nodes'] = ['node2'] + assert my_obj.cluster_image_get_rest('versions') == [('node2', 'Fattire__9.3.0')] + my_obj.parameters['nodes'] = ['node2', 'node3'] + error = 'Error: node not found in cluster: node3.' 
+ assert expect_and_capture_ansible_exception(my_obj.cluster_image_get_rest, 'fail', 'versions')['msg'] == error + my_obj.parameters['nodes'] = ['node4', 'node3'] + error = 'Error: nodes not found in cluster: node4, node3.' + assert expect_and_capture_ansible_exception(my_obj.cluster_image_get_rest, 'fail', 'versions')['msg'] == error + + +def test_rest_negative_cluster_image_get_for_node_versions(): + ''' getting nodes versions ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software', SRR['zero_records']), + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + ]) + module_args = { + "use_rest": "always" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = "Error fetching software information for nodes: no record calling cluster/software" + assert error in expect_and_capture_ansible_exception(my_obj.cluster_image_get_rest, 'fail', 'versions')['msg'] + error = "Unexpected results for what: versions, record: {'validation_results':" + assert error in expect_and_capture_ansible_exception(my_obj.cluster_image_get_rest, 'fail', 'versions')['msg'] + + +def test_rest_cluster_image_package_download(): + ''' download error, download error indicating package exists, successful download ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('POST', 'cluster/software/download', SRR['generic_error']), + ('POST', 'cluster/software/download', SRR['error_image_already_exists']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ('POST', 'cluster/software/download', SRR['success']), + ]) + module_args = { + "use_rest": "always" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = rest_error_message('Error downloading software', 'cluster/software/download', " - current versions: ['not available with force_update']") + assert error in expect_and_capture_ansible_exception(my_obj.download_software_rest, 'fail')['msg'] + error = 'Error: ONTAP 
reported package already exists, but no package found: ' + assert error in expect_and_capture_ansible_exception(my_obj.download_software_rest, 'fail')['msg'] + assert not my_obj.download_software_rest() + + +def test_rest_post_update_tasks(): + ''' validate success and error messages ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + ('DELETE', 'cluster/software/packages/Fattire__9.3.0', SRR['success']), + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + ('GET', 'cluster/software', SRR['cluster_software_validation_results']), + ]) + module_args = { + "use_rest": "always" + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.post_update_tasks_rest('completed') == cluster_software_validation_results['validation_results'] + # time out + error = 'Timeout error updating image using REST: state: in_progress.' + assert error in expect_and_capture_ansible_exception(my_obj.post_update_tasks_rest, 'fail', 'in_progress')['msg'] + # other state + error = 'Error updating image using REST: state: error_state.' 
+ assert error in expect_and_capture_ansible_exception(my_obj.post_update_tasks_rest, 'fail', 'error_state')['msg'] + + +def test_rest_delete_package(): + ''' deleting package ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['cluster_software_package_info_pte']), + ('DELETE', 'cluster/software/packages/PlinyTheElder', SRR['success']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['cluster_software_package_info_ft']), + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + "state": "absent", + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_negative_delete_package(): + ''' deleting package ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['generic_error']), + # idempotency + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster/software/packages', SRR['cluster_software_package_info_pte']), + ('DELETE', 'cluster/software/packages/PlinyTheElder', SRR['generic_error']) + ]) + module_args = { + "use_rest": "always", + "package_version": "PlinyTheElder", + "state": "absent", + } + error = rest_error_message('Error: unable to fetch local package list', 'cluster/software/packages') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + error = rest_error_message('Error deleting cluster software package for PlinyTheElder', 'cluster/software/packages/PlinyTheElder') + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_partially_supported_options(): + ''' validate success and error messages ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', 
SRR['is_rest_9_8_0']), + ]) + module_args = { + "use_rest": "always", + } + error = 'Minimum version of ONTAP for stabilize_minutes is (9, 8)' + assert error in create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert create_module(my_module, DEFAULT_ARGS, module_args) + module_args = { + "use_rest": "always", + "nodes": "node1" + } + error = 'Minimum version of ONTAP for nodes is (9, 9)' + assert error in create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args = { + "use_rest": "auto", + "nodes": "node1" + } + assert create_module(my_module, DEFAULT_ARGS, module_args) + print_warnings + assert_warning_was_raised('Falling back to ZAPI because of unsupported option(s) or option value(s) "nodes" in REST require (9, 9)') + + +def test_missing_arg(): + args = dict(DEFAULT_ARGS) + args.pop('package_url') + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster/software/packages', SRR['zero_records']), + ]) + module_args = { + "use_rest": "always", + } + error = 'Error: packague_url is a required parameter to download the software package.' 
+ assert error in call_main(my_main, args, module_args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_storage_auto_giveback.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_storage_auto_giveback.py new file mode 100644 index 000000000..3c6d345c1 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_storage_auto_giveback.py @@ -0,0 +1,320 @@ +''' unit tests ONTAP Ansible module: na_ontap_storage_auto_giveback ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_storage_auto_giveback \ + import NetAppOntapStorageAutoGiveback as storage_auto_giveback_module # module under test + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'storage_auto_giveback_enabled_record': (200, { + 'num_records': 1, + 'records': [{ + 'node': 'node1', + 'auto_giveback': True, + 'auto_giveback_after_panic': True + }] + }, None), + 'storage_auto_giveback_disabled_record': (200, { + 'num_records': 1, + "records": 
[{ + 'node': 'node1', + 'auto_giveback': False, + 'auto_giveback_after_panic': False + }] + }, None) +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None): + ''' save arguments ''' + self.type = kind + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'auto_giveback_enabled': + xml = self.build_storage_auto_giveback_enabled_info() + elif self.type == 'auto_giveback_disabled': + xml = self.build_storage_auto_giveback_disabled_info() + elif self.type == 'auto_giveback_fail': + raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") + self.xml_out = xml + return xml + + @staticmethod + def build_storage_auto_giveback_enabled_info(): + ''' build xml data for cf-get-iter ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'num-records': 1, + 'attributes-list': { + 'storage-failover-info': { + 'sfo-node-info': { + 'node-related-info': { + 'node': 'node1' + } + }, + 'sfo-options-info': { + 'options-related-info': { + 'auto-giveback-enabled': 'true', + 'sfo-giveback-options-info': { + 'giveback-options': { + 'auto-giveback-after-panic-enabled': 'true' + } + } + } + } + } + } + } + + xml.translate_struct(data) + return xml + + @staticmethod + def build_storage_auto_giveback_disabled_info(): + ''' build xml data for cf-get-iter ''' + xml = netapp_utils.zapi.NaElement('xml') + data = { + 'num-records': 1, + 'attributes-list': { + 'storage-failover-info': { + 'sfo-node-info': { + 'node-related-info': { + 'node': 'node1' + } + }, + 'sfo-options-info': { + 'options-related-info': { + 'auto-giveback-enabled': 'false', + 'sfo-giveback-options-info': { + 'giveback-options': { + 'auto-giveback-after-panic-enabled': 'false' + } + } + } + } + } + } + } + + xml.translate_struct(data) + return xml + 
+ +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + self.onbox = False + + def set_default_args(self, use_rest=None): + if self.onbox: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + name = 'node1' + auto_giveback_enabled = True + auto_giveback_after_panic_enabled = True + else: + hostname = '10.10.10.10' + username = 'username' + password = 'password' + name = 'node1' + auto_giveback_enabled = True + auto_giveback_after_panic_enabled = True + + args = dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + 'name': name, + 'auto_giveback_enabled': auto_giveback_enabled, + 'auto_giveback_after_panic_enabled': auto_giveback_after_panic_enabled + }) + + if use_rest is not None: + args['use_rest'] = use_rest + + return args + + @staticmethod + def get_storage_auto_giveback_mock_object(cx_type='zapi', kind=None): + storage_auto_giveback_obj = storage_auto_giveback_module() + if cx_type == 'zapi': + if kind is None: + storage_auto_giveback_obj.server = MockONTAPConnection() + else: + storage_auto_giveback_obj.server = MockONTAPConnection(kind=kind) + return storage_auto_giveback_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + storage_auto_giveback_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_get_called_existing(self): + ''' test get_storage_auto_giveback for existing config ''' + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = storage_auto_giveback_module() + my_obj.server = MockONTAPConnection(kind='auto_giveback_enabled') + assert my_obj.get_storage_auto_giveback() + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_storage_auto_giveback.NetAppOntapStorageAutoGiveback.modify_storage_auto_giveback') + def 
test_successful_enable(self, modify_storage_auto_giveback): + ''' enable storage_auto_giveback and testing idempotency ''' + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = storage_auto_giveback_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('auto_giveback_disabled') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + modify_storage_auto_giveback.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + set_module_args(self.set_default_args(use_rest='Never')) + my_obj = storage_auto_giveback_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('auto_giveback_enabled') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_storage_auto_giveback.NetAppOntapStorageAutoGiveback.modify_storage_auto_giveback') + def test_successful_disable(self, modify_storage_auto_giveback): + ''' disable storage_auto_giveback and testing idempotency ''' + data = self.set_default_args(use_rest='Never') + data['auto_giveback_enabled'] = False + data['auto_giveback_after_panic_enabled'] = False + set_module_args(data) + my_obj = storage_auto_giveback_module() + my_obj.ems_log_event = Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('auto_giveback_enabled') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # modify_storage_auto_giveback.assert_called_with() + # to reset na_helper from remembering the previous 'changed' value + data = self.set_default_args(use_rest='Never') + data['auto_giveback_enabled'] = False + data['auto_giveback_after_panic_enabled'] = False + set_module_args(data) + my_obj = storage_auto_giveback_module() + my_obj.ems_log_event = 
Mock(return_value=None) + if not self.onbox: + my_obj.server = MockONTAPConnection('auto_giveback_disabled') + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert not exc.value.args[0]['changed'] + + def test_if_all_methods_catch_exception(self): + data = self.set_default_args(use_rest='Never') + set_module_args(data) + my_obj = storage_auto_giveback_module() + if not self.onbox: + my_obj.server = MockONTAPConnection('auto_giveback_fail') + with pytest.raises(AnsibleFailJson) as exc: + my_obj.modify_storage_auto_giveback() + assert 'Error modifying auto giveback' in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_storage_auto_giveback_mock_object(cx_type='rest').apply() + assert SRR['generic_error'][2] in exc.value.args[0]['msg'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_enabled_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['storage_auto_giveback_disabled_record'], # get + SRR['empty_good'], # patch + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_storage_auto_giveback_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_enabled_rest(self, mock_request): + data = self.set_default_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['storage_auto_giveback_enabled_record'], # get + SRR['end_of_sequence'] + ] + with 
pytest.raises(AnsibleExitJson) as exc: + self.get_storage_auto_giveback_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_successful_disabled_rest(self, mock_request): + data = self.set_default_args() + data['auto_giveback_enabled'] = False + data['auto_giveback_after_panic_enabled'] = False + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['storage_auto_giveback_enabled_record'], # get + SRR['empty_good'], # patch + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_storage_auto_giveback_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_idempotent_disabled_rest(self, mock_request): + data = self.set_default_args() + data['auto_giveback_enabled'] = False + data['auto_giveback_after_panic_enabled'] = False + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['storage_auto_giveback_disabled_record'], # get + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_storage_auto_giveback_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_storage_failover.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_storage_failover.py new file mode 100644 index 000000000..aa0b7703e --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_storage_failover.py @@ -0,0 +1,350 @@ +''' unit tests ONTAP Ansible module: na_ontap_storage_failover ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest 
from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
# pylint: disable=unused-import
from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\
    AnsibleFailJson, AnsibleExitJson, patch_ansible

from ansible_collections.netapp.ontap.plugins.modules.na_ontap_storage_failover \
    import NetAppOntapStorageFailover as storage_failover_module  # module under test


if not netapp_utils.has_netapp_lib():
    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')

# REST API canned responses when mocking send_request
# each entry is a (http_status, json_body, error_message) triple
SRR = {
    # common responses
    'is_rest': (200, {}, None),
    'is_zapi': (400, {}, "Unreachable"),
    'empty_good': (200, {}, None),
    'no_records': (200, {'records': []}, None),
    'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"),
    'generic_error': (400, None, "Expected error"),
    # module specific responses
    'storage_failover_enabled_record': (200, {
        'num_records': 1,
        'records': [{
            'name': 'node1',
            'uuid': '56ab5d21-312a-11e8-9166-9d4fc452db4e',
            'ha': {
                'enabled': True
            }
        }]
    }, None),
    'storage_failover_disabled_record': (200, {
        'num_records': 1,
        "records": [{
            'name': 'node1',
            'uuid': '56ab5d21-312a-11e8-9166-9d4fc452db4e',
            'ha': {
                'enabled': False
            }
        }]
    }, None),
    # record without an 'ha' key, to exercise the "HA not available" error path
    'no_ha_record': (200, {
        'num_records': 1,
        "records": [{
            'name': 'node1',
            'uuid': '56ab5d21-312a-11e8-9166-9d4fc452db4e',
        }]
    }, None)
}


class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''

    def __init__(self, kind=None):
        ''' save arguments '''
        self.type = kind        # selects which canned XML the mock returns
        self.xml_in = None      # last request, for inspection
        self.xml_out = None     # last response, for inspection

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        self.xml_in = xml
        if self.type == 'storage_failover_enabled':
            xml = self.build_storage_failover_enabled_info()
        elif self.type == 'storage_failover_disabled':
            xml = self.build_storage_failover_disabled_info()
        elif self.type == 'storage_failover_fail':
            # simulate a ZAPI failure so error paths can be tested
            raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
        self.xml_out = xml
        return xml

    @staticmethod
    def build_storage_failover_enabled_info():
        ''' build xml data for cf-status '''
        xml = netapp_utils.zapi.NaElement('xml')
        data = {
            'is-enabled': 'true'
        }

        xml.translate_struct(data)
        return xml

    @staticmethod
    def build_storage_failover_disabled_info():
        ''' build xml data for cf-status '''
        xml = netapp_utils.zapi.NaElement('xml')
        data = {
            'is-enabled': 'false'
        }

        xml.translate_struct(data)
        return xml


class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        self.server = MockONTAPConnection()
        self.onbox = False  # False: run against mocks; True would target a real system

    def set_default_args(self, use_rest=None):
        ''' build the minimal module argument dict; use_rest is appended only when given '''
        # NOTE(review): both branches are currently identical; onbox presumably
        # existed to plug in real credentials -- confirm before removing.
        if self.onbox:
            hostname = '10.10.10.10'
            username = 'username'
            password = 'password'
            node_name = 'node1'
        else:
            hostname = '10.10.10.10'
            username = 'username'
            password = 'password'
            node_name = 'node1'

        args = dict({
            'state': 'present',
            'hostname': hostname,
            'username': username,
            'password': password,
            'node_name': node_name
        })

        if use_rest is not None:
            args['use_rest'] = use_rest

        return args

    @staticmethod
    def get_storage_failover_mock_object(cx_type='zapi', kind=None):
        ''' instantiate the module; attach a mock ZAPI server when cx_type is zapi '''
        storage_failover_obj = storage_failover_module()
        if cx_type == 'zapi':
            if kind is None:
                storage_failover_obj.server = MockONTAPConnection()
            else:
                storage_failover_obj.server = MockONTAPConnection(kind=kind)
        return storage_failover_obj

    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            storage_failover_module()
        print('Info: %s' % exc.value.args[0]['msg'])

    def test_ensure_get_called_existing(self):
        ''' test get_storage_failover for existing config '''
        set_module_args(self.set_default_args(use_rest='Never'))
        my_obj = storage_failover_module()
        my_obj.server = MockONTAPConnection(kind='storage_failover_enabled')
        assert my_obj.get_storage_failover()

    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_storage_failover.NetAppOntapStorageFailover.modify_storage_failover')
    def test_successful_enable(self, modify_storage_failover):
        ''' enable storage_failover and testing idempotency '''
        set_module_args(self.set_default_args(use_rest='Never'))
        my_obj = storage_failover_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('storage_failover_disabled')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
        # modify_storage_failover receives the *current* state (is_enabled False)
        modify_storage_failover.assert_called_with({'is_enabled': False})
        # to reset na_helper from remembering the previous 'changed' value
        set_module_args(self.set_default_args(use_rest='Never'))
        my_obj = storage_failover_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('storage_failover_enabled')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_storage_failover.NetAppOntapStorageFailover.modify_storage_failover')
    def test_successful_disable(self, modify_storage_failover):
        ''' disable storage_failover and testing idempotency '''
        data = self.set_default_args(use_rest='Never')
        data['state'] = 'absent'
        set_module_args(data)
        my_obj = storage_failover_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('storage_failover_enabled')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
        # again, the argument reflects the current (enabled) state before the change
        modify_storage_failover.assert_called_with({'is_enabled': True})
        # to reset na_helper from remembering the previous 'changed' value
        my_obj = storage_failover_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('storage_failover_disabled')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']

    def test_if_all_methods_catch_exception(self):
        ''' a failing ZAPI call surfaces as a module failure with a helpful message '''
        data = self.set_default_args(use_rest='Never')
        set_module_args(data)
        my_obj = storage_failover_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('storage_failover_fail')
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.modify_storage_failover(self.get_storage_failover_mock_object())
        assert 'Error modifying storage failover' in exc.value.args[0]['msg']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib')
    def test_negative_no_netapp_lib(self, mock_request):
        ''' module refuses to run when netapp_lib is not importable '''
        data = self.set_default_args(use_rest='Never')
        set_module_args(data)
        mock_request.return_value = False
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert 'Error: the python NetApp-Lib module is required.' in exc.value.args[0]['msg']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
    def test_rest_error(self, mock_request):
        ''' a REST error on get is reported in the failure message '''
        data = self.set_default_args()
        set_module_args(data)
        mock_request.side_effect = [
            SRR['is_rest'],
            SRR['generic_error'],
            SRR['end_of_sequence']
        ]
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert SRR['generic_error'][2] in exc.value.args[0]['msg']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
    def test_successful_enabled_rest(self, mock_request):
        ''' enabling failover over REST: disabled record then successful patch -> changed '''
        data = self.set_default_args()
        set_module_args(data)
        mock_request.side_effect = [
            SRR['is_rest'],
            SRR['storage_failover_disabled_record'],    # get
            SRR['empty_good'],                          # patch
            SRR['end_of_sequence']
        ]
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
    def test_idempotent_enabled_rest(self, mock_request):
        ''' enabling failover over REST is a no-op when already enabled '''
        data = self.set_default_args()
        set_module_args(data)
        mock_request.side_effect = [
            SRR['is_rest'],
            SRR['storage_failover_enabled_record'],     # get
            SRR['end_of_sequence']
        ]
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert not exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
    def test_successful_disabled_rest(self, mock_request):
        ''' disabling failover (state=absent) over REST: enabled record then patch -> changed '''
        data = self.set_default_args()
        data['state'] = 'absent'
        set_module_args(data)
        mock_request.side_effect = [
            SRR['is_rest'],
            SRR['storage_failover_enabled_record'],     # get
            SRR['empty_good'],                          # patch
            SRR['end_of_sequence']
        ]
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
    def test_idempotent_disabled_rest(self, mock_request):
        ''' disabling failover over REST is a no-op when already disabled '''
        data = self.set_default_args()
        data['state'] = 'absent'
        set_module_args(data)
        mock_request.side_effect = [
            SRR['is_rest'],
            SRR['storage_failover_disabled_record'],    # get
            SRR['end_of_sequence']
        ]
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert not exc.value.args[0]['changed']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
    def test_negative_no_ha_rest(self, mock_request):
        ''' a node record without an 'ha' section is reported as HA not available '''
        data = self.set_default_args()
        data['state'] = 'present'
        set_module_args(data)
        mock_request.side_effect = [
            SRR['is_rest'],
            SRR['no_ha_record'],    # get
            SRR['end_of_sequence']
        ]
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert 'HA is not available on node: node1' in exc.value.args[0]['msg']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
    def test_negative_node_not_found_rest(self, mock_request):
        ''' unknown node: the error message lists the cluster's known node names '''
        data = self.set_default_args()
        data['state'] = 'absent'
        set_module_args(data)
        mock_request.side_effect = [
            SRR['is_rest'],
            SRR['no_records'],
            SRR['storage_failover_disabled_record'],    # get
            SRR['end_of_sequence']
        ]
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert 'REST API did not return failover details for node' in exc.value.args[0]['msg']
        assert 'current nodes: node1' in exc.value.args[0]['msg']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
    def test_negative_node_not_found_rest_no_names(self, mock_request):
        ''' unknown node and no node list available: no node names in the message '''
        data = self.set_default_args()
        data['state'] = 'absent'
        set_module_args(data)
        mock_request.side_effect = [
            SRR['is_rest'],
            SRR['no_records'],
            SRR['no_records'],      # get all nodes
            SRR['end_of_sequence']
        ]
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert 'REST API did not return failover details for node' in exc.value.args[0]['msg']
        assert 'current nodes: node1' not in exc.value.args[0]['msg']

    @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
    def test_negative_node_not_found_rest_error_on_get_nodes(self, mock_request):
        ''' unknown node and the node-list call itself fails: both errors are reported '''
        data = self.set_default_args()
        data['state'] = 'absent'
        set_module_args(data)
        mock_request.side_effect = [
            SRR['is_rest'],
            SRR['no_records'],
            SRR['generic_error'],   # get all nodes
            SRR['end_of_sequence']
        ]
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_storage_failover_mock_object(cx_type='rest').apply()
        assert 'REST API did not return failover details for node' in exc.value.args[0]['msg']
        assert 'current nodes: node1' not in exc.value.args[0]['msg']
        assert 'failed to get list of nodes' in exc.value.args[0]['msg']
diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_svm.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_svm.py
new file mode 100644
index 000000000..d18d32a57
--- /dev/null
+++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_svm.py
@@ -0,0 +1,1251 @@
# (c) 2018-2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

''' unit test template for ONTAP Ansible module '''


from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pytest
import sys

from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
# pylint: disable=unused-import
from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \
    assert_warning_was_raised, call_main, clear_warnings, create_and_apply, create_module, expect_and_capture_ansible_exception, patch_ansible, print_warnings
from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import \
    patch_request_and_invoke, register_responses
from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses
from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses

from ansible_collections.netapp.ontap.plugins.modules.na_ontap_svm \
    import NetAppOntapSVM as svm_module, main as my_main  # module under test

if not netapp_utils.has_netapp_lib():
    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')

if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7):
    pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available')

# REST API canned responses when mocking send_request

# baseline SVM record returned by a mocked GET svm/svms
svm_info = {
    "uuid": "09e9fd5e-8ebd-11e9-b162-005056b39fe7",
    "name": "test_svm",
    "state": "running",
    "subtype": "default",
    "language": "c.utf_8",
    "aggregates": [{"name": "aggr_1",
                    "uuid": "850dd65b-8811-4611-ac8c-6f6240475ff9"},
                   {"name": "aggr_2",
                    "uuid": "850dd65b-8811-4611-ac8c-6f6240475ff9"}],
    "comment": "new comment",
    "ipspace": {"name": "ansible_ipspace",
                "uuid": "2b760d31-8dfd-11e9-b162-005056b39fe7"},
    "snapshot_policy": {"uuid": "3b611707-8dfd-11e9-b162-005056b39fe7",
                        "name": "old_snapshot_policy"},
    "nfs": {"enabled": True, "allowed": True},
    "cifs": {"enabled": False},
    "iscsi": {"enabled": False},
    "fcp": {"enabled": False},
    "nvme": {"enabled": False},
    'max_volumes': 3333
}

# variants of the baseline record carrying different certificates
svm_info_cert1 = dict(svm_info)
svm_info_cert1['certificate'] = {'name': 'cert_1', 'uuid': 'cert_uuid_1'}
svm_info_cert2 = dict(svm_info)
svm_info_cert2['certificate'] = {'name': 'cert_2', 'uuid': 'cert_uuid_2'}

SRR = rest_responses({
    'svm_record': (200, {'records': [svm_info]}, None),
    'svm_record_cert1': (200, {'records': [svm_info_cert1]}, None),
    'svm_record_cert2': (200, {'records': [svm_info_cert2]}, None),
    'svm_record_ap': (200,
                      {'records': [{"name": "test_svm",
                                    "state": "running",
                                    "aggregates": [{"name": "aggr_1",
                                                    "uuid": "850dd65b-8811-4611-ac8c-6f6240475ff9"},
                                                   {"name": "aggr_2",
                                                    "uuid": "850dd65b-8811-4611-ac8c-6f6240475ff9"}],
                                    "ipspace": {"name": "ansible_ipspace",
                                                "uuid": "2b760d31-8dfd-11e9-b162-005056b39fe7"},
                                    "snapshot_policy": {"uuid": "3b611707-8dfd-11e9-b162-005056b39fe7",
                                                        "name": "old_snapshot_policy"},
                                    "nfs": {"enabled": False},
                                    "cifs": {"enabled": True, "allowed": True},
                                    "iscsi": {"enabled": True, "allowed": True},
                                    "fcp": {"enabled": False},
                                    "nvme": {"enabled": False},
                                    "language": "de.utf_8",
                                    "uuid": "svm_uuid"
                                    }]}, None),
    'cli_record': (200,
                   {'records': [{"max_volumes": 100, "allowed_protocols": ['nfs', 'iscsi']}]}, None),
    'certificate_record_1': (200,
                             {'records': [{"name": "cert_1",
                                           "uuid": "cert_uuid_1"}]}, None),
    'certificate_record_2': (200,
                             {'records': [{"name": "cert_2",
                                           "uuid": "cert_uuid_2"}]}, None),
    'svm_web_record_1': (200, {
        'records': [{
            'certificate': {
                "uuid": "cert_uuid_1"
            },
            'client_enabled': False,
            'ocsp_enabled': False,
        }]}, None),
    'svm_web_record_2': (200, {
        'records': [{
            'certificate': {
                "uuid": "cert_uuid_2"
            },
            'client_enabled': True,
            'ocsp_enabled': True,
        }]}, None)
}, False)

# minimal argument set shared by all tests; individual tests overlay module_args
DEFAULT_ARGS = {
    'name': 'test_svm',
    'aggr_list': 'aggr_1,aggr_2',
    'ipspace': 'ansible_ipspace',
    'comment': 'new comment',
    'subtype': 'default',
    'hostname': 'test',
    'username': 'test_user',
    'password': 'test_pass!'
}

# canned ZAPI vserver-get-iter payload matching DEFAULT_ARGS
vserver_info = {
    'num-records': 1,
    'attributes-list': {
        'vserver-info': {
            'vserver-name': 'test_svm',
            'ipspace': 'ansible_ipspace',
            'root-volume': 'ansible_vol',
            'root-volume-aggregate': 'ansible_aggr',
            'language': 'c.utf_8',
            'comment': 'new comment',
            'snapshot-policy': 'old_snapshot_policy',
            'vserver-subtype': 'default',
            'allowed-protocols': [{'protocol': 'nfs'}, {'protocol': 'cifs'}],
            'aggr-list': [{'aggr-name': 'aggr_1'}, {'aggr-name': 'aggr_2'}],
        }}}


ZRR = zapi_responses({
    'svm_record': build_zapi_response(vserver_info)
})


def test_module_fail_when_required_args_missing():
    ''' required arguments are reported as errors '''
    error = create_module(svm_module, {}, fail=True)['msg']
    assert 'missing required arguments:' in error
    assert 'hostname' in error
    assert 'name' in error


def test_error_missing_name():
    ''' Test if create throws an error if name is not specified'''
    register_responses([
    ])
    args = dict(DEFAULT_ARGS)
    args.pop('name')
    assert create_module(svm_module, args, fail=True)['msg'] == 'missing required arguments: name'


@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib')
def test_error_missing_netapp_lib(mock_has_netapp_lib):
    ''' module refuses to run when netapp_lib is not importable '''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
    ])
    mock_has_netapp_lib.return_value = False
    msg = 'Error: the python NetApp-Lib module is required.  Import error: None'
    assert msg == create_module(svm_module, DEFAULT_ARGS, fail=True)['msg']


def test_successful_create_zapi():
    '''Test successful create'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['no_records']),
        ('ZAPI', 'vserver-create', ZRR['success']),
        ('ZAPI', 'vserver-modify', ZRR['success']),
    ])
    assert create_and_apply(svm_module, DEFAULT_ARGS)['changed']


def test_create_idempotency():
    '''Test API create'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
    ])
    assert not create_and_apply(svm_module, DEFAULT_ARGS)['changed']


def test_create_error():
    '''Test successful create'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['no_records']),
        ('ZAPI', 'vserver-create', ZRR['error']),
    ])
    msg = 'Error provisioning SVM test_svm: NetApp API failed. Reason - 12345:synthetic error for UT purpose'
    assert create_and_apply(svm_module, DEFAULT_ARGS, fail=True)['msg'] == msg


def test_successful_delete():
    '''Test successful delete'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-destroy', ZRR['success']),
    ])
    _modify_options_with_expected_change('state', 'absent')


def test_error_delete():
    '''Test delete with ZAPI error
    '''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-destroy', ZRR['error']),
    ])
    module_args = {
        'state': 'absent',
    }
    msg = 'Error deleting SVM test_svm: NetApp API failed.  Reason - 12345:synthetic error for UT purpose'
    assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


def test_delete_idempotency():
    '''Test delete idempotency '''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['no_records']),
    ])
    module_args = {
        'state': 'absent',
    }
    assert not create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_init():
    '''Validate that:
    admin_state is ignored with ZAPI
    language is set to lower case for C.UTF-8
    '''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
    ])
    module_args = {
        'admin_state': 'running',
        'language': 'C.uTf-8'
    }
    my_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    assert my_obj.parameters['language'] == 'c.utf_8'
    assert_warning_was_raised('admin_state is ignored when ZAPI is used.')


def test_init_error():
    '''Validate that:
    unallowed protocol raises an error
    services is not supported with ZAPI
    '''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('GET', 'cluster', SRR['is_zapi']),
        ('GET', 'cluster', SRR['is_rest_96']),
    ])
    module_args = {
        'allowed_protocols': 'dummy,humpty,dumpty,cifs,nfs',
    }
    error = create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg']
    assert 'Unexpected value dummy in allowed_protocols.' in error
    assert 'Unexpected value humpty in allowed_protocols.' in error
    assert 'Unexpected value dumpty in allowed_protocols.' in error
    assert 'cifs' not in error
    assert 'nfs' not in error

    module_args = {
        'services': {},
    }
    error = create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg']
    assert error == 'using services requires ONTAP 9.6 or later and REST must be enabled - Unreachable - using ZAPI.'
    module_args = {
        'services': {'ndmp': {'allowed': True}},
    }
    error = create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg']
    assert error == 'using ndmp requires ONTAP 9.7 or later and REST must be enabled - ONTAP version: 9.6.0 - using REST.'


def test_successful_rename():
    '''Test successful rename'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['no_records']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-rename', ZRR['success']),
    ])
    module_args = {
        'from_name': 'test_svm',
        'name': 'test_new_svm',
    }
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_error_rename_no_from():
    '''Test error rename'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['no_records']),
        ('ZAPI', 'vserver-get-iter', ZRR['no_records']),
    ])
    module_args = {
        'from_name': 'test_svm',
        'name': 'test_new_svm',
    }
    msg = 'Error renaming SVM test_new_svm: no SVM with from_name test_svm.'
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


def test_error_rename_zapi():
    '''Test error rename'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['no_records']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-rename', ZRR['error']),
    ])
    module_args = {
        'from_name': 'test_svm',
        'name': 'test_new_svm',
    }
    msg = 'Error renaming SVM test_svm: NetApp API failed.  Reason - 12345:synthetic error for UT purpose'
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


def test_successful_modify_language():
    '''Test successful modify language'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-modify', ZRR['success']),
    ])
    _modify_options_with_expected_change('language', 'c')


def test_error_modify_language():
    '''Test error modify language'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-modify', ZRR['error']),
    ])
    module_args = {
        'language': 'c',
    }
    msg = 'Error modifying SVM test_svm: NetApp API failed. Reason - 12345:synthetic error for UT purpose'
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


def test_error_modify_fixed_properties():
    '''Test error modifying a fixed property'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
    ])
    module_args = {
        'ipspace': 'new',
    }
    msg = 'Error modifying SVM test_svm: cannot modify ipspace - current: ansible_ipspace - desired: new.'
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg
    module_args = {
        'ipspace': 'new',
        'root_volume': 'new_root'
    }
    msg = 'Error modifying SVM test_svm: cannot modify root_volume - current: ansible_vol - desired: new_root, '\
          'ipspace - current: ansible_ipspace - desired: new.'
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


def test_successful_modify_snapshot_policy():
    '''Test successful modify language'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-modify', ZRR['success']),
    ])
    _modify_options_with_expected_change(
        'snapshot_policy', 'new_snapshot_policy'
    )


def test_successful_modify_allowed_protocols():
    '''Test successful modify allowed protocols'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-modify', ZRR['success']),
    ])
    _modify_options_with_expected_change(
        'allowed_protocols', 'nvme,fcp'
    )


def test_successful_modify_aggr_list():
    '''Test successful modify aggr-list'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-modify', ZRR['success']),
    ])
    _modify_options_with_expected_change(
        'aggr_list', 'aggr_3,aggr_4'
    )


def test_successful_modify_aggr_list_star():
    '''Test successful modify aggr-list'''
    register_responses([
        ('GET', 'cluster', SRR['is_zapi']),
        ('ZAPI', 'vserver-get-iter', ZRR['svm_record']),
        ('ZAPI', 'vserver-modify', ZRR['success']),
    ])
    module_args = {
        'aggr_list': '*'
    }
    results = create_and_apply(svm_module, DEFAULT_ARGS, module_args)
    assert results['changed']
    assert_warning_was_raised("na_ontap_svm: changed always 'True' when aggr_list is '*'.")


def _modify_options_with_expected_change(arg0, arg1):
    ''' helper: apply the module with a single overridden option and assert changed '''
    module_args = {
        arg0: arg1,
    }
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_rest_error():
    ''' a REST error on the initial GET is propagated '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['generic_error']),
    ])
    module_args = {
        'root_volume': 'whatever',
        'aggr_list': '*',
        'ignore_rest_unsupported_options': 'true',
    }
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == 'calling: svm/svms: got Expected error.'


def test_rest_error_unsupported_parm():
    ''' ZAPI-only options are rejected when REST is forced '''
    register_responses([
    ])
    module_args = {
        'root_volume': 'not_supported_by_rest',
        'use_rest': 'always',
    }
    assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == "REST API currently does not support 'root_volume'"


def test_rest_successfully_create():
    ''' create over REST: no record found, POST succeeds '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['zero_records']),
        ('POST', 'svm/svms', SRR['success']),
    ])
    assert create_and_apply(svm_module, DEFAULT_ARGS)['changed']


def test_rest_error_create():
    ''' create over REST: POST failure is reported '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['zero_records']),
        ('POST', 'svm/svms', SRR['generic_error']),
    ])
    msg = 'Error in create: calling: svm/svms: got Expected error.'
    assert create_and_apply(svm_module, DEFAULT_ARGS, fail=True)['msg'] == msg


def test_rest_create_idempotency():
    ''' create over REST is a no-op when the record already exists '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['svm_record']),
    ])
    module_args = {
        'root_volume': 'whatever',
        'aggr_list': '*',
        'ignore_rest_unsupported_options': 'true',
    }
    assert not create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_rest_successful_delete():
    '''Test successful delete'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['svm_record']),
        ('DELETE', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['success']),
    ])
    module_args = {
        'state': 'absent',
    }
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_rest_error_delete():
    '''Test error delete'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['svm_record']),
        ('DELETE', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['generic_error']),
    ])
    module_args = {
        'state': 'absent',
    }
    msg = 'Error in delete: calling: svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7: got Expected error.'
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg


def test_rest_error_delete_no_svm():
    '''Test error delete'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
    ])
    my_obj = create_module(svm_module, DEFAULT_ARGS)
    msg = 'Internal error, expecting SVM object in delete'
    assert expect_and_capture_ansible_exception(my_obj.delete_vserver, 'fail')['msg'] == msg


def test_rest_delete_idempotency():
    '''Test delete idempotency'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['zero_records']),
    ])
    module_args = {
        'state': 'absent',
    }
    assert not create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_rest_successful_rename():
    '''Test successful rename'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['zero_records']),
        ('GET', 'svm/svms', SRR['svm_record']),
        ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['success']),
    ])
    module_args = {
        'from_name': 'test_svm',
        'name': 'test_new_svm',
    }
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_rest_successful_modify_language():
    '''Test successful modify language'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['svm_record']),
        ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['success']),
    ])
    module_args = {
        'language': 'c',
    }
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_rest_successful_get():
    '''Test successful get'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['svm_record']),
        ('GET', 'svm/svms', SRR['svm_record_ap']),
    ])
    module_args = {
        'admin_state': 'running',
        'language': 'c'
    }
    my_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    # first get: nfs enabled+allowed, cifs disabled (svm_record)
    current = my_obj.get_vserver()
    print(current)
    assert current['services']['nfs']['allowed']
    assert not current['services']['cifs']['enabled']
    # second get: cifs/iscsi enabled+allowed, nfs disabled (svm_record_ap)
    current = my_obj.get_vserver()
    print(current)
    assert not current['services']['nfs']['enabled']
    assert current['services']['cifs']['allowed']
    assert current['services']['iscsi']['allowed']


def test_rest_successfully_create_ignore_zapi_option():
    ''' ZAPI-only options are tolerated when ignore_rest_unsupported_options is set '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['zero_records']),
        ('POST', 'svm/svms', SRR['success']),
    ])
    module_args = {
        'root_volume': 'whatever',
        'aggr_list': '*',
        'ignore_rest_unsupported_options': 'true',
    }
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_rest_successfully_create_with_service():
    ''' create over REST with per-protocol services in the POST body '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['zero_records']),
        ('POST', 'svm/svms', SRR['success']),
    ])
    module_args = {
        'services': {'nfs': {'allowed': True, 'enabled': True}, 'fcp': {'allowed': True, 'enabled': True}}
    }
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_rest_successfully_modify_with_service():
    ''' modify over REST: PATCH the SVM then POST the fcp service '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'svm/svms', SRR['svm_record']),
        ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['success']),
        ('POST', 'protocols/san/fcp/services', SRR['success']),
    ])
    module_args = {
        'admin_state': 'stopped',
        'services': {'nfs': {'allowed': True, 'enabled': True}, 'fcp': {'allowed': True, 'enabled': True}}
    }
    assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed']


def test_rest_successfully_enable_service():
    ''' enabling a service that has no record yet uses POST '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('POST', 'protocols/san/fcp/services', SRR['success']),
    ])
    module_args = {
        'admin_state': 'running',
        'services': {'nfs': {'allowed': True, 'enabled': True}}
    }
    my_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    modify = {'services': {'nfs': {'allowed': True}, 'fcp': {'enabled': True}}}
    current = {'services': {'nfs': {'allowed': True}}, 'uuid': 'uuid'}
    assert my_obj.modify_services(modify, current) is None


def test_rest_successfully_reenable_service():
    ''' re-enabling a service whose record exists (fcp link present) uses PATCH '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('PATCH', 'protocols/san/fcp/services/uuid', SRR['success']),
    ])
    module_args = {
        'admin_state': 'running',
        'services': {'nfs': {'allowed': True, 'enabled': True}}
    }
    my_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    modify = {'services': {'nfs': {'allowed': True}, 'fcp': {'enabled': True}}}
    # presence of the fcp '_links' entry signals an existing service record
    fcp_dict = {'_links': {'self': {'href': 'fcp_link'}}}
    current = {'services': {'nfs': {'allowed': True}}, 'uuid': 'uuid', 'fcp': fcp_dict}
    assert my_obj.modify_services(modify, current) is None


def test_rest_negative_enable_service():
    ''' an unknown service name in modify is an internal error '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
    ])
    module_args = {
        'admin_state': 'running',
        'services': {'nfs': {'allowed': True, 'enabled': True}}
    }
    my_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    modify = {'services': {'nfs': {'allowed': True}, 'bad_value': {'enabled': True}}, 'name': 'new_name'}
    current = {'services': {'nfs': {'allowed': True}}, 'uuid': 'uuid'}
    error = expect_and_capture_ansible_exception(my_obj.modify_services, 'fail', modify, current)['msg']
    assert error == 'Internal error, unexpecting service: bad_value.'
def test_rest_negative_modify_services():
    """A REST failure while enabling the fcp service is reported as a modify-service error."""
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('POST', 'protocols/san/fcp/services', SRR['generic_error']),
    ])
    module_args = {
        'admin_state': 'running',
        'services': {'nfs': {'allowed': True, 'enabled': True}}
    }
    svm_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    # desired state asks to enable fcp; existing state has no fcp service record
    desired = {'services': {'nfs': {'allowed': True}, 'fcp': {'enabled': True}}, 'name': 'new_name'}
    existing = {'services': {'nfs': {'allowed': True}}, 'uuid': 'uuid'}
    failure = expect_and_capture_ansible_exception(svm_obj.modify_services, 'fail', desired, existing)['msg']
    assert failure == 'Error in modify service for fcp: calling: protocols/san/fcp/services: got Expected error.'


def test_rest_negative_modify_current_none():
    """modify_vserver must fail fast when no current SVM record is supplied."""
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
    ])
    module_args = {
        'admin_state': 'running',
        'services': {'nfs': {'allowed': True, 'enabled': True}}
    }
    svm_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    # current is deliberately None to exercise the internal-error guard
    failure = expect_and_capture_ansible_exception(
        svm_obj.modify_vserver, 'fail', {'enabled_protocols': ['nfs', 'fcp']}, None)['msg']
    assert failure == 'Internal error, expecting SVM object in modify.'


def test_rest_negative_modify_modify_none():
    """modify_vserver must fail fast when the modify dict is empty (nothing to change)."""
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
    ])
    module_args = {
        'admin_state': 'running',
        'services': {'nfs': {'allowed': True, 'enabled': True}}
    }
    svm_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    existing = {'enabled_protocols': ['nfs'], 'disabled_protocols': ['fcp', 'iscsi', 'nvme'], 'uuid': 'uuid'}
    # empty modify dict exercises the internal-error guard
    failure = expect_and_capture_ansible_exception(svm_obj.modify_vserver, 'fail', {}, existing)['msg']
    assert failure == 'Internal error, expecting something to modify in modify.'
def test_rest_negative_modify_error_1():
    """A REST failure on the rename PATCH is surfaced as a rename error."""
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('PATCH', 'svm/svms/uuid', SRR['generic_error']),    # rename
    ])
    module_args = {
        'admin_state': 'running',
        'language': 'klingon',
        'services': {'nfs': {'allowed': True, 'enabled': True}}
    }
    svm_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    desired = {'enabled_protocols': ['nfs', 'fcp'], 'name': 'new_name', 'language': 'klingon'}
    existing = {'enabled_protocols': ['nfs'], 'disabled_protocols': ['fcp', 'iscsi', 'nvme'], 'uuid': 'uuid'}
    failure = expect_and_capture_ansible_exception(svm_obj.modify_vserver, 'fail', desired, existing)['msg']
    assert failure == 'Error in rename: calling: svm/svms/uuid: got Expected error.'


def test_rest_negative_modify_error_2():
    """When rename succeeds but the follow-up modify PATCH fails, the modify error is reported."""
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('PATCH', 'svm/svms/uuid', SRR['success']),          # rename
        ('PATCH', 'svm/svms/uuid', SRR['generic_error']),    # modify
    ])
    module_args = {
        'admin_state': 'running',
        'language': 'klingon',
        'services': {'nfs': {'allowed': True, 'enabled': True}}
    }
    svm_obj = create_module(svm_module, DEFAULT_ARGS, module_args)
    desired = {'enabled_protocols': ['nfs', 'fcp'], 'name': 'new_name', 'language': 'klingon'}
    existing = {'enabled_protocols': ['nfs'], 'disabled_protocols': ['fcp', 'iscsi', 'nvme'], 'uuid': 'uuid'}
    failure = expect_and_capture_ansible_exception(svm_obj.modify_vserver, 'fail', desired, existing)['msg']
    assert failure == 'Error in modify: calling: svm/svms/uuid: got Expected error.'
+ + +def test_rest_successfully_get_older_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'private/cli/vserver', SRR['cli_record']), # get protocols + ]) + module_args = { + 'admin_state': 'running', + 'services': {'nfs': {'allowed': True, 'enabled': True}} + } + assert not create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_add_protocols_on_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/svms', SRR['zero_records']), + ('POST', 'svm/svms', SRR['success']), + ('PATCH', 'private/cli/vserver/add-protocols', SRR['success']), + ]) + module_args = { + 'admin_state': 'running', + 'services': {'nfs': {'allowed': True, 'enabled': True}} + } + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_add_remove_protocols_on_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'private/cli/vserver', SRR['cli_record']), # get protocols + ('PATCH', 'private/cli/vserver/add-protocols', SRR['success']), + ('PATCH', 'private/cli/vserver/remove-protocols', SRR['success']) + ]) + module_args = { + 'admin_state': 'running', + 'services': {'nfs': {'allowed': True, 'enabled': True}, 'iscsi': {'allowed': False}, 'fcp': {'allowed': True}} + } + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_add_remove_protocols_on_modify_old_style(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'private/cli/vserver', SRR['cli_record']), # get protocols + ('PATCH', 'private/cli/vserver/add-protocols', SRR['success']), + ('PATCH', 'private/cli/vserver/remove-protocols', SRR['success']) + ]) + module_args = { + 'admin_state': 'running', + 'allowed_protocols': ['nfs', 'fcp'] + } + assert 
create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_validate_int_or_string_as_int(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ]) + module_args = { + 'admin_state': 'running', + 'services': {'nfs': {'allowed': True, 'enabled': True}} + } + assert create_module(svm_module, DEFAULT_ARGS, module_args).validate_int_or_string('10', 'whatever') is None + + +def test_validate_int_or_string_as_str(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ]) + module_args = { + 'admin_state': 'running', + 'services': {'nfs': {'allowed': True, 'enabled': True}} + } + assert create_module(svm_module, DEFAULT_ARGS, module_args).validate_int_or_string('whatever', 'whatever') is None + + +def test_negative_validate_int_or_string(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ]) + module_args = { + 'admin_state': 'running', + 'services': {'nfs': {'allowed': True, 'enabled': True}} + } + astring = 'testme' + error = expect_and_capture_ansible_exception(create_module(svm_module, DEFAULT_ARGS, module_args).validate_int_or_string, 'fail', '10a', astring)['msg'] + assert "expecting int value or '%s'" % astring in error + + +def test_rest_successfully_modify_with_admin_state(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'svm/svms', SRR['svm_record']), + ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['success']) # change admin_state + ]) + module_args = {'admin_state': 'stopped'} + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_successfully_modify_with_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_1']), + ('GET', 'svm/svms', SRR['zero_records']), + ('POST', 'svm/svms', SRR['success']), + ('GET', 'svm/svms', SRR['svm_record']), + ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['success']) # change admin_state + ]) + module_args = {'admin_state': 'stopped'} + 
assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + +# Tests for web services - 4 cases +# ZAPI: not supported +# REST < 9.8: not supported +# REST 9.8, 9.9. 9.10.0: only certificate is supported, using deprecated certificate fields in svs/svms +# REST >= 9.10.1: all options are supported, using svm/svms/uuid/web + + +def test_web_services_error_zapi(): + register_responses([ + ('GET', 'cluster', SRR['is_zapi']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = 'using web requires ONTAP 9.8 or later and REST must be enabled - Unreachable - using ZAPI.' + assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_error_9_7_5(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_7_5']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = 'using web requires ONTAP 9.8 or later and REST must be enabled - ONTAP version: 9.7.5 - using REST.' + assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_error_9_8_0(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + msg = "using ('client_enabled', 'ocsp_enabled') requires ONTAP 9.10.1 or later and REST must be enabled - ONTAP version: 9.8.0 - using REST." 
+ module_args = {'web': {'certificate': 'cert_name', 'client_enabled': True}} + assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + module_args = {'web': {'certificate': 'cert_name', 'ocsp_enabled': True}} + assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_modify_certificate_9_8_0_none_set(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'private/cli/vserver', SRR['cli_record']), # get protocols + ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['success']) # change certificate + ]) + module_args = {'web': {'certificate': 'cert_name'}} + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_web_services_modify_certificate_9_8_0_other_set(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ('GET', 'svm/svms', SRR['svm_record_cert2']), + ('GET', 'private/cli/vserver', SRR['cli_record']), # get protocols + ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['success']) # change certificate + ]) + module_args = {'web': {'certificate': 'cert_name'}} + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_web_services_modify_certificate_9_8_0_idempotent(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ('GET', 'svm/svms', SRR['svm_record_cert1']), + ('GET', 'private/cli/vserver', SRR['cli_record']), # get protocols + ]) + module_args = {'web': {'certificate': 'cert_name'}} + assert not create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_web_services_modify_certificate_9_8_0_error_not_found(): + register_responses([ + ('GET', 'cluster', 
SRR['is_rest_9_8_0']), + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = "Error certificate not found: {'name': 'cert_name'}. Current certificates with type=server: ['cert_1']" + assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_modify_certificate_9_8_0_error_api1(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/certificates', SRR['generic_error']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = "Error retrieving certificate {'name': 'cert_name'}: calling: security/certificates: got Expected error." + assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_modify_certificate_9_8_0_error_api2(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['generic_error']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = "Error retrieving certificates: calling: security/certificates: got Expected error." 
+ assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_modify_certificate_9_10_1_none_set(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web', SRR['zero_records']), + ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web', SRR['success']) # change certificate + ]) + module_args = {'web': {'certificate': 'cert_name'}} + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_web_services_modify_certificate_9_10_1_other_set(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web', SRR['svm_web_record_2']), + ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web', SRR['success']) # change certificate + ]) + module_args = {'web': {'certificate': 'cert_name'}} + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_web_services_modify_certificate_9_10_1_idempotent(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web', SRR['svm_web_record_1']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + assert not create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_web_services_modify_certificate_9_10_1_error_not_found(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + 
('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['zero_records']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = "Error certificate not found: {'name': 'cert_name'}. Current certificates with type=server: ['cert_1']" + assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + msg = "Error certificate not found: {'name': 'cert_name'}. Current certificates with type=server: []" + assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_modify_certificate_9_10_1_error_api1(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['generic_error']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = "Error retrieving certificate {'name': 'cert_name'}: calling: security/certificates: got Expected error." + assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_modify_certificate_9_10_1_error_api2(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['zero_records']), + ('GET', 'security/certificates', SRR['generic_error']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = "Error retrieving certificates: calling: security/certificates: got Expected error." 
+ assert create_module(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_modify_certificate_9_10_1_error_api3(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web', SRR['generic_error']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = 'Error retrieving web info: calling: svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web: got Expected error.' + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_modify_certificate_9_10_1_error_api4(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ('GET', 'svm/svms', SRR['svm_record']), + ('GET', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web', SRR['svm_web_record_2']), + ('PATCH', 'svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web', SRR['generic_error']) # change certificate + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = "Error in modify web service for {'certificate': {'uuid': 'cert_uuid_1'}}: "\ + "calling: svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web: got Expected error." + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_web_services_modify_certificate_9_10_1_warning(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/certificates', SRR['certificate_record_1']), + ]) + module_args = {'web': {'certificate': 'cert_name'}} + msg = "Error in modify web service for {'certificate': {'uuid': 'cert_uuid_1'}}: "\ + "calling: svm/svms/09e9fd5e-8ebd-11e9-b162-005056b39fe7/web: got Expected error." 
+ my_obj = create_module(svm_module, DEFAULT_ARGS, module_args) + assert my_obj.modify_web_services({}, {'uuid': 'uuid'}) is None + assert_warning_was_raised('Nothing to change: {}') + clear_warnings() + assert my_obj.modify_web_services({'certificate': {'name': 'whatever'}}, {'uuid': 'uuid'}) is None + assert_warning_was_raised("Nothing to change: {'certificate': {}}") + clear_warnings() + assert my_obj.modify_web_services({'certificate': {}}, {'uuid': 'uuid'}) is None + assert_warning_was_raised("Nothing to change: {'certificate': {}}") + + +def test_rest_cli_max_volumes_get(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['svm_record_ap']), + ('GET', 'private/cli/vserver', SRR['cli_record']), + ]) + module_args = { + 'max_volumes': 3333, + } + my_obj = create_module(svm_module, DEFAULT_ARGS, module_args) + record = my_obj.get_vserver() + assert 'name' in SRR['svm_record_ap'][1]['records'][0] + assert 'max_volumes' not in SRR['svm_record_ap'][1]['records'][0] + assert 'max_volumes' in record + + +def test_rest_cli_max_volumes_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['zero_records']), + ('POST', 'svm/svms', SRR['success']), + ('PATCH', 'private/cli/vserver', SRR['success']), + ]) + module_args = { + 'max_volumes': 3333, + } + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_cli_max_volumes_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['svm_record_ap']), + ('GET', 'private/cli/vserver', SRR['cli_record']), + ('PATCH', 'private/cli/vserver', SRR['success']), + ]) + module_args = { + 'max_volumes': 3333, + } + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_rest_cli_max_volumes_get(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['svm_record_ap']), + ('GET', 
'private/cli/vserver', SRR['generic_error']), + ]) + module_args = { + 'max_volumes': 3333, + } + msg = 'Error getting vserver info: calling: private/cli/vserver: got Expected error. - None' + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_error_rest_cli_max_volumes_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['svm_record_ap']), + ('GET', 'private/cli/vserver', SRR['cli_record']), + ('PATCH', 'private/cli/vserver', SRR['generic_error']), + ]) + module_args = { + 'max_volumes': 3333, + } + msg = 'Error updating max_volumes: calling: private/cli/vserver: got Expected error. - None' + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_cli_add_remove_protocols_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['zero_records']), + ('POST', 'svm/svms', SRR['success']), + ('PATCH', 'private/cli/vserver/add-protocols', SRR['success']), + ('PATCH', 'private/cli/vserver/remove-protocols', SRR['success']), + ]) + module_args = { + 'allowed_protocols': 'nfs,cifs', + } + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_rest_cli_add_protocols_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['zero_records']), + ('POST', 'svm/svms', SRR['success']), + ('PATCH', 'private/cli/vserver/add-protocols', SRR['generic_error']), + ]) + module_args = { + 'allowed_protocols': 'nfs,cifs', + } + msg = 'Error adding protocols: calling: private/cli/vserver/add-protocols: got Expected error. 
- None' + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_cli_remove_protocols_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['svm_record_ap']), + ('GET', 'private/cli/vserver', SRR['cli_record']), + ('PATCH', 'private/cli/vserver/remove-protocols', SRR['success']), + ]) + module_args = { + 'allowed_protocols': 'nfs,cifs', + } + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_rest_cli_remove_protocols_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['svm_record_ap']), + ('GET', 'private/cli/vserver', SRR['cli_record']), + ('PATCH', 'private/cli/vserver/remove-protocols', SRR['generic_error']), + ]) + module_args = { + 'allowed_protocols': 'nfs,cifs', + } + msg = 'Error removing protocols: calling: private/cli/vserver/remove-protocols: got Expected error. - None' + assert create_and_apply(svm_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == msg + + +def test_add_parameter_to_dict(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'name': 'svm', + 'ipspace': 'ipspace', + 'max_volumes': 3333, + } + my_obj = create_module(svm_module, DEFAULT_ARGS, module_args) + test_dict = {} + my_obj.add_parameter_to_dict(test_dict, 'name', None) + my_obj.add_parameter_to_dict(test_dict, 'ipspace', 'ipspace_key') + my_obj.add_parameter_to_dict(test_dict, 'max_volumes', None, True) + print(test_dict) + assert test_dict['name'] == 'svm' + assert test_dict['ipspace_key'] == 'ipspace' + assert test_dict['max_volumes'] == '3333' + + +def test_rest_language_match(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'svm/svms', SRR['svm_record_ap']), + ('GET', 'private/cli/vserver', SRR['cli_record']), + ('PATCH', 'svm/svms/svm_uuid', SRR['success']), + ]) + module_args = { + 'language': 'de.UTF-8' + } 
+ assert create_and_apply(svm_module, DEFAULT_ARGS, module_args)['changed'] + print_warnings() + assert_warning_was_raised( + 'Attempting to change language from ONTAP value de.utf_8 to de.UTF-8. Use de.utf_8 to suppress this warning and maintain idempotency.') diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_template.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_template.py new file mode 100644 index 000000000..b548739a8 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_template.py @@ -0,0 +1,86 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_cg_snapshot \ + import NetAppONTAPCGSnapshot as my_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, parm1=None): + ''' save arguments ''' + self.type = kind + self.parm1 = parm1 + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'vserver': + xml = self.build_vserver_info(self.parm1) + 
self.xml_out = xml + return xml + + @staticmethod + def build_vserver_info(vserver): + ''' build xml data for vserser-info ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = netapp_utils.zapi.NaElement('attributes-list') + attributes.add_node_with_children('vserver-info', + **{'vserver-name': vserver}) + xml.add_child_elem(attributes) + # print(xml.to_string()) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.server = MockONTAPConnection() + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_ensure_command_called(self): + ''' a more interesting test ''' +# TODO: change argument names/values + set_module_args({ + 'vserver': 'vserver', + 'volumes': 'volumes', + 'snapshot': 'snapshot', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + }) + my_obj = my_module() + my_obj.server = self.server + with pytest.raises(AnsibleFailJson) as exc: + # It may not be a good idea to start with apply + # More atomic methods can be easier to mock + # Hint: start with get methods, as they are called first + my_obj.apply() +# TODO: change message, and maybe test contents + msg = 'Error fetching CG ID for CG commit snapshot' + assert exc.value.args[0]['msg'] == msg diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ucadapter.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ucadapter.py new file mode 100644 index 000000000..5f1f502c1 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_ucadapter.py @@ -0,0 +1,173 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible 
module: na_ontap_ucadapter ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible,\ + create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_ucadapter \ + import NetAppOntapadapter as ucadapter_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + +DEFAULT_ARGS = { + 'hostname': '10.0.0.0', + 'username': 'user', + 'password': 'pass', + 'node_name': 'node1', + 'adapter_name': '0f', + 'mode': 'fc', + 'type': 'target', + 'use_rest': 'never' +} + +ucm_info_mode_fc = { + 'attributes': { + 'uc-adapter-info': { + 'mode': 'fc', + 'pending-mode': 'abc', + 'type': 'target', + 'pending-type': 'intitiator', + 'status': 'up', + } + } +} + +ucm_info_mode_cna = { + 'attributes': { + 'uc-adapter-info': { + 'mode': 'cna', + 'pending-mode': 'cna', + 'type': 'target', + 'pending-type': 'intitiator', + 'status': 'up', + } + } +} + + +ZRR = zapi_responses({ + 'ucm_info': build_zapi_response(ucm_info_mode_fc), + 'ucm_info_cna': build_zapi_response(ucm_info_mode_cna) +}) + + +SRR = rest_responses({ + 'ucm_info': (200, {"records": 
[{ + 'current_mode': 'fc', + 'current_type': 'target', + 'status_admin': 'up' + }], "num_records": 1}, None), + 'ucm_info_cna': (200, {"records": [{ + 'current_mode': 'cna', + 'current_type': 'target', + 'status_admin': 'up' + }], "num_records": 1}, None), + 'fc_adapter_info': (200, {"records": [{ + 'uuid': 'abcdef' + }], "num_records": 1}, None) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "node_name", "adapter_name"] + error = create_module(ucadapter_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_ensure_ucadapter_get_called(): + ''' fetching ucadapter details ''' + register_responses([ + ('ucm-adapter-get', ZRR['empty']) + ]) + ucm_obj = create_module(ucadapter_module, DEFAULT_ARGS) + assert ucm_obj.get_adapter() is None + + +def test_change_mode_from_cna_to_fc(): + ''' configuring ucadaptor and checking idempotency ''' + register_responses([ + ('ucm-adapter-get', ZRR['ucm_info_cna']), + ('fcp-adapter-config-down', ZRR['success']), + ('ucm-adapter-modify', ZRR['success']), + ('fcp-adapter-config-up', ZRR['success']), + ('ucm-adapter-get', ZRR['ucm_info_cna']) + ]) + assert create_and_apply(ucadapter_module, DEFAULT_ARGS)['changed'] + args = {'mode': 'cna'} + assert not create_and_apply(ucadapter_module, DEFAULT_ARGS, args)['changed'] + + +def test_change_mode_from_fc_to_cna(): + register_responses([ + ('ucm-adapter-get', ZRR['ucm_info']), + ('fcp-adapter-config-down', ZRR['success']), + ('ucm-adapter-modify', ZRR['success']), + ('fcp-adapter-config-up', ZRR['success']), + ]) + args = {'mode': 'cna'} + assert create_and_apply(ucadapter_module, DEFAULT_ARGS, args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('ucm-adapter-get', ZRR['error']), + ('ucm-adapter-modify', ZRR['error']), + 
('fcp-adapter-config-down', ZRR['error']), + ('fcp-adapter-config-up', ZRR['error']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'network/fc/ports', SRR['generic_error']), + ('GET', 'private/cli/ucadmin', SRR['generic_error']), + ('PATCH', 'private/cli/ucadmin', SRR['generic_error']), + ('PATCH', 'network/fc/ports/abcdef', SRR['generic_error']), + ('PATCH', 'network/fc/ports/abcdef', SRR['generic_error']), + ('GET', 'network/fc/ports', SRR['empty_records']) + ]) + ucm_obj = create_module(ucadapter_module, DEFAULT_ARGS) + assert 'Error fetching ucadapter' in expect_and_capture_ansible_exception(ucm_obj.get_adapter, 'fail')['msg'] + assert 'Error modifying adapter' in expect_and_capture_ansible_exception(ucm_obj.modify_adapter, 'fail')['msg'] + assert 'Error trying to down' in expect_and_capture_ansible_exception(ucm_obj.online_or_offline_adapter, 'fail', 'down', '0f')['msg'] + assert 'Error trying to up' in expect_and_capture_ansible_exception(ucm_obj.online_or_offline_adapter, 'fail', 'up', '0f')['msg'] + + ucm_obj = create_module(ucadapter_module, DEFAULT_ARGS, {'use_rest': 'always'}) + ucm_obj.adapters_uuids = {'0f': 'abcdef'} + assert 'Error fetching adapter 0f uuid' in expect_and_capture_ansible_exception(ucm_obj.get_adapter_uuid, 'fail', '0f')['msg'] + assert 'Error fetching ucadapter' in expect_and_capture_ansible_exception(ucm_obj.get_adapter, 'fail')['msg'] + assert 'Error modifying adapter' in expect_and_capture_ansible_exception(ucm_obj.modify_adapter, 'fail')['msg'] + assert 'Error trying to down' in expect_and_capture_ansible_exception(ucm_obj.online_or_offline_adapter, 'fail', 'down', '0f')['msg'] + assert 'Error trying to up' in expect_and_capture_ansible_exception(ucm_obj.online_or_offline_adapter, 'fail', 'up', '0f')['msg'] + assert 'Error: Adapter(s) 0f not exist' in expect_and_capture_ansible_exception(ucm_obj.get_adapters_uuids, 'fail')['msg'] + + +def test_change_mode_from_cna_to_fc_rest(): + ''' configuring ucadaptor ''' + 
register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'private/cli/ucadmin', SRR['ucm_info_cna']), + ('GET', 'network/fc/ports', SRR['fc_adapter_info']), + ('PATCH', 'network/fc/ports/abcdef', SRR['success']), + ('PATCH', 'private/cli/ucadmin', SRR['success']), + ('PATCH', 'network/fc/ports/abcdef', SRR['success']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'private/cli/ucadmin', SRR['ucm_info_cna']) + ]) + assert create_and_apply(ucadapter_module, DEFAULT_ARGS, {'use_rest': 'always'})['changed'] + args = {'mode': 'cna', 'use_rest': 'always'} + assert not create_and_apply(ucadapter_module, DEFAULT_ARGS, args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_group.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_group.py new file mode 100644 index 000000000..a29779e5c --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_unix_group.py @@ -0,0 +1,545 @@ +# (c) 2019-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception, AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import 
rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_group \ + import NetAppOntapUnixGroup as group_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'user_record': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "vserver" + }, + "name": "user_group", + "id": 1, + "users": [{"name": "user1"}, {"name": "user2"}], + "target": { + "name": "20:05:00:50:56:b3:0c:fa" + } + } + ], + "num_records": 1 + }, None + ), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + +unix_group_info = { + 'num-records': 1, + 'attributes-list': { + 'unix-group-info': { + 'group-name': 'user_group', + 'group-id': '1', + 'users': [{'unix-user-name': {'user-name': 'user1'}}] + } + } +} + + +ZRR = zapi_responses({ + 'unix_group_info': build_zapi_response(unix_group_info) +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'vserver', + 'name': 'user_group', + 'id': '1', + 'use_rest': 'never', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + group_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_get_nonexistent_user_group(): + ''' Test if get_unix_group returns None for non-existent group ''' + register_responses([ + ('name-mapping-unix-group-get-iter', ZRR['empty']) + ]) + user_obj = create_module(group_module, DEFAULT_ARGS) + result = user_obj.get_unix_group() + assert result is None + + +def test_get_user_group(): + ''' Test if get_unix_group returns unix group ''' + register_responses([ + ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']) + ]) + user_obj = 
create_module(group_module, DEFAULT_ARGS) + result = user_obj.get_unix_group() + assert result + + +def test_get_error_existent_user_group(): + ''' Test if get_unix_user returns existent user group ''' + register_responses([ + ('name-mapping-unix-group-get-iter', ZRR['error']) + ]) + group_module_object = create_module(group_module, DEFAULT_ARGS) + msg = "Error getting UNIX group" + assert msg in expect_and_capture_ansible_exception(group_module_object.get_unix_group, 'fail')['msg'] + + +def test_create_unix_group_zapi(): + register_responses([ + ('name-mapping-unix-group-get-iter', ZRR['empty']), + ('name-mapping-unix-group-create', ZRR['success']), + ]) + module_args = { + 'name': 'user_group', + 'id': '1' + } + assert create_and_apply(group_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_create_unix_group_with_user_zapi(): + register_responses([ + ('name-mapping-unix-group-get-iter', ZRR['empty']), + ('name-mapping-unix-group-create', ZRR['success']), + ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']), + ('name-mapping-unix-group-add-user', ZRR['success']) + ]) + module_args = { + 'name': 'user_group', + 'id': '1', + 'users': ['user1', 'user2'] + } + assert create_and_apply(group_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_create_unix_user_zapi(): + register_responses([ + ('name-mapping-unix-group-get-iter', ZRR['empty']), + ('name-mapping-unix-group-create', ZRR['error']), + ]) + module_args = { + 'name': 'user_group', + 'id': '1', + 'users': ['user1', 'user2'] + } + error = create_and_apply(group_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + msg = "Error creating UNIX group" + assert msg in error + + +def test_delete_unix_group_zapi(): + register_responses([ + ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']), + ('name-mapping-unix-group-destroy', ZRR['success']), + ]) + module_args = { + 'name': 'user_group', + 'state': 'absent' + } + assert create_and_apply(group_module, DEFAULT_ARGS, 
def test_error_remove_unix_group_zapi():
    '''Deleting a UNIX group surfaces the ZAPI error message.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-destroy', ZRR['error']),
    ])
    args = {'name': 'user_group', 'state': 'absent'}
    failure = create_and_apply(group_module, DEFAULT_ARGS, args, fail=True)['msg']
    assert "Error removing UNIX group" in failure


def test_create_idempotent():
    '''No change is reported when the requested group already exists.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info'])
    ])
    args = {'state': 'present', 'name': 'user_group', 'id': '1'}
    assert not create_and_apply(group_module, DEFAULT_ARGS, args)['changed']


def test_delete_idempotent():
    '''No change is reported when the group to delete is already gone.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['empty'])
    ])
    args = {'state': 'absent', 'name': 'user_group'}
    assert not create_and_apply(group_module, DEFAULT_ARGS, args)['changed']


def test_modify_unix_group_id_zapi():
    '''Changing the numeric group id triggers a ZAPI modify call.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-modify', ZRR['success']),
    ])
    args = {'name': 'user_group', 'id': '2'}
    assert create_and_apply(group_module, DEFAULT_ARGS, args)['changed']


def test_error_modify_unix_group_id_zapi():
    '''A failing modify call is surfaced as a module error.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-modify', ZRR['error']),
    ])
    args = {'name': 'user_group', 'id': '2'}
    failure = create_and_apply(group_module, DEFAULT_ARGS, args, fail=True)['msg']
    assert "Error modifying UNIX group" in failure


def test_add_unix_group_user_zapi():
    '''Adding a member to an existing group issues add-user.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-add-user', ZRR['success'])
    ])
    args = {'name': 'user_group', 'users': ['user1', 'user2']}
    assert create_and_apply(group_module, DEFAULT_ARGS, args)['changed']


def test_error_add_unix_group_user_zapi():
    '''A failing add-user call is surfaced as a module error.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-add-user', ZRR['error'])
    ])
    args = {'name': 'user_group', 'users': ['user1', 'user2']}
    failure = create_and_apply(group_module, DEFAULT_ARGS, args, fail=True)['msg']
    assert "Error adding user" in failure


def test_delete_unix_group_user_zapi():
    '''Emptying the member list issues delete-user.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-delete-user', ZRR['success'])
    ])
    args = {'name': 'user_group', 'users': ''}
    assert create_and_apply(group_module, DEFAULT_ARGS, args)['changed']


def test_error_delete_unix_group_user_zapi():
    '''A failing delete-user call is surfaced as a module error.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-get-iter', ZRR['unix_group_info']),
        ('name-mapping-unix-group-delete-user', ZRR['error'])
    ])
    args = {'name': 'user_group', 'users': ''}
    failure = create_and_apply(group_module, DEFAULT_ARGS, args, fail=True)['msg']
    assert "Error deleting user" in failure


def test_if_all_methods_catch_exception():
    '''Every ZAPI helper wraps the API error into a clear module failure.'''
    register_responses([
        ('name-mapping-unix-group-get-iter', ZRR['error']),
        ('name-mapping-unix-group-create', ZRR['error']),
        ('name-mapping-unix-group-destroy', ZRR['error']),
        ('name-mapping-unix-group-modify', ZRR['error']),
    ])
    my_obj = create_module(group_module, DEFAULT_ARGS, {'use_rest': 'never', 'name': 'user_group'})
    # suffix appended by the mocked ZAPI layer to every synthetic failure
    zapi_error = 'NetApp API failed. Reason - 12345:synthetic error for UT purpose'
    checks = [
        (my_obj.get_unix_group, (), 'Error getting UNIX group user_group'),
        (my_obj.create_unix_group, (), 'Error creating UNIX group user_group'),
        (my_obj.delete_unix_group, (), 'Error removing UNIX group user_group'),
        (my_obj.modify_unix_group, ('name-mapping-unix-group-modify',), 'Error modifying UNIX group user_group'),
    ]
    for method, extra, prefix in checks:
        failure = expect_and_capture_ansible_exception(method, 'fail', *extra)['msg']
        assert '%s: %s' % (prefix, zapi_error) in failure
# Baseline module arguments shared by all REST test cases.
ARGS_REST = {
    'hostname': 'test',
    'username': 'test_user',
    'password': 'test_pass!',
    'use_rest': 'always',
    'vserver': 'vserver',
    'name': 'user_group',
    'id': '1'
}


def test_get_nonexistent_user_group_rest():
    ''' Test if get_unix_group_rest returns None for a non-existent group '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['empty_records']),
    ])
    user_obj = create_module(group_module, ARGS_REST)
    result = user_obj.get_unix_group_rest()
    assert result is None


def test_get_existent_user_group_rest():
    ''' Test if get_unix_group_rest returns an existing group '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
    ])
    user_obj = create_module(group_module, ARGS_REST)
    result = user_obj.get_unix_group_rest()
    assert result


def test_get_error_existent_user_group_rest():
    ''' Test that a REST GET failure is reported as a module error '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['generic_error']),
    ])
    error = create_and_apply(group_module, ARGS_REST, fail=True)['msg']
    msg = "Error getting UNIX group:"
    assert msg in error


def test_ontap_version_rest():
    ''' Test ONTAP version check: UNIX group REST APIs need 9.9.1+ '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
    ])
    module_args = {'use_rest': 'always'}
    error = create_module(group_module, ARGS_REST, module_args, fail=True)['msg']
    msg = "Error: REST requires ONTAP 9.9.1 or later for UNIX group APIs."
    assert msg in error


def test_create_unix_group_rest():
    ''' Creating a new UNIX group (no members) over REST reports changed '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['empty_records']),
        ('POST', 'name-services/unix-groups', SRR['empty_good']),
    ])
    module_args = {
        'name': 'user_group',
        'id': 1,
    }
    assert create_and_apply(group_module, ARGS_REST, module_args)['changed']


def test_create_unix_group_with_user_rest():
    ''' Creating a UNIX group with members also posts to the users endpoint '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['empty_records']),
        ('POST', 'name-services/unix-groups', SRR['empty_good']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
        ('POST', 'name-services/unix-groups/671aa46e-11ad-11ec-a267-005056b30cfa/user_group/users', SRR['empty_records'])
    ])
    module_args = {
        'name': 'user_group',
        'id': 1,
        'users': ['user1', 'user2', 'user3']
    }
    assert create_and_apply(group_module, ARGS_REST, module_args)['changed']


def test_error_create_unix_group_rest():
    ''' A failing REST create call is surfaced as a module error '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['empty_records']),
        ('POST', 'name-services/unix-groups', SRR['generic_error']),
    ])
    module_args = {
        'name': 'user_group',
        'id': 1,
    }
    error = create_and_apply(group_module, ARGS_REST, module_args, fail=True)['msg']
    msg = "Error creating UNIX group:"
    assert msg in error


def test_delete_unix_group_rest():
    ''' Deleting an existing UNIX group over REST reports changed '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
        ('DELETE', 'name-services/unix-groups/671aa46e-11ad-11ec-a267-005056b30cfa/user_group', SRR['empty_good']),
    ])
    module_args = {
        'name': 'user_group',
        'state': 'absent'
    }
    assert create_and_apply(group_module, ARGS_REST, module_args)['changed']
def test_error_remove_unix_user_rest():
    '''A failing REST DELETE on the group is surfaced as a module error.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
        ('DELETE', 'name-services/unix-groups/671aa46e-11ad-11ec-a267-005056b30cfa/user_group', SRR['generic_error']),
    ])
    args = {'name': 'user_group', 'state': 'absent'}
    failure = create_and_apply(group_module, ARGS_REST, args, fail=True)['msg']
    assert "Error deleting UNIX group:" in failure


def test_modify_unix_group_id_rest():
    '''Changing the numeric group id triggers a REST PATCH.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
        ('PATCH', 'name-services/unix-groups/671aa46e-11ad-11ec-a267-005056b30cfa/user_group', SRR['empty_good'])
    ])
    args = {'name': 'user_group', 'id': '2'}
    assert create_and_apply(group_module, ARGS_REST, args)['changed']


def test_error_modify_unix_group_id_rest():
    '''A failing REST PATCH is surfaced as a module error.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
        ('PATCH', 'name-services/unix-groups/671aa46e-11ad-11ec-a267-005056b30cfa/user_group', SRR['generic_error'])
    ])
    args = {'name': 'user_group', 'id': '2'}
    failure = create_and_apply(group_module, ARGS_REST, args, fail=True)['msg']
    assert "Error on modifying UNIX group:" in failure


def test_create_idempotent_rest():
    '''No change is reported when the requested group already exists.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
    ])
    args = {'state': 'present', 'name': 'user_group', 'id': '1'}
    assert not create_and_apply(group_module, ARGS_REST, args)['changed']


def test_delete_idempotent_rest():
    '''No change is reported when the group to delete is already gone.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['empty_records']),
    ])
    args = {'state': 'absent', 'name': 'user_group'}
    assert not create_and_apply(group_module, ARGS_REST, args)['changed']


def test_add_unix_group_user_rest():
    '''Adding members to an existing group posts to the users endpoint.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
        ('POST', 'name-services/unix-groups/671aa46e-11ad-11ec-a267-005056b30cfa/user_group/users', SRR['empty_records'])
    ])
    args = {'name': 'user_group', 'users': ['user1', 'user2', 'user3']}
    assert create_and_apply(group_module, ARGS_REST, args)['changed']


def test_error_add_unix_group_user_rest():
    '''A failing POST to the users endpoint is surfaced as a module error.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
        ('POST', 'name-services/unix-groups/671aa46e-11ad-11ec-a267-005056b30cfa/user_group/users', SRR['generic_error'])
    ])
    args = {'name': 'user_group', 'users': ['user1', 'user2', 'user3']}
    failure = create_and_apply(group_module, ARGS_REST, args, fail=True)['msg']
    assert "Error Adding user to UNIX group:" in failure


def test_delete_unix_group_user_rest():
    '''Removing a member deletes it from the users sub-collection.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
        ('DELETE', 'name-services/unix-groups/671aa46e-11ad-11ec-a267-005056b30cfa/user_group/users/user2', SRR['empty_records'])
    ])
    args = {'users': ["user1"]}
    assert create_and_apply(group_module, ARGS_REST, args)['changed']


def test_error_delete_unix_group_user_rest():
    '''A failing member DELETE is surfaced as a module error.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_1']),
        ('GET', 'name-services/unix-groups', SRR['user_record']),
        ('DELETE', 'name-services/unix-groups/671aa46e-11ad-11ec-a267-005056b30cfa/user_group/users/user2', SRR['generic_error'])
    ])
    args = {'users': ["user1"]}
    failure = create_and_apply(group_module, ARGS_REST, args, fail=True)['msg']
    assert "Error removing user from UNIX group:" in failure
# (c) 2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

''' unit test template for ONTAP Ansible module '''

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest

import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
# pylint: disable=unused-import
from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\
    patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception, AnsibleFailJson
from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record
from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses
from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses

from ansible_collections.netapp.ontap.plugins.modules.na_ontap_unix_user \
    import NetAppOntapUnixUser as user_module  # module under test

if not netapp_utils.has_netapp_lib():
    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')

# REST API canned responses when mocking send_request
SRR = rest_responses({
    # module specific responses
    'user_record': (
        200,
        {
            "records": [
                {
                    "svm": {
                        "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa",
                        "name": "vserver"
                    },
                    "name": "user",
                    "primary_gid": 2,
                    "id": 1,
                    "full_name": "test_user",
                    "target": {
                        "name": "20:05:00:50:56:b3:0c:fa"
                    }
                }
            ],
            "num_records": 1
        }, None
    ),
    "no_record": (
        200,
        {"num_records": 0},
        None)
})

# Canned ZAPI reply describing one existing UNIX user.
unix_user_info = {
    'num-records': 1,
    'attributes-list': {
        'unix-user-info': {
            'name': 'user',
            'user-id': '1',
            'group-id': 2,
            'full-name': 'test_user'}
    }
}

ZRR = zapi_responses({
    'unix_user_info': build_zapi_response(unix_user_info)
})

# Baseline module arguments matching the canned existing user.
DEFAULT_ARGS = {
    'hostname': 'hostname',
    'username': 'username',
    'password': 'password',
    'vserver': 'vserver',
    'name': 'user',
    'group_id': 2,
    'id': '1',
    'full_name': 'test_user',
    'use_rest': 'never',
}


# Same arguments, but targeting a user name that does not exist.
DEFAULT_NO_USER = {
    'hostname': 'hostname',
    'username': 'username',
    'password': 'password',
    'vserver': 'vserver',
    'name': 'no_user',
    'group_id': '2',
    'id': '1',
    'full_name': 'test_user',
    'use_rest': 'never',
}


def test_module_fail_when_required_args_missing():
    ''' required arguments are reported as errors '''
    with pytest.raises(AnsibleFailJson) as exc:
        set_module_args({})
        user_module()
    print('Info: %s' % exc.value.args[0]['msg'])


def test_get_nonexistent_user():
    ''' Test if get_unix_user returns None for non-existent user '''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['empty'])
    ])
    user_obj = create_module(user_module, DEFAULT_NO_USER)
    result = user_obj.get_unix_user()
    assert result is None


def test_get_existent_user():
    ''' Test if get_unix_user returns existent user '''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['unix_user_info'])
    ])
    user_obj = create_module(user_module, DEFAULT_ARGS)
    result = user_obj.get_unix_user()
    assert result


def test_get_error_existent_user():
    ''' Test that get_unix_user reports an error when the ZAPI call fails '''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['error'])
    ])
    user_module_object = create_module(user_module, DEFAULT_ARGS)
    msg = "Error getting UNIX user"
    assert msg in expect_and_capture_ansible_exception(user_module_object.get_unix_user, 'fail')['msg']
def test_create_unix_user_zapi():
    '''Creating a new UNIX user over ZAPI reports changed.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['empty']),
        ('name-mapping-unix-user-create', ZRR['success']),
    ])
    args = {
        'name': 'user',
        'group_id': '2',
        'id': '1',
        'full_name': 'test_user',
    }
    assert create_and_apply(user_module, DEFAULT_ARGS, args)['changed']


def test_error_create_unix_user_zapi():
    '''A failing ZAPI create call is surfaced as a module error.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['empty']),
        ('name-mapping-unix-user-create', ZRR['error']),
    ])
    args = {
        'name': 'user4',
        'group_id': '4',
        'id': '4',
        'full_name': 'test_user4',
    }
    failure = create_and_apply(user_module, DEFAULT_ARGS, args, fail=True)['msg']
    assert "Error creating UNIX user" in failure


def test_delete_unix_user_zapi():
    '''Deleting an existing UNIX user over ZAPI reports changed.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['unix_user_info']),
        ('name-mapping-unix-user-destroy', ZRR['success']),
    ])
    args = {
        'name': 'user',
        'group_id': '2',
        'id': '1',
        'full_name': 'test_user',
        'state': 'absent'
    }
    assert create_and_apply(user_module, DEFAULT_ARGS, args)['changed']


def test_error_remove_unix_user_zapi():
    '''A failing ZAPI destroy call is surfaced as a module error.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['unix_user_info']),
        ('name-mapping-unix-user-destroy', ZRR['error']),
    ])
    args = {
        'name': 'user',
        'group_id': '2',
        'id': '1',
        'full_name': 'test_user',
        'state': 'absent'
    }
    failure = create_and_apply(user_module, DEFAULT_ARGS, args, fail=True)['msg']
    assert "Error removing UNIX user" in failure


def test_modify_unix_user_id_zapi():
    '''Changing group id and user id triggers a ZAPI modify call.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['unix_user_info']),
        ('name-mapping-unix-user-modify', ZRR['success']),
    ])
    args = {'group_id': '3', 'id': '2'}
    assert create_and_apply(user_module, DEFAULT_ARGS, args)['changed']


def test_modify_unix_user_full_name_zapi():
    '''Changing the full name triggers a ZAPI modify call.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['unix_user_info']),
        ('name-mapping-unix-user-modify', ZRR['success']),
    ])
    args = {'full_name': 'test_user1'}
    assert create_and_apply(user_module, DEFAULT_ARGS, args)['changed']


def test_error_modify_unix_user_full_name_zapi():
    '''A failing ZAPI modify call is surfaced as a module error.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['unix_user_info']),
        ('name-mapping-unix-user-modify', ZRR['error']),
    ])
    args = {'full_name': 'test_user1'}
    failure = create_and_apply(user_module, DEFAULT_ARGS, args, fail=True)['msg']
    assert "Error modifying UNIX user" in failure


def test_create_idempotent():
    '''No change is reported when the requested user already exists.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['unix_user_info'])
    ])
    args = {
        'state': 'present',
        'name': 'user',
        'group_id': 2,
        'id': '1',
        'full_name': 'test_user',
    }
    assert not create_and_apply(user_module, DEFAULT_ARGS, args)['changed']


def test_delete_idempotent():
    '''No change is reported when the user to delete is already gone.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['empty'])
    ])
    args = {'state': 'absent'}
    assert not create_and_apply(user_module, DEFAULT_ARGS, args)['changed']


def test_if_all_methods_catch_exception():
    '''Every ZAPI helper wraps the API error into a clear module failure.'''
    register_responses([
        ('name-mapping-unix-user-get-iter', ZRR['error']),
        ('name-mapping-unix-user-create', ZRR['error']),
        ('name-mapping-unix-user-destroy', ZRR['error']),
        ('name-mapping-unix-user-modify', ZRR['error'])
    ])
    my_obj = create_module(user_module, DEFAULT_ARGS, {'id': 5})
    # suffix appended by the mocked ZAPI layer to every synthetic failure
    zapi_error = 'NetApp API failed. Reason - 12345:synthetic error for UT purpose'
    checks = [
        (my_obj.get_unix_user, (), 'Error getting UNIX user user'),
        (my_obj.create_unix_user, (), 'Error creating UNIX user user'),
        (my_obj.delete_unix_user, (), 'Error removing UNIX user user'),
        (my_obj.modify_unix_user, ('name-mapping-unix-user-modify',), 'Error modifying UNIX user user'),
    ]
    for method, extra, prefix in checks:
        failure = expect_and_capture_ansible_exception(method, 'fail', *extra)['msg']
        assert '%s: %s' % (prefix, zapi_error) in failure
# Baseline module arguments shared by all REST test cases.
ARGS_REST = {
    'hostname': 'test',
    'username': 'test_user',
    'password': 'test_pass!',
    'use_rest': 'always',
    'vserver': 'vserver',
    'name': 'user',
    'primary_gid': 2,
    'id': 1,
    'full_name': 'test_user'
}

# Same arguments, but targeting a user name that does not exist.
REST_NO_USER = {
    'hostname': 'test',
    'username': 'test_user',
    'password': 'test_pass!',
    'use_rest': 'always',
    'vserver': 'vserver',
    'name': 'user5',
    'primary_gid': 2,
    'id': 1,
    'full_name': 'test_user'
}


# NOTE(review): doubled '_rest_rest' suffix looks like a typo, but the name is
# the public test id, so it is kept unchanged.
def test_get_nonexistent_user_rest_rest():
    ''' Test if get_unix_user_rest returns None for a non-existent user '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['empty_records']),
    ])
    user_obj = create_module(user_module, REST_NO_USER)
    result = user_obj.get_unix_user_rest()
    assert result is None


def test_get_existent_user_rest():
    ''' Test if get_unix_user_rest returns an existing user '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['user_record']),
    ])
    user_obj = create_module(user_module, ARGS_REST)
    result = user_obj.get_unix_user_rest()
    assert result


def test_get_error_existent_user_rest():
    ''' Test that a REST GET failure is reported as a module error '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['generic_error']),
    ])
    error = create_and_apply(user_module, ARGS_REST, fail=True)['msg']
    msg = "Error on getting unix-user info:"
    assert msg in error


def test_create_unix_user_rest():
    ''' Creating a new UNIX user over REST reports changed '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['empty_records']),
        ('POST', 'name-services/unix-users', SRR['empty_good']),
    ])
    module_args = {
        'name': 'user',
        'primary_gid': 2,
        'id': 1,
        'full_name': 'test_user',
    }
    assert create_and_apply(user_module, ARGS_REST, module_args)['changed']


def test_error_create_unix_user_rest():
    ''' A failing REST create call is surfaced as a module error '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['empty_records']),
        ('POST', 'name-services/unix-users', SRR['generic_error']),
    ])
    module_args = {
        'name': 'user4',
        'primary_gid': 4,
        'id': 4,
        'full_name': 'test_user4',
    }
    error = create_and_apply(user_module, ARGS_REST, module_args, fail=True)['msg']
    msg = "Error on creating unix-user:"
    assert msg in error


def test_delete_unix_user_rest():
    ''' Deleting an existing UNIX user over REST reports changed '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['user_record']),
        ('DELETE', 'name-services/unix-users/671aa46e-11ad-11ec-a267-005056b30cfa/user', SRR['empty_good']),
    ])
    module_args = {
        'name': 'user',
        'group_id': '2',
        'id': '1',
        'full_name': 'test_user',
        'state': 'absent'
    }
    assert create_and_apply(user_module, ARGS_REST, module_args)['changed']


def test_error_remove_unix_user_rest():
    ''' A failing REST DELETE call is surfaced as a module error '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['user_record']),
        ('DELETE', 'name-services/unix-users/671aa46e-11ad-11ec-a267-005056b30cfa/user', SRR['generic_error'])
    ])
    module_args = {
        'name': 'user',
        'id': '1',
        'state': 'absent'
    }
    error = create_and_apply(user_module, ARGS_REST, module_args, fail=True)['msg']
    msg = "Error on deleting unix-user"
    assert msg in error


def test_modify_unix_user_id_rest():
    ''' Changing group id and user id triggers a REST PATCH '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['user_record']),
        ('PATCH', 'name-services/unix-users/671aa46e-11ad-11ec-a267-005056b30cfa/user', SRR['empty_good'])
    ])
    module_args = {
        'name': 'user',
        'group_id': '3',
        'id': '2'
    }
    assert create_and_apply(user_module, ARGS_REST, module_args)['changed']


def test_modify_unix_user_full_name_rest():
    ''' Changing the full name triggers a REST PATCH '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['user_record']),
        ('PATCH', 'name-services/unix-users/671aa46e-11ad-11ec-a267-005056b30cfa/user', SRR['empty_good'])
    ])
    module_args = {
        'name': 'user',
        'full_name': 'test_user1'
    }
    assert create_and_apply(user_module, ARGS_REST, module_args)['changed']


def test_error_modify_unix_user_full_name_rest():
    ''' A failing REST PATCH call is surfaced as a module error '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['user_record']),
        ('PATCH', 'name-services/unix-users/671aa46e-11ad-11ec-a267-005056b30cfa/user', SRR['generic_error'])
    ])
    module_args = {
        'name': 'user',
        'full_name': 'test_user1'
    }
    error = create_and_apply(user_module, ARGS_REST, module_args, fail=True)['msg']
    msg = "Error on modifying unix-user:"
    assert msg in error


def test_create_idempotent_rest():
    ''' No change is reported when the requested user already exists '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['user_record']),
    ])
    module_args = {
        'state': 'present',
        'name': 'user',
        'group_id': 2,
        'id': '1',
        'full_name': 'test_user',
    }
    assert not create_and_apply(user_module, ARGS_REST, module_args)['changed']


def test_delete_idempotent_rest():
    ''' No change is reported when the user to delete is already gone '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_9_0']),
        ('GET', 'name-services/unix-users', SRR['empty_records']),
    ])
    module_args = {
        'state': 'absent'
    }
    assert not create_and_apply(user_module, ARGS_REST, module_args)['changed']
'absent' + } + assert not create_and_apply(user_module, ARGS_REST, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user.py new file mode 100644 index 000000000..4b3294798 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user.py @@ -0,0 +1,744 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_user ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_error_message, rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_error, build_zapi_response, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + call_main, create_module, expect_and_capture_ansible_exception, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_user import NetAppOntapUser as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'repeated_password': (400, None, {'message': "New password must be different than the old password."}), + 'get_uuid': (200, {'owner': {'uuid': 
'ansible'}}, None), + 'get_user_rest': (200, + {'num_records': 1, + 'records': [{'owner': {'uuid': 'ansible_vserver'}, + 'name': 'abcd'}]}, None), + 'get_user_rest_multiple': (200, + {'num_records': 2, + 'records': [{'owner': {'uuid': 'ansible_vserver'}, + 'name': 'abcd'}, + {}]}, None), + 'get_user_details_rest': (200, + {'role': {'name': 'vsadmin'}, + 'applications': [{'application': 'http'}], + 'locked': False}, None), + 'get_user_details_rest_no_pwd': (200, # locked is absent if no password was set + {'role': {'name': 'vsadmin'}, + 'applications': [{'application': 'http'}], + }, None) +}, True) + + +def login_info(locked, role_name, apps): + attributes_list = [] + for app in apps: + if app in ('console', 'service-processor'): + attributes_list.append( + {'security-login-account-info': { + 'is-locked': locked, 'role-name': role_name, 'application': app, 'authentication-method': 'password'}} + ) + if app in ('ssh',): + attributes_list.append( + {'security-login-account-info': { + 'is-locked': locked, 'role-name': role_name, 'application': 'ssh', 'authentication-method': 'publickey', + 'second-authentication-method': 'password'}}, + ) + if app in ('http',): + attributes_list.extend([ + {'security-login-account-info': { + 'is-locked': locked, 'role-name': role_name, 'application': 'http', 'authentication-method': 'password'}}, + ]) + return { + 'num-records': len(attributes_list), + 'attributes-list': attributes_list + } + + +ZRR = zapi_responses({ + 'login_locked_user': build_zapi_response(login_info("true", 'user', ['console', 'ssh'])), + 'login_unlocked_user': build_zapi_response(login_info("False", 'user', ['console', 'ssh'])), + 'login_unlocked_user_http': build_zapi_response(login_info("False", 'user', ['http'])), + 'login_unlocked_user_service_processor': build_zapi_response(login_info("False", 'user', ['service-processor'])), + 'user_not_found': build_zapi_error('16034', "This exception should not be seen"), + 'internal_error': build_zapi_error('13114', 
"Forcing an internal error"), + 'reused_password': build_zapi_error('13214', "New password must be different than last 6 passwords."), +}, True) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'user_name', + 'vserver': 'vserver', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + } + print('Info: %s' % call_main(my_main, {}, module_args, fail=True)['msg']) + + +def test_module_fail_when_vserver_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'user_name', + } + assert 'Error: vserver is required' in call_main(my_main, {}, module_args, fail=True)['msg'] + + +def test_ensure_user_get_called(): + ''' a more interesting test ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'role_name': 'test', + 'applications': 'http', + 'authentication_method': 'password', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + # app = dict(application='testapp', authentication_methods=['testam']) + user_info = my_obj.get_user() + print('Info: test_user_get: %s' % repr(user_info)) + assert user_info is None + + +def test_ensure_user_get_called_not_found(): + ''' a more interesting test ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['user_not_found']), + ]) + module_args = { + 'use_rest': 'never', + 'role_name': 'test', + 'applications': 'http', + 'authentication_method': 'password', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + # app = dict(application='testapp', authentication_methods=['testam']) + user_info = my_obj.get_user() + print('Info: test_user_get: %s' % repr(user_info)) + assert 
user_info is None + + +def test_ensure_user_apply_called(): + ''' creating user and checking idempotency ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['success']), + ('ZAPI', 'security-login-create', ZRR['success']), + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user_http']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'create', + 'role_name': 'user', + 'applications': 'http', + 'authentication_method': 'password', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_user_sp_apply_called(): + ''' creating user with service_processor application and idempotency ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['no_records']), + ('ZAPI', 'security-login-create', ZRR['success']), + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user_service_processor']), + ('ZAPI', 'security-login-get-iter', ZRR['no_records']), + ('ZAPI', 'security-login-create', ZRR['success']), + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user_service_processor']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'create', + 'role_name': 'user', + 'applications': 'service-processor', + 'authentication_method': 'password', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['applications'] = 'service_processor' + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_user_apply_for_delete_called(): + ''' deleting user and checking idempotency ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']), + ('ZAPI', 'security-login-delete', ZRR['success']), + ('ZAPI', 'security-login-get-iter', ZRR['no_records']), + ]) + module_args = { + "use_rest": "never", + "state": 
"absent", + 'name': 'create', + 'role_name': 'user', + 'applications': 'console', + 'authentication_method': 'password', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_user_lock_called(): + ''' changing user_lock to True and checking idempotency''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']), + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']), + ('ZAPI', 'security-login-lock', ZRR['success']), + ]) + module_args = { + "use_rest": "never", + "lock_user": False, + 'name': 'create', + 'role_name': 'user', + 'applications': 'console', + 'authentication_method': 'password', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['lock_user'] = 'true' + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_user_unlock_called(): + ''' changing user_lock to False and checking idempotency''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['login_locked_user']), + ('ZAPI', 'security-login-get-iter', ZRR['login_locked_user']), + ('ZAPI', 'security-login-unlock', ZRR['success']), + ]) + module_args = { + "use_rest": "never", + "lock_user": True, + 'name': 'create', + 'role_name': 'user', + 'applications': 'console', + 'authentication_method': 'password', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + module_args['lock_user'] = 'false' + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_user_set_password_called(): + ''' set password ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']), + ('ZAPI', 'security-login-modify-password', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'create', + 'role_name': 'user', + 'applications': 'console', + 'authentication_method': 'password', + 'set_password': 
'123456', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_set_password_internal_error(): + ''' set password ''' + register_responses([ + ('ZAPI', 'security-login-modify-password', ZRR['internal_error']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'create', + 'role_name': 'user', + 'applications': 'console', + 'authentication_method': 'password', + 'set_password': '123456', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert not my_obj.change_password() + + +def test_set_password_reused(): + ''' set password ''' + register_responses([ + ('ZAPI', 'security-login-modify-password', ZRR['reused_password']) + ]) + module_args = { + 'use_rest': 'never', + 'name': 'create', + 'role_name': 'user', + 'applications': 'console', + 'authentication_method': 'password', + 'set_password': '123456', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert not my_obj.change_password() + + +def test_ensure_user_role_update_called(): + ''' set password ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']), + ('ZAPI', 'security-login-modify', ZRR['success']), + ('ZAPI', 'security-login-modify-password', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'create', + 'role_name': 'test123', + 'applications': 'console', + 'authentication_method': 'password', + 'set_password': '123456', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_user_role_update_additional_application_called(): + ''' set password ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']), + ('ZAPI', 'security-login-create', ZRR['success']), + ('ZAPI', 'security-login-delete', ZRR['success']), + ('ZAPI', 'security-login-delete', ZRR['success']), + ('ZAPI', 'security-login-modify-password', ZRR['success']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'create', + 'role_name': 
'test123', + 'applications': 'http', + 'authentication_method': 'password', + 'set_password': '123456', + 'replace_existing_apps_and_methods': 'always' + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['error']), + ('ZAPI', 'security-login-create', ZRR['error']), + ('ZAPI', 'security-login-lock', ZRR['error']), + ('ZAPI', 'security-login-unlock', ZRR['error']), + ('ZAPI', 'security-login-delete', ZRR['error']), + ('ZAPI', 'security-login-modify-password', ZRR['error']), + ('ZAPI', 'security-login-modify', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never', + 'role_name': 'test', + 'applications': 'console', + 'authentication_method': 'password', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + app = dict(application='console', authentication_methods=['password']) + assert zapi_error_message('Error getting user user_name') in expect_and_capture_ansible_exception(my_obj.get_user, 'fail')['msg'] + assert zapi_error_message('Error creating user user_name') in expect_and_capture_ansible_exception(my_obj.create_user, 'fail', app)['msg'] + assert zapi_error_message('Error locking user user_name') in expect_and_capture_ansible_exception(my_obj.lock_given_user, 'fail')['msg'] + assert zapi_error_message('Error unlocking user user_name') in expect_and_capture_ansible_exception(my_obj.unlock_given_user, 'fail')['msg'] + assert zapi_error_message('Error removing user user_name') in expect_and_capture_ansible_exception(my_obj.delete_user, 'fail', app)['msg'] + assert zapi_error_message('Error setting password for user user_name') in expect_and_capture_ansible_exception(my_obj.change_password, 'fail')['msg'] + assert zapi_error_message('Error modifying user user_name') in expect_and_capture_ansible_exception(my_obj.modify_user, 'fail', app, ['password'])['msg'] + err_msg = 'vserver is required with ZAPI' + assert 
err_msg in create_module(my_module, DEFAULT_ARGS, {'use_rest': 'never', 'svm': None}, fail=True)['msg'] + + +def test_create_user_with_usm_auth(): + ''' switching back to ZAPI ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('ZAPI', 'security-login-get-iter', ZRR['no_records']), + ('ZAPI', 'security-login-create', ZRR['success']), + ]) + module_args = { + 'use_rest': 'auto', + 'applications': 'snmp', + 'authentication_method': 'usm', + 'name': 'create', + 'role_name': 'test123', + 'set_password': '123456', + 'remote_switch_ipaddress': '12.34.56.78', + 'authentication_password': 'auth_pwd', + 'authentication_protocol': 'md5', + 'privacy_password': 'auth_pwd', + 'privacy_protocol': 'des', + 'engine_id': 'engine_123', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_error_applications_snmp(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + module_args = { + 'use_rest': 'always', + 'applications': 'snmp', + 'authentication_method': 'usm', + 'name': 'create', + 'role_name': 'test123', + 'set_password': '123456', + } + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == "snmp as application is not supported in REST." 
+ + +def test_ensure_user_get_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'applications': ['http', 'ontapi'], + 'authentication_method': 'password', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_user_rest() is not None + + +def test_ensure_create_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['zero_records']), + ('POST', 'security/accounts', SRR['empty_good']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'applications': ['http', 'ontapi'], + 'authentication_method': 'password', + 'set_password': 'xfjjttjwll`1', + 'lock_user': True + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_create_cluster_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['zero_records']), + ('POST', 'security/accounts', SRR['empty_good']), + ]) + module_args = { + "hostname": "hostname", + "username": "username", + "password": "password", + "name": "user_name", + "use_rest": "always", + 'role_name': 'vsadmin', + 'applications': ['http', 'ontapi'], + 'authentication_method': 'password', + 'set_password': 'xfjjttjwll`1', + 'lock_user': True + } + assert call_main(my_main, module_args)['changed'] + + +def test_ensure_delete_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('DELETE', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ]) + module_args = { + "use_rest": "always", + 'state': 'absent', + 'role_name': 'vsadmin', + 'applications': ['http', 'ontapi'], + 'authentication_method': 'password', + } + assert 
call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_modify_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'application': 'ssh', + 'authentication_method': 'password', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_lock_unlock_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'applications': 'http', + 'authentication_method': 'password', + 'lock_user': True, + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_change_password_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ]) + module_args = { + 'set_password': 'newvalue', + 'use_rest': 'always', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_change_password_user_rest_check_mode(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ]) + module_args = { + 'set_password': 'newvalue', + 
'use_rest': 'always', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + my_obj.module.check_mode = True + assert expect_and_capture_ansible_exception(my_obj.apply, 'exit')['changed'] + + +def test_existing_password(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['repeated_password']), # password + ]) + module_args = { + 'set_password': 'newvalue', + 'use_rest': 'always', + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_rest_unsupported_property(): + register_responses([ + ]) + module_args = { + 'privacy_password': 'value', + 'use_rest': 'always', + } + msg = "REST API currently does not support 'privacy_password'" + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_negative_zapi_missing_netapp_lib(mock_has): + register_responses([ + ]) + mock_has.return_value = False + module_args = { + 'use_rest': 'never', + } + msg = "Error: the python NetApp-Lib module is required. 
Import error: None" + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_zapi_missing_apps(): + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + } + msg = "application_dicts or application_strs is a required parameter with ZAPI" + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_rest_error_on_get_user(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always', + } + msg = "Error while fetching user info: Expected error" + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_rest_error_on_get_user_multiple(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest_multiple']), + ]) + module_args = { + 'use_rest': 'always', + } + msg = "Error while fetching user info, found multiple entries:" + assert msg in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_rest_error_on_get_user_details(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always', + } + msg = "Error while fetching user details: Expected error" + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_rest_error_on_delete(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('DELETE', 'security/accounts/ansible_vserver/abcd', SRR['generic_error']), + ]) + module_args = { + "use_rest": "always", + 'state': 'absent', + 'role_name': 'vsadmin', + } + msg = "Error while deleting 
user: Expected error" + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_rest_error_on_unlocking(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['generic_error']), + ]) + module_args = { + 'use_rest': 'always', + 'lock_user': True + } + msg = "Error while locking/unlocking user: Expected error" + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_rest_error_on_unlocking_no_password(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest_no_pwd']), + ]) + module_args = { + 'use_rest': 'always', + 'lock_user': True + } + msg = "Error: cannot modify lock state if password is not set." 
+ assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_rest_error_on_changing_password(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['generic_error']), + ]) + module_args = { + 'set_password': '12345', + 'use_rest': 'always', + } + msg = "Error while updating user password: Expected error" + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_rest_error_on_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['generic_error']), + ]) + module_args = { + 'role_name': 'vsadmin2', + 'use_rest': 'always', + 'applications': ['http', 'ontapi'], + 'authentication_method': 'password', + } + msg = "Error while modifying user details: Expected error" + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_rest_unlocking_with_password(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest_no_pwd']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['success']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['success']), + ]) + module_args = { + 'set_password': 'ansnssnajj12%', + 'use_rest': 'always', + 'lock_user': True + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_create_validations(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['zero_records']), + ('GET', 'cluster', 
SRR['is_rest']), + ('GET', 'security/accounts', SRR['zero_records']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['zero_records']), + ]) + module_args = { + 'use_rest': 'always', + } + msg = 'Error: missing required parameters for create: role_name and: application_dicts or application_strs.' + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['role_name'] = 'role' + msg = 'Error: missing required parameter for create: application_dicts or application_strs.' + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args.pop('role_name') + module_args['applications'] = 'http' + module_args['authentication_method'] = 'password' + msg = 'Error: missing required parameter for create: role_name.' + assert msg == call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_dicts.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_dicts.py new file mode 100644 index 000000000..a4181d54d --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_dicts.py @@ -0,0 +1,589 @@ +# (c) 2018 - 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_user ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, print_requests, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import 
build_zapi_response, zapi_error_message, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + expect_and_capture_ansible_exception, call_main, create_module, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_user import NetAppOntapUser as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'invalid_value_error': (400, None, {'message': "invalid value service_processor"}), + 'get_user_rest': (200, + {'num_records': 1, + 'records': [{'owner': {'uuid': 'ansible_vserver'}, + 'name': 'abcd'}]}, None), + 'get_user_details_rest': (200, + {'role': {'name': 'vsadmin'}, + 'applications': [{'application': 'http', 'authentication-method': 'password', 'second_authentication_method': 'none'}], + 'locked': False}, None) +}) + + +def login_info(locked, role_name, apps): + attributes_list = [] + for app in apps: + if app in ('console', 'service-processor',): + attributes_list.append( + {'security-login-account-info': { + 'is-locked': locked, 'role-name': role_name, 'application': app, 'authentication-method': 'password'}} + ) + if app in ('ssh',): + attributes_list.append( + {'security-login-account-info': { + 'is-locked': locked, 'role-name': role_name, 'application': 'ssh', 'authentication-method': 'publickey', + 'second-authentication-method': 'password'}}, + ) + if app in ('http',): + attributes_list.extend([ + {'security-login-account-info': { + 'is-locked': locked, 'role-name': role_name, 'application': 'http', 'authentication-method': 'password'}}, + {'security-login-account-info': { + 'is-locked': locked, 'role-name': role_name, 'application': 'http', 'authentication-method': 'saml'}}, + ]) + return { + 'num-records': len(attributes_list), + 'attributes-list': attributes_list + } + + +ZRR = 
zapi_responses({ + 'login_locked_user': build_zapi_response(login_info("true", 'user', ['console', 'ssh'])), + 'login_unlocked_user': build_zapi_response(login_info("False", 'user', ['console', 'ssh'])), + 'login_unlocked_user_console': build_zapi_response(login_info("False", 'user', ['console'])), + 'login_unlocked_user_service_processor': build_zapi_response(login_info("False", 'user', ['service-processor'])), + 'login_unlocked_user_ssh': build_zapi_response(login_info("False", 'user', ['ssh'])), + 'login_unlocked_user_http': build_zapi_response(login_info("False", 'user', ['http'])) +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'name': 'user_name', + 'vserver': 'vserver', +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + "use_rest": "never" + } + print('Info: %s' % call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg']) + + +def test_module_fail_when_application_name_is_repeated(): + ''' required arguments are reported as errors ''' + register_responses([ + ]) + module_args = { + "use_rest": "never", + "application_dicts": [ + {'application': 'ssh', 'authentication_methods': ['cert']}, + {'application': 'ssh', 'authentication_methods': ['password']}] + } + error = 'Error: repeated application name: ssh. Group all authentication methods under a single entry.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_ensure_user_get_called(): + ''' a more interesting test ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user_http']), + ]) + module_args = { + "use_rest": "never", + 'role_name': 'test', + 'applications': 'console', + 'authentication_method': 'password', + 'replace_existing_apps_and_methods': 'always' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + user_info = my_obj.get_user() + print('Info: test_user_get: %s' % repr(user_info)) + assert 'saml' in user_info['applications'][0]['authentication_methods'] + + +def test_ensure_user_apply_called_replace(): + ''' creating user and checking idempotency ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['no_records']), + ('ZAPI', 'security-login-create', ZRR['success']), + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']), + ]) + module_args = { + "use_rest": "never", + 'name': 'create', + 'role_name': 'user', + 'applications': 'console', + 'authentication_method': 'password', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_user_apply_called_using_dict(): + ''' creating user and checking idempotency ''' + register_responses([ + ('ZAPI', 'security-login-get-iter', ZRR['no_records']), + ('ZAPI', 'security-login-create', ZRR['success']), + ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user_ssh']), + ]) + module_args = { + "use_rest": "never", + 'name': 'create', + 'role_name': 'user', + 'application_dicts': [{ + 'application': 'ssh', + 'authentication_methods': ['publickey'], + 'second_authentication_method': 'password' + }] + } + + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + # BUG: SSH is not idempotent with SSH and replace_existing_apps_and_methods == 'auto' + assert not call_main(my_main, 
def test_ensure_user_apply_called_add():
    '''Create a user, replace its apps/methods on the next run, then verify idempotency.'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['no_records']),
        ('ZAPI', 'security-login-create', ZRR['success']),
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']),
        ('ZAPI', 'security-login-modify', ZRR['success']),
        ('ZAPI', 'security-login-delete', ZRR['success']),
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user_console']),
    ])
    module_args = {
        'use_rest': 'never',
        'name': 'create',
        'role_name': 'user',
        'application_dicts': [{'application': 'console', 'authentication_methods': ['password']}],
        'replace_existing_apps_and_methods': 'always',
    }
    # 1st run creates, 2nd run replaces existing apps/methods, 3rd run is a no-op
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_ensure_user_sp_apply_called():
    '''Create a user with the service_processor application; both spellings behave the same.'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['no_records']),
        ('ZAPI', 'security-login-create', ZRR['success']),
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user_service_processor']),
        ('ZAPI', 'security-login-get-iter', ZRR['no_records']),
        ('ZAPI', 'security-login-create', ZRR['success']),
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user_service_processor']),
    ])
    module_args = {
        'use_rest': 'never',
        'name': 'create',
        'role_name': 'user',
        'application_dicts': [{'application': 'service-processor', 'authentication_methods': ['password']}],
    }
    # dashed spelling: create, then idempotent
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    # underscore spelling behaves identically
    module_args['application_dicts'] = [{'application': 'service_processor', 'authentication_methods': ['password']}]
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_ensure_user_apply_for_delete_called():
    '''Delete an existing user, then verify idempotency.'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']),
        ('ZAPI', 'security-login-delete', ZRR['success']),
        ('ZAPI', 'security-login-delete', ZRR['success']),
        ('ZAPI', 'security-login-get-iter', ZRR['no_records']),
    ])
    module_args = {
        'use_rest': 'never',
        'state': 'absent',
        'name': 'create',
        'role_name': 'user',
        'application_dicts': [{'application': 'console', 'authentication_methods': ['password']}],
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_ensure_user_lock_called():
    '''Toggle lock_user from False (no-op) to True (locks the account).'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']),
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']),
        ('ZAPI', 'security-login-lock', ZRR['success']),
    ])
    module_args = {
        'use_rest': 'never',
        'lock_user': False,
        'name': 'create',
        'role_name': 'user',
        'application_dicts': [
            {'application': 'console', 'authentication_methods': ['password']},
            {'application': 'ssh', 'authentication_methods': ['publickey'], 'second_authentication_method': 'password'},
        ],
    }
    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    module_args['lock_user'] = True
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_ensure_user_unlock_called():
    '''Toggle lock_user from True (no-op on a locked user) to False (unlocks).'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['login_locked_user']),
        ('ZAPI', 'security-login-get-iter', ZRR['login_locked_user']),
        ('ZAPI', 'security-login-unlock', ZRR['success']),
    ])
    module_args = {
        'use_rest': 'never',
        'lock_user': True,
        'name': 'create',
        'role_name': 'user',
        'application_dicts': [
            {'application': 'console', 'authentication_methods': ['password']},
            {'application': 'ssh', 'authentication_methods': ['publickey'], 'second_authentication_method': 'password'},
        ],
    }
    assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed']
    module_args['lock_user'] = False
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_ensure_user_set_password_called():
    '''Setting a password always triggers security-login-modify-password.'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']),
        ('ZAPI', 'security-login-modify-password', ZRR['success']),
    ])
    module_args = {
        'use_rest': 'never',
        'name': 'create',
        'role_name': 'user',
        'application_dicts': [
            {'application': 'console', 'authentication_methods': ['password']},
            {'application': 'ssh', 'authentication_methods': ['publickey'], 'second_authentication_method': 'password'},
        ],
        'set_password': '123456',
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_ensure_user_role_update_called():
    '''Changing role_name modifies both applications and sets the password.'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']),
        ('ZAPI', 'security-login-modify', ZRR['success']),
        ('ZAPI', 'security-login-modify', ZRR['success']),
        ('ZAPI', 'security-login-modify-password', ZRR['success']),
    ])
    module_args = {
        'use_rest': 'never',
        'name': 'create',
        'role_name': 'test123',
        'application_dicts': [
            {'application': 'console', 'authentication_methods': ['password']},
            {'application': 'ssh', 'authentication_methods': ['publickey'], 'second_authentication_method': 'password'},
        ],
        'set_password': '123456',
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_ensure_user_role_update_additional_application_called():
    '''Replacing the application set creates the new app and deletes the old ones.'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']),
        ('ZAPI', 'security-login-create', ZRR['success']),
        ('ZAPI', 'security-login-delete', ZRR['success']),
        ('ZAPI', 'security-login-delete', ZRR['success']),
        ('ZAPI', 'security-login-modify-password', ZRR['success']),
    ])
    module_args = {
        'use_rest': 'never',
        'name': 'create',
        'role_name': 'test123',
        'application_dicts': [{'application': 'http', 'authentication_methods': ['password']}],
        'set_password': '123456',
        'replace_existing_apps_and_methods': 'always',
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_ensure_user_role_update_additional_method_called():
    '''Replacing the authentication method recreates the app with the new method.'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['login_unlocked_user']),
        ('ZAPI', 'security-login-create', ZRR['success']),
        ('ZAPI', 'security-login-delete', ZRR['success']),
        ('ZAPI', 'security-login-delete', ZRR['success']),
        ('ZAPI', 'security-login-modify-password', ZRR['success']),
    ])
    module_args = {
        'use_rest': 'never',
        'name': 'create',
        'role_name': 'test123',
        'application_dicts': [{'application': 'console', 'authentication_methods': ['domain']}],
        'set_password': '123456',
        'replace_existing_apps_and_methods': 'always',
    }
    assert call_main(my_main, DEFAULT_ARGS, module_args)['changed']


def test_if_all_methods_catch_exception():
    '''Each ZAPI helper surfaces a ZAPI error as a module failure with a specific message.'''
    register_responses([
        ('ZAPI', 'security-login-get-iter', ZRR['error']),
        ('ZAPI', 'security-login-create', ZRR['error']),
        ('ZAPI', 'security-login-lock', ZRR['error']),
        ('ZAPI', 'security-login-unlock', ZRR['error']),
        ('ZAPI', 'security-login-delete', ZRR['error']),
        ('ZAPI', 'security-login-modify-password', ZRR['error']),
        ('ZAPI', 'security-login-modify', ZRR['error']),
    ])
    module_args = {
        'use_rest': 'never',
        'name': 'create',
        'role_name': 'test123',
        'application_dicts': [{'application': 'console', 'authentication_methods': ['password']}],
    }
    my_obj = create_module(my_module, DEFAULT_ARGS, module_args)
    app = {'application': 'console', 'authentication_methods': ['password']}
    assert zapi_error_message('Error getting user create') in expect_and_capture_ansible_exception(my_obj.get_user, 'fail')['msg']
    assert zapi_error_message('Error creating user create') in expect_and_capture_ansible_exception(my_obj.create_user, 'fail', app)['msg']
    assert zapi_error_message('Error locking user create') in expect_and_capture_ansible_exception(my_obj.lock_given_user, 'fail')['msg']
    assert zapi_error_message('Error unlocking user create') in expect_and_capture_ansible_exception(my_obj.unlock_given_user, 'fail')['msg']
    assert zapi_error_message('Error removing user create') in expect_and_capture_ansible_exception(my_obj.delete_user, 'fail', app)['msg']
    assert zapi_error_message('Error setting password for user create') in expect_and_capture_ansible_exception(my_obj.change_password, 'fail')['msg']
    assert zapi_error_message('Error modifying user create') in expect_and_capture_ansible_exception(my_obj.modify_user, 'fail', app, ['password'])['msg']


def test_rest_error_applications_snmp():
    '''The snmp application is rejected up front when REST is in use.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest']),
        ('GET', 'cluster', SRR['get_user_rest']),
    ])
    module_args = {
        'use_rest': 'always',
        'role_name': 'test123',
        'application_dicts': [{'application': 'snmp', 'authentication_methods': ['usm']}],
        'set_password': '123456',
    }
    # the module fails validation right after the version probe, so only is_rest is consumed
    register_responses([
        ('GET', 'cluster', SRR['is_rest']),
    ])
    assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == 'snmp as application is not supported in REST.'
+ + +def test_ensure_user_get_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'application_dicts': + [dict(application='http', authentication_methods=['password']), + dict(application='ontapi', authentication_methods=['password'])], + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert my_obj.get_user_rest() is not None + + +def test_ensure_create_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['zero_records']), + ('POST', 'security/accounts', SRR['empty_good']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'application_dicts': + [dict(application='http', authentication_methods=['password']), + dict(application='ontapi', authentication_methods=['password'])], + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_delete_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('DELETE', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ]) + module_args = { + "use_rest": "always", + 'state': 'absent', + 'role_name': 'vsadmin', + 'application_dicts': + [dict(application='http', authentication_methods=['password']), + dict(application='ontapi', authentication_methods=['password'])], + 'vserver': None + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_modify_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ]) 
+ module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'application_dicts': [dict(application='service_processor', authentication_methods=['usm'])] + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_lock_unlock_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'application_dicts': + [dict(application='http', authentication_methods=['password'])], + 'lock_user': True, + } + print_requests() + # TODO: a single PATCH should be enough ? + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_change_password_user_rest_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['get_user_rest']), + ('GET', 'security/accounts/ansible_vserver/abcd', SRR['get_user_details_rest']), + ('PATCH', 'security/accounts/ansible_vserver/abcd', SRR['empty_good']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'application_dicts': + [dict(application='http', authentication_methods=['password'])], + 'password': 'newvalue', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_sp_retry(): + """simulate error in create_user_rest and retry""" + register_responses([ + # retry followed by error + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['zero_records']), + ('POST', 'security/accounts', SRR['invalid_value_error']), + ('POST', 'security/accounts', SRR['generic_error']), + # retry followed by success + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'security/accounts', SRR['zero_records']), + 
('POST', 'security/accounts', SRR['invalid_value_error']), + ('POST', 'security/accounts', SRR['success']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'application_dicts': [ + dict(application='service_processor', authentication_methods=['usm']) + ] + } + assert 'invalid value' in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + module_args['application_dicts'] = [dict(application='service-processor', authentication_methods=['usm'])] + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_validate_application(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'application_dicts': + [dict(application='http', authentication_methods=['password'])], + 'password': 'newvalue', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert 'second_authentication_method' in my_obj.parameters['applications'][0] + my_obj.parameters['applications'][0].pop('second_authentication_method') + my_obj.validate_applications() + assert 'second_authentication_method' in my_obj.parameters['applications'][0] + assert my_obj.parameters['applications'][0]['second_authentication_method'] is None + + +def test_sp_transform(): + current = {'applications': []} + sp_app_u = 'service_processor' + sp_app_d = 'service-processor' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ]) + # 1. no change using underscore + module_args = { + "use_rest": "always", + 'role_name': 'vsadmin', + 'application_dicts': [ + {'application': sp_app_u, 'authentication_methods': ['password']} + ], + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + my_obj.change_sp_application([]) + sp_apps = [application['application'] for application in my_obj.parameters['applications'] if application['application'].startswith('service')] + assert sp_apps == [sp_app_u] + # 2. 
change underscore -> dash + my_obj.change_sp_application([{'application': sp_app_d}]) + sp_apps = [application['application'] for application in my_obj.parameters['applications'] if application['application'].startswith('service')] + assert sp_apps == [sp_app_d] + # 3. no change using dash + module_args['application_dicts'] = [{'application': sp_app_d, 'authentication_methods': ['password']}] + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + my_obj.change_sp_application([]) + sp_apps = [application['application'] for application in my_obj.parameters['applications'] if application['application'].startswith('service')] + assert sp_apps == [sp_app_d] + # 4. change dash -> underscore + my_obj.change_sp_application([{'application': sp_app_u}]) + sp_apps = [application['application'] for application in my_obj.parameters['applications'] if application['application'].startswith('service')] + assert sp_apps == [sp_app_u] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role.py new file mode 100644 index 000000000..9fafd8a68 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role.py @@ -0,0 +1,139 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests 
import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_user_role \ + import NetAppOntapUserRole as role_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +def build_role_info(access_level='all'): + return { + 'num-records': 1, + 'attributes-list': { + 'security-login-role-info': { + 'access-level': access_level, + 'command-directory-name': 'volume', + 'role-name': 'testrole', + 'role-query': 'show', + 'vserver': 'ansible' + } + } + } + + +ZRR = zapi_responses({ + 'build_role_info': build_zapi_response(build_role_info()), + 'build_role_modified': build_zapi_response(build_role_info('none')) +}) + +DEFAULT_ARGS = { + 'name': 'testrole', + 'vserver': 'ansible', + 'command_directory_name': 'volume', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'https': 'False', + 'use_rest': 'never' +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "name"] + error = create_module(role_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_get_nonexistent_policy(): + ''' Test if get_role returns None for non-existent role ''' + register_responses([ + ('ZAPI', 'security-login-role-get-iter', ZRR['empty']), + ]) + my_obj = create_module(role_module, DEFAULT_ARGS) + assert my_obj.get_role() is None + + +def test_get_existing_role(): + ''' Test if get_role returns details for existing role ''' + register_responses([ + ('ZAPI', 'security-login-role-get-iter', ZRR['build_role_info']), + ]) + my_obj = create_module(role_module, DEFAULT_ARGS) + current = my_obj.get_role() + assert 
current['name'] == DEFAULT_ARGS['name'] + + +def test_successful_create(): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'security-login-role-get-iter', ZRR['empty']), + ('ZAPI', 'security-login-role-create', ZRR['success']), + # idempotency check + ('ZAPI', 'security-login-role-get-iter', ZRR['build_role_info']), + ]) + assert create_and_apply(role_module, DEFAULT_ARGS)['changed'] + assert not create_and_apply(role_module, DEFAULT_ARGS)['changed'] + + +def test_successful_modify(): + ''' Test successful modify ''' + register_responses([ + ('ZAPI', 'security-login-role-get-iter', ZRR['build_role_info']), + ('ZAPI', 'security-login-role-modify', ZRR['success']), + # idempotency check + ('ZAPI', 'security-login-role-get-iter', ZRR['build_role_modified']), + ]) + assert create_and_apply(role_module, DEFAULT_ARGS, {'access_level': 'none'})['changed'] + assert not create_and_apply(role_module, DEFAULT_ARGS, {'access_level': 'none'})['changed'] + + +def test_successful_delete(): + ''' Test delete existing role ''' + register_responses([ + ('ZAPI', 'security-login-role-get-iter', ZRR['build_role_info']), + ('ZAPI', 'security-login-role-delete', ZRR['success']), + # idempotency check + ('ZAPI', 'security-login-role-get-iter', ZRR['empty']), + ]) + assert create_and_apply(role_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + assert not create_and_apply(role_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('ZAPI', 'security-login-role-get-iter', ZRR['error']), + ('ZAPI', 'security-login-role-create', ZRR['error']), + ('ZAPI', 'security-login-role-modify', ZRR['error']), + ('ZAPI', 'security-login-role-delete', ZRR['error']) + ]) + my_obj = create_module(role_module, DEFAULT_ARGS) + assert 'Error getting role' in expect_and_capture_ansible_exception(my_obj.get_role, 'fail')['msg'] + assert 'Error creating role' in expect_and_capture_ansible_exception(my_obj.create_role, 
'fail')['msg'] + assert 'Error modifying role' in expect_and_capture_ansible_exception(my_obj.modify_role, 'fail', {})['msg'] + assert 'Error removing role' in expect_and_capture_ansible_exception(my_obj.delete_role, 'fail')['msg'] + + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['command_directory_name'] + assert 'Error: command_directory_name is required' in create_module(role_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['vserver'] + assert 'Error: vserver is required' in create_module(role_module, DEFAULT_ARGS_COPY, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role_rest.py new file mode 100644 index 000000000..b6e1e0b95 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_user_role_rest.py @@ -0,0 +1,647 @@ +# (c) 2022-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_user_role \ + import NetAppOntapUserRole as my_module, main as 
if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7):
    pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available')

# Canned REST responses keyed by scenario name: (status, body, error).
SRR = rest_responses({
    'user_role_9_10': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [{'access': 'readonly', 'path': '/api/storage/volumes'}],
        'name': 'admin',
        'scope': 'cluster',
    }, None),
    'user_role_9_11_command': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [
            {'path': 'job schedule interval', 'query': '-days <1 -hours >12'},
            {'path': 'DEFAULT', 'access': 'none', '_links': {'self': {'href': '/api/resourcelink'}}},
        ],
        'name': 'admin',
        'scope': 'cluster',
    }, None),
    'user_role_9_10_two_paths': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [
            {'access': 'readonly', 'path': '/api/storage/volumes'},
            {'access': 'readonly', 'path': '/api/cluster/jobs'},
        ],
        'name': 'admin',
        'scope': 'cluster',
    }, None),
    'user_role_9_10_two_paths_modified': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [
            {'access': 'readonly', 'path': '/api/storage/volumes'},
            {'access': 'readonly', 'path': '/api/cluster/jobs'},
        ],
        'name': 'admin',
        'scope': 'cluster',
    }, None),
    'user_role_9_11': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [{'access': 'readonly', 'path': '/api/cluster/jobs'}],
        'name': 'admin',
        'scope': 'cluster',
    }, None),
    'user_role_cluster_jobs_all': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [{'access': 'all', 'path': '/api/cluster/jobs'}],
        'name': 'admin',
        'scope': 'cluster',
    }, None),
    'user_role_privileges': (200, {
        'records': [
            {'access': 'readonly', '_links': {'self': {'href': '/api/resourcelink'}}, 'path': '/api/cluster/jobs'},
        ],
    }, None),
    'user_role_privileges_command': (200, {
        'records': [
            {'access': 'all', 'query': '-days <1 -hours >12', '_links': {'self': {'href': '/api/resourcelink'}}, 'path': 'job schedule interval'},
        ],
    }, None),
    'user_role_privileges_two_paths': (200, {
        'records': [
            {'access': 'readonly', '_links': {'self': {'href': '/api/resourcelink'}}, 'path': '/api/cluster/jobs'},
            {'access': 'readonly', '_links': {'self': {'href': '/api/resourcelink'}}, 'path': '/api/storage/volumes'},
        ],
    }, None),
    'user_role_volume': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [
            {'access': 'readonly', 'path': 'volume create'},
            {'access': 'readonly', 'path': 'volume modify'},
            {'access': 'readonly', 'path': 'volume show'},
        ],
        'name': 'admin',
    }, None),
    'user_role_vserver': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [{'access': 'readonly', 'path': 'vserver show'}],
        'name': 'admin',
    }, None),
    'user_role_volume_privileges': (200, {
        'records': [
            {'access': 'readonly', 'path': 'volume create'},
            {'access': 'readonly', 'path': 'volume modify'},
        ],
    }, None),
    'user_role_privileges_schedule': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [{'access': 'all', 'path': 'job schedule interval', 'query': '-days <1 -hours >12'}],
        'name': 'admin',
    }, None),
    'user_role_privileges_schedule_modify': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [{'access': 'all', 'path': 'job schedule interval', 'query': '-days <1 -hours >8'}],
        'name': 'admin',
    }, None),
    'user_role_volume_with_query': (200, {
        'owner': {'name': 'svm1', 'uuid': '02c9e252-41be-11e9-81d5-00a0986138f7'},
        'privileges': [{'access': 'readonly', 'path': '/api/storage/volumes', 'query': '-vserver vs1|vs2|vs3 -destination-aggregate aggr1|aggr2'}],
        'name': 'admin',
        'scope': 'cluster',
    }, None),
    'error_4': (409, None, {'code': 4, 'message': "entry doesn't exist, 'target': 'path'"}),
})

# Privilege payloads reused across tests below.
PRIVILEGES_SINGLE_WITH_QUERY = [{'path': 'job schedule interval', 'query': '-days <1 -hours >12'}]

PRIVILEGES_PATH_ONLY = [{'path': '/api/cluster/jobs'}]

PRIVILEGES_2_PATH_ONLY = [{'path': '/api/cluster/jobs'}, {'path': '/api/storage/volumes'}]

PRIVILEGES = [{'path': '/api/storage/volumes', 'access': 'readonly'}]

PRIVILEGES_911 = [{'path': '/api/storage/volumes', 'access': 'readonly'}]

PRIVILEGES_MODIFY = [{'path': '/api/cluster/jobs', 'access': 'all'}]

PRIVILEGES_COMMAND_MODIFY = [{'path': 'job schedule interval', 'query': '-days <1 -hours >8'}]

PRIVILEGES_MODIFY_911 = [{'path': '/api/cluster/jobs', 'access': 'all'}]

PRIVILEGES_MODIFY_NEW_PATH = [
    {'path': '/api/cluster/jobs', 'access': 'all'},
    {'path': '/api/storage/volumes', 'access': 'all'},
]

PRIVILEGES_MODIFY_NEW_PATH_9_11 = [
    {'path': '/api/cluster/jobs', 'access': 'all'},
    {'path': '/api/storage/volumes', 'access': 'all'},
]

DEFAULT_ARGS = {
    'hostname': 'hostname',
    'username': 'username',
    'password': 'password',
    'name': 'admin',
    'vserver': 'svm1',
}


def test_privileges_query_in_9_10():
    '''privileges.query requires ONTAP 9.11.1 or later.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
    ])
    module_args = {'privileges': PRIVILEGES_SINGLE_WITH_QUERY,
                   'use_rest': 'always'}
    my_module_object = create_module(my_module, DEFAULT_ARGS, module_args, fail=True)
    msg = 'Minimum version of ONTAP for privileges.query is (9, 11, 1)'
    assert msg in my_module_object['msg']
def test_get_user_role_none():
    '''get_role returns None when the role does not exist.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'security/roles', SRR['empty_records']),
    ])
    set_module_args(DEFAULT_ARGS)
    my_obj = my_module()
    assert my_obj.get_role() is None


def test_get_user_role_error():
    '''A REST error in get_role is reported with context.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'security/roles', SRR['generic_error']),
    ])
    my_module_object = create_module(my_module, DEFAULT_ARGS)
    msg = 'Error getting role admin: calling: security/roles: got Expected error.'
    assert msg in expect_and_capture_ansible_exception(my_module_object.get_role, 'fail')['msg']


def test_get_user_role():
    '''get_role returns the existing role (9.10 payload).'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'security/roles', SRR['user_role_9_10']),
    ])
    set_module_args(DEFAULT_ARGS)
    my_obj = my_module()
    assert my_obj.get_role() is not None


def test_get_user_role_9_11():
    '''get_role returns the existing role (9.11 payload).'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_11_1']),
        ('GET', 'security/roles', SRR['user_role_9_11']),
    ])
    set_module_args(DEFAULT_ARGS)
    my_obj = my_module()
    assert my_obj.get_role() is not None


def test_create_user_role_9_10_new_format():
    '''Create a role with the privileges list on 9.10.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'security/roles', SRR['empty_records']),
        ('POST', 'security/roles', SRR['empty_good']),
        ('GET', 'security/roles', SRR['user_role_9_10']),
    ])
    module_args = {'privileges': PRIVILEGES}
    assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed']


def test_create_user_role_9_11_new_format():
    '''Create a role with the privileges list on 9.11.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_11_1']),
        ('GET', 'security/roles', SRR['empty_records']),
        ('POST', 'security/roles', SRR['empty_good']),
        ('GET', 'security/roles', SRR['user_role_9_10']),
    ])
    module_args = {'privileges': PRIVILEGES_911}
    assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed']


def test_create_user_role_9_11_new_format_query():
    '''Create a role whose privilege carries a query (9.11+ only).'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_11_1']),
        ('GET', 'security/roles', SRR['empty_records']),
        ('POST', 'security/roles', SRR['empty_good']),
        ('GET', 'security/roles', SRR['user_role_privileges_schedule']),
    ])
    module_args = {'privileges': PRIVILEGES_SINGLE_WITH_QUERY}
    assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed']


def test_create_user_role_9_10_new_format_path_only():
    '''Create a role from a privilege entry with only a path.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'security/roles', SRR['empty_records']),
        ('POST', 'security/roles', SRR['empty_good']),
        ('GET', 'security/roles', SRR['user_role_9_11']),
    ])
    module_args = {'privileges': PRIVILEGES_PATH_ONLY}
    print(module_args)
    assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed']


def test_create_user_role_9_10_new_format_2_path_only():
    '''Create a role from two path-only privilege entries.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'security/roles', SRR['empty_records']),
        ('POST', 'security/roles', SRR['empty_good']),
        ('GET', 'security/roles', SRR['user_role_9_10_two_paths']),
    ])
    module_args = {'privileges': PRIVILEGES_2_PATH_ONLY}
    assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed']


def test_create_user_role_9_10_old_format():
    '''Create a role using the legacy command_directory_name/access_level options.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_11_1']),
        ('GET', 'security/roles', SRR['empty_records']),
        ('POST', 'security/roles', SRR['empty_good']),
        ('GET', 'security/roles', SRR['user_role_9_10']),
    ])
    module_args = {'command_directory_name': '/api/storage/volumes',
                   'access_level': 'readonly'}
    assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed']


def test_create_user_role_9_11_old_format_with_query():
    '''Create a role using the legacy options plus a query.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_11_1']),
        ('GET', 'security/roles', SRR['empty_records']),
        ('POST', 'security/roles', SRR['empty_good']),
        ('GET', 'security/roles', SRR['user_role_volume_with_query']),
    ])
    module_args = {'command_directory_name': '/api/storage/volumes',
                   'access_level': 'readonly',
                   'query': '-vserver vs1|vs2|vs3 -destination-aggregate aggr1|aggr2'}
    assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed']


def test_create_user_role_error():
    '''A REST error in create_role is reported with context.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('POST', 'security/roles', SRR['generic_error']),
    ])
    my_obj = create_module(my_module, DEFAULT_ARGS)
    my_obj.parameters['privileges'] = PRIVILEGES
    error = expect_and_capture_ansible_exception(my_obj.create_role, 'fail')['msg']
    print('Info: %s' % error)
    assert 'Error creating role admin: calling: security/roles: got Expected error.' == error


def test_delete_user_role():
    '''Delete an existing role, then verify idempotency.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_11_1']),
        ('GET', 'security/roles', SRR['user_role_9_10']),
        ('DELETE', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin', SRR['empty_good']),
        ('GET', 'security/roles', SRR['empty_records']),
    ])
    module_args = {'state': 'absent',
                   'command_directory_name': '/api/storage/volumes',
                   'access_level': 'readonly'}
    assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed']


def test_delete_user_role_error():
    '''A REST error in delete_role is reported with context.'''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('DELETE', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin', SRR['generic_error']),
    ])
    my_obj = create_module(my_module, DEFAULT_ARGS)
    my_obj.parameters['privileges'] = PRIVILEGES
    my_obj.parameters['state'] = 'absent'
    my_obj.owner_uuid = '02c9e252-41be-11e9-81d5-00a0986138f7'
    error = expect_and_capture_ansible_exception(my_obj.delete_role, 'fail')['msg']
    print('Info: %s' % error)
    assert 'Error deleting role admin: calling: security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin: got Expected error.' == error
== error + + +def test_modify_user_role_9_10(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/roles', SRR['user_role_9_10']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['user_role_privileges']), + ('PATCH', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fcluster%2Fjobs', SRR['empty_good']), + ('GET', 'security/roles', SRR['user_role_cluster_jobs_all']) + ]) + module_args = {'privileges': PRIVILEGES_MODIFY} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_user_role_command_9_10(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'security/roles', SRR['user_role_9_11_command']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['user_role_privileges_command']), + ('PATCH', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/job schedule interval', SRR['empty_good']), + ('GET', 'security/roles', SRR['user_role_privileges_schedule_modify']) + ]) + module_args = {'privileges': PRIVILEGES_COMMAND_MODIFY} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_remove_user_role_9_10(): + # This test will modify cluster/job, and delete storage/volumes + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/roles', SRR['user_role_9_10_two_paths']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['user_role_privileges_two_paths']), + ('PATCH', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fcluster%2Fjobs', SRR['empty_good']), + ('DELETE', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fstorage%2Fvolumes', SRR['empty_good']), + ('GET', 'security/roles', SRR['user_role_cluster_jobs_all']) + ]) + module_args = {'privileges': PRIVILEGES_MODIFY} + assert 
create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_user_role_9_11(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'security/roles', SRR['user_role_9_11']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['user_role_privileges']), + ('PATCH', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fcluster%2Fjobs', SRR['empty_good']), + ('GET', 'security/roles', SRR['user_role_cluster_jobs_all']) + ]) + module_args = {'privileges': PRIVILEGES_MODIFY_911} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_user_role_create_new_privilege(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/roles', SRR['user_role_9_10']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['user_role_privileges']), + ('PATCH', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fcluster%2Fjobs', SRR['empty_good']), # First path + ('POST', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['empty_good']), # Second path + ('GET', 'security/roles', SRR['user_role_9_10_two_paths_modified']) + ]) + module_args = {'privileges': PRIVILEGES_MODIFY_NEW_PATH} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_user_role_create_new_privilege_9_11(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'security/roles', SRR['user_role_9_11']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['user_role_privileges']), + ('PATCH', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fcluster%2Fjobs', SRR['empty_good']), # First path + ('POST', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['empty_good']), # Second path + ('GET', 'security/roles', 
SRR['empty_records']) + ]) + module_args = {'privileges': PRIVILEGES_MODIFY_NEW_PATH_9_11} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_remove_user_role_error(): + # This test will modify cluster/job, and delete storage/volumes + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('DELETE', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fstorage%2Fvolumes', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['name'] = 'admin' + my_obj.owner_uuid = '02c9e252-41be-11e9-81d5-00a0986138f7' + error = expect_and_capture_ansible_exception(my_obj.delete_role_privilege, 'fail', '/api/storage/volumes')['msg'] + print('Info: %s' % error) + assert 'Error deleting role privileges admin: calling: security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fstorage%2Fvolumes: '\ + 'got Expected error.' == error + + +def test_get_user_role_privileges_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['name'] = 'admin' + my_obj.owner_uuid = '02c9e252-41be-11e9-81d5-00a0986138f7' + error = expect_and_capture_ansible_exception(my_obj.get_role_privileges_rest, 'fail')['msg'] + print('Info: %s' % error) + assert 'Error getting role privileges for role admin: calling: security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges: '\ + 'got Expected error.' 
== error + + +def test_create_user_role_privileges_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('POST', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['name'] = 'admin' + my_obj.owner_uuid = '02c9e252-41be-11e9-81d5-00a0986138f7' + error = expect_and_capture_ansible_exception(my_obj.create_role_privilege, 'fail', PRIVILEGES[0])['msg'] + print('Info: %s' % error) + assert 'Error creating role privilege /api/storage/volumes: calling: security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges: '\ + 'got Expected error.' == error + + +def test_modify_user_role_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['user_role_privileges']), + ('PATCH', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fcluster%2Fjobs', SRR['generic_error']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.parameters['privileges'] = PRIVILEGES_MODIFY + my_obj.owner_uuid = '02c9e252-41be-11e9-81d5-00a0986138f7' + current = {'privileges': PRIVILEGES_MODIFY} + error = expect_and_capture_ansible_exception(my_obj.modify_role, 'fail', current)['msg'] + print('Info: %s' % error) + assert 'Error modifying privileges for path %2Fapi%2Fcluster%2Fjobs: calling: '\ + 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/%2Fapi%2Fcluster%2Fjobs: '\ + 'got Expected error.' 
== error + + +def test_command_directory_present_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + assert 'Error: either path or command_directory_name is required' in create_and_apply(my_module, DEFAULT_ARGS, fail=True)['msg'] + + +def test_warnings_additional_commands_added_after_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'security/roles', SRR['empty_records']), + ('POST', 'security/roles', SRR['empty_good']), + ('GET', 'security/roles', SRR['user_role_volume']) + ]) + args = {'privileges': [{'path': 'volume create', 'access': 'all'}]} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert_warning_was_raised("Create operation also affected additional related commands", partial_match=True) + + +def test_warnings_create_required_after_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'security/roles', SRR['user_role_volume']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['user_role_volume_privileges']), + ('DELETE', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/volume modify', SRR['empty_good']), + ('GET', 'security/roles', SRR['empty_records']), + ]) + args = {'privileges': [{'path': 'volume create', 'access': 'readonly'}]} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert_warning_was_raised("Create role is required", partial_match=True) + + +def test_warnings_modify_required_after_original_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'security/roles', SRR['user_role_volume']), + ('GET', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges', SRR['user_role_volume_privileges']), + ('DELETE', 'security/roles/02c9e252-41be-11e9-81d5-00a0986138f7/admin/privileges/volume modify', SRR['error_4']), + ('GET', 'security/roles', SRR['user_role_vserver']), + ]) + args = {'privileges': 
[{'path': 'volume create', 'access': 'readonly'}]} + assert create_and_apply(my_module, DEFAULT_ARGS, args)['changed'] + assert_warning_was_raised("modify is required, desired", partial_match=True) + + +def test_error_with_legacy_commands_9_10_1(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']) + ]) + args = {'privileges': [{'path': 'volume create', 'access': 'readonly'}]} + assert "Error: Invalid URI ['volume create']" in create_module(my_module, DEFAULT_ARGS, args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume.py new file mode 100644 index 000000000..3161ead04 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume.py @@ -0,0 +1,2011 @@ +# (c) 2018-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + assert_warning_was_raised, call_main, create_module, create_and_apply, expect_and_capture_ansible_exception, patch_ansible, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import \ + get_mock_record, patch_request_and_invoke, print_requests, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_error, build_zapi_response, zapi_error_message, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume 
\ + import NetAppOntapVolume as vol_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), +} + +MOCK_VOL = { + 'name': 'test_vol', + 'aggregate': 'test_aggr', + 'junction_path': '/test', + 'vserver': 'test_vserver', + 'size': 20971520, + 'unix_permissions': '755', + 'user_id': 100, + 'group_id': 1000, + 'snapshot_policy': 'default', + 'qos_policy_group': 'performance', + 'qos_adaptive_policy_group': 'performance', + 'percent_snapshot_space': 60, + 'language': 'en', + 'vserver_dr_protection': 'unprotected', + 'uuid': 'UUID' +} + + +def volume_info(style, vol_details=None, remove_keys=None, encrypt='false'): + if not vol_details: + vol_details = MOCK_VOL + info = copy.deepcopy({ + 'num-records': 1, + 'attributes-list': { + 'volume-attributes': { + 'encrypt': encrypt, + 'volume-id-attributes': { + 'aggr-list': vol_details['aggregate'], + 'containing-aggregate-name': vol_details['aggregate'], + 'flexgroup-uuid': 'uuid', + 'junction-path': vol_details['junction_path'], + 'style-extended': style, + 'type': 'rw' + }, + 'volume-comp-aggr-attributes': { + 'tiering-policy': 'snapshot-only' + }, + 'volume-language-attributes': { + 'language-code': 'en' + }, + 'volume-export-attributes': { + 'policy': 'default' + }, + 'volume-performance-attributes': { + 'is-atime-update-enabled': 'true' + }, + 'volume-state-attributes': { + 'state': "online", + 'is-nvfail-enabled': 'true' + }, + 'volume-inode-attributes': { + 'files-total': '2000', + }, + 'volume-space-attributes': { + 'space-guarantee': 'none', + 'size': vol_details['size'], + 'percentage-snapshot-reserve': vol_details['percent_snapshot_space'], + 'space-slo': 
'thick' + }, + 'volume-snapshot-attributes': { + 'snapshot-policy': vol_details['snapshot_policy'] + }, + 'volume-security-attributes': { + 'volume-security-unix-attributes': { + 'permissions': vol_details['unix_permissions'], + 'group-id': vol_details['group_id'], + 'user-id': vol_details['user_id'] + }, + 'style': 'unix', + }, + 'volume-vserver-dr-protection-attributes': { + 'vserver-dr-protection': vol_details['vserver_dr_protection'], + }, + 'volume-qos-attributes': { + 'policy-group-name': vol_details['qos_policy_group'], + 'adaptive-policy-group-name': vol_details['qos_adaptive_policy_group'] + }, + 'volume-snapshot-autodelete-attributes': { + 'commitment': 'try', + 'is-autodelete-enabled': 'true', + } + } + } + }) + if remove_keys: + for key in remove_keys: + if key == 'is_online': + del info['attributes-list']['volume-attributes']['volume-state-attributes']['state'] + else: + raise KeyError('unexpected key %s' % key) + return info + + +def vol_encryption_conversion_status(status): + return { + 'num-records': 1, + 'attributes-list': { + 'volume-encryption-conversion-info': { + 'status': status + } + } + } + + +def vol_move_status(status): + return { + 'num-records': 1, + 'attributes-list': { + 'volume-move-info': { + 'state': status, + 'details': 'some info' + } + } + } + + +def job_info(state, error): + return { + 'num-records': 1, + 'attributes': { + 'job-info': { + 'job-state': state, + 'job-progress': 'progress', + 'job-completion': error, + } + } + } + + +def results_info(status): + return { + 'result-status': status, + 'result-jobid': 'job12345', + } + + +def modify_async_results_info(status, error=None): + list_name = 'failure-list' if error else 'success-list' + info = { + list_name: { + 'volume-modify-iter-async-info': { + 'status': status, + 'jobid': '1234' + } + } + } + if error: + info[list_name]['volume-modify-iter-async-info']['error-message'] = error + return info + + +def sis_info(): + return { + 'num-records': 1, + 'attributes-list': { + 
'sis-status-info': { + 'policy': 'test', + 'is-compression-enabled': 'true', + 'sis-status-completion': 'false', + } + } + } + + +ZRR = zapi_responses({ + 'get_flexgroup': build_zapi_response(volume_info('flexgroup')), + 'get_flexvol': build_zapi_response(volume_info('flexvol')), + 'get_flexvol_encrypted': build_zapi_response(volume_info('flexvol', encrypt='true')), + 'get_flexvol_no_online_key': build_zapi_response(volume_info('flexvol', remove_keys=['is_online'])), + 'job_failure': build_zapi_response(job_info('failure', 'failure')), + 'job_other': build_zapi_response(job_info('other', 'other_error')), + 'job_running': build_zapi_response(job_info('running', None)), + 'job_success': build_zapi_response(job_info('success', None)), + 'job_time_out': build_zapi_response(job_info('running', 'time_out')), + 'job_no_completion': build_zapi_response(job_info('failure', None)), + 'async_results': build_zapi_response(results_info('in_progress')), + 'failed_results': build_zapi_response(results_info('failed')), + 'modify_async_result_success': build_zapi_response(modify_async_results_info('in_progress')), + 'modify_async_result_failure': build_zapi_response(modify_async_results_info('failure', 'error_in_modify')), + 'vol_encryption_conversion_status_running': build_zapi_response(vol_encryption_conversion_status('running')), + 'vol_encryption_conversion_status_idle': build_zapi_response(vol_encryption_conversion_status('Not currently going on.')), + 'vol_encryption_conversion_status_error': build_zapi_response(vol_encryption_conversion_status('other')), + 'vol_move_status_running': build_zapi_response(vol_move_status('healthy')), + 'vol_move_status_idle': build_zapi_response(vol_move_status('done')), + 'vol_move_status_error': build_zapi_response(vol_move_status('failed')), + 'insufficient_privileges': build_zapi_error(12346, 'Insufficient privileges: user USERID does not have read access to this resource'), + 'get_sis_info': build_zapi_response(sis_info()), + 
'error_15661': build_zapi_error(15661, 'force job not found error'), + 'error_tiering_94': build_zapi_error(94, 'volume-comp-aggr-attributes') +}) + + +MINIMUM_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': 'test_vol', + 'vserver': 'test_vserver', + 'use_rest': 'never' +} + + +DEFAULT_ARGS = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': 'test_vol', + 'vserver': 'test_vserver', + 'policy': 'default', + 'language': 'en', + 'is_online': True, + 'unix_permissions': '---rwxr-xr-x', + 'user_id': 100, + 'group_id': 1000, + 'snapshot_policy': 'default', + 'qos_policy_group': 'performance', + 'qos_adaptive_policy_group': 'performance', + 'size': 20, + 'size_unit': 'mb', + 'junction_path': '/test', + 'percent_snapshot_space': 60, + 'type': 'rw', + 'nvfail_enabled': True, + 'space_slo': 'thick', + 'use_rest': 'never' +} + + +ZAPI_ERROR = 'NetApp API failed. Reason - 12345:synthetic error for UT purpose' + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + error = create_module(vol_module, {}, fail=True) + print('Info: %s' % error['msg']) + assert 'missing required arguments:' in error['msg'] + + +def test_get_nonexistent_volume(): + ''' Test if get_volume returns None for non-existent volume ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['success']), + ]) + assert create_module(vol_module, DEFAULT_ARGS).get_volume() is None + + +def test_get_error(): + ''' Test if get_volume handles error ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['error']), + ]) + error = 'Error fetching volume test_vol : %s' % ZAPI_ERROR + assert expect_and_capture_ansible_exception(create_module(vol_module, DEFAULT_ARGS).get_volume, 'fail')['msg'] == error + + +def test_get_existing_volume(): + ''' Test if get_volume returns details for existing volume ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), 
+ ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + volume_info = create_module(vol_module, DEFAULT_ARGS).get_volume() + assert volume_info is not None + assert 'aggregate_name' in volume_info + + +def test_create_error_missing_param(): + ''' Test if create throws an error if aggregate_name is not specified''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['no_records']), + ]) + module_args = { + 'size': 20, + 'encrypt': True, + } + msg = 'Error provisioning volume test_vol: aggregate_name is required' + assert msg == create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_successful_create(): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-create', ZRR['success']), + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + module_args = { + 'aggregate_name': MOCK_VOL['aggregate'], + 'size': 20, + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_successful_create_with_completion(dont_sleep): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-create', ZRR['success']), + ('ZAPI', 'volume-get-iter', ZRR['no_records']), # wait for online + ('ZAPI', 'volume-get-iter', ZRR['no_records']), # wait for online + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), # wait for online + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + module_args = { + 'aggregate_name': MOCK_VOL['aggregate'], + 'size': 20, + 'wait_for_completion': True + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_error_timeout_create_with_completion(dont_sleep): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'volume-get-iter', 
ZRR['no_records']), + ('ZAPI', 'volume-create', ZRR['success']), + ('ZAPI', 'volume-get-iter', ZRR['no_records']), # wait for online + ('ZAPI', 'volume-get-iter', ZRR['no_records']), # wait for online + ('ZAPI', 'volume-get-iter', ZRR['no_records']), # wait for online + ('ZAPI', 'volume-get-iter', ZRR['no_records']), # wait for online + ]) + module_args = { + 'aggregate_name': MOCK_VOL['aggregate'], + 'size': 20, + 'time_out': 42, + 'wait_for_completion': True + } + error = "Error waiting for volume test_vol to come online: ['Timeout after 42 seconds']" + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +@patch('time.sleep') +def test_error_timeout_keyerror_create_with_completion(dont_sleep): + ''' Test successful create ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-create', ZRR['success']), + ('ZAPI', 'volume-get-iter', ZRR['no_records']), # wait for online + ('ZAPI', 'volume-get-iter', ZRR['no_records']), # wait for online + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol_no_online_key']), # wait for online + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-get-iter', ZRR['no_records']), # wait for online + ]) + module_args = { + 'aggregate_name': MOCK_VOL['aggregate'], + 'size': 20, + 'time_out': 42, + 'wait_for_completion': True + } + error_py3x = '''Error waiting for volume test_vol to come online: ["KeyError('is_online')", 'Timeout after 42 seconds']''' + error_py27 = '''Error waiting for volume test_vol to come online: ["KeyError('is_online',)", 'Timeout after 42 seconds']''' + error = create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('error', error) + assert error == error_py3x or error == error_py27 + + +def test_error_create(): + ''' Test error on create ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-create', ZRR['error']), + ]) + module_args = { + 'aggregate_name': 
MOCK_VOL['aggregate'], + 'size': 20, + 'encrypt': True, + } + error = 'Error provisioning volume test_vol of size 20971520: %s' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_create_idempotency(): + ''' Test create idempotency ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + assert not create_and_apply(vol_module, DEFAULT_ARGS)['changed'] + + +def test_successful_delete(): + ''' Test delete existing volume ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-destroy', ZRR['success']), + ]) + module_args = { + 'state': 'absent', + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_delete(): + ''' Test delete existing volume ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-destroy', ZRR['error']), + ('ZAPI', 'volume-destroy', ZRR['error']), + ]) + module_args = { + 'state': 'absent', + } + error = 'Error deleting volume test_vol:' + msg = create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error in msg + error = 'volume delete failed with unmount-and-offline option: %s' % ZAPI_ERROR + assert error in msg + error = 'volume delete failed without unmount-and-offline option: %s' % ZAPI_ERROR + assert error in msg + + +def test_error_delete_async(): + ''' Test delete existing volume ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexgroup']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-unmount', ZRR['error']), + ('ZAPI', 'volume-offline-async', ZRR['error']), + ('ZAPI', 'volume-destroy-async', ZRR['error']), + ]) + module_args = { + 'state': 'absent', + + } + error = 'Error deleting volume test_vol: %s' % ZAPI_ERROR + 
msg = create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error in msg + error = 'Error unmounting volume test_vol: %s' % ZAPI_ERROR + assert error in msg + error = 'Error changing the state of volume test_vol to offline: %s' % ZAPI_ERROR + assert error in msg + + +def test_delete_idempotency(): + ''' Test delete idempotency ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['no_records']), + ]) + module_args = { + 'state': 'absent', + } + assert not create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify_size(): + ''' Test successful modify size ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-size', ZRR['success']), + ]) + module_args = { + 'size': 200, + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('209715200', 2) + + +def test_modify_idempotency(): + ''' Test modify idempotency ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + assert not create_and_apply(vol_module, DEFAULT_ARGS)['changed'] + + +def test_modify_error(): + ''' Test modify idempotency ''' + register_responses([ + ('ZAPI', 'volume-modify-iter', ZRR['error']), + ]) + msg = 'Error modifying volume test_vol: %s' % ZAPI_ERROR + assert msg == expect_and_capture_ansible_exception(create_module(vol_module, DEFAULT_ARGS).volume_modify_attributes, 'fail', {})['msg'] + + +def test_mount_volume(): + ''' Test mount volume ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-mount', ZRR['success']), + ]) + module_args = { + 'junction_path': '/test123', + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +def 
test_error_mount_volume(): + ''' Test mount volume ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-mount', ZRR['error']), + ]) + module_args = { + 'junction_path': '/test123', + } + error = 'Error mounting volume test_vol on path /test123: %s' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_unmount_volume(): + ''' Test unmount volume ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-unmount', ZRR['success']), + ]) + module_args = { + 'junction_path': '', + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_unmount_volume(): + ''' Test unmount volume ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-unmount', ZRR['error']), + ]) + module_args = { + 'junction_path': '', + } + error = 'Error unmounting volume test_vol: %s' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_successful_modify_space(): + ''' Test successful modify space ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + args = dict(DEFAULT_ARGS) + del args['space_slo'] + module_args = { + 'space_guarantee': 'volume', + } + assert create_and_apply(vol_module, args, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('volume', 2) + + +def test_successful_modify_unix_permissions(): + ''' Test successful modify unix_permissions ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 
'volume-modify-iter', ZRR['success']), + ]) + module_args = { + 'unix_permissions': '---rw-r-xr-x', + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('---rw-r-xr-x', 2) + + +def test_successful_modify_volume_security_style(): + ''' Test successful modify volume_security_style ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + module_args = { + 'volume_security_style': 'mixed', + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('', 2) + + +def test_successful_modify_max_files_and_encrypt(): + ''' Test successful modify unix_permissions ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ('ZAPI', 'volume-encryption-conversion-start', ZRR['success']), + ]) + module_args = { + 'encrypt': True, + 'max_files': '3000', + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('3000', 2) + + +def test_successful_modify_snapshot_policy(): + ''' Test successful modify snapshot_policy ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + module_args = { + 'snapshot_policy': 'default-1weekly', + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('default-1weekly', 2) + + +def test_successful_modify_efficiency_policy(): + ''' Test successful modify efficiency_policy ''' + register_responses([ + ('ZAPI', 
'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'sis-enable', ZRR['success']), + ('ZAPI', 'sis-set-config', ZRR['success']), + ]) + module_args = { + 'efficiency_policy': 'test', + 'inline_compression': True + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('test', 3) + + +def test_successful_modify_efficiency_policy_idempotent(): + ''' Test successful modify efficiency_policy ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['get_sis_info']), + ]) + module_args = { + 'efficiency_policy': 'test', + 'compression': True + } + assert not create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify_efficiency_policy_async(): + ''' Test successful modify efficiency_policy ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexgroup']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'sis-enable-async', ZRR['success']), + ('ZAPI', 'sis-set-config-async', ZRR['success']), + ]) + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'efficiency_policy': 'test', + 'compression': True, + 'wait_for_completion': True, + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('test', 3) + + +def test_error_set_efficiency_policy(): + register_responses([ + ('ZAPI', 'sis-enable', ZRR['error']), + ]) + module_args = {'efficiency_policy': 'test_policy'} + msg = 'Error enable efficiency on volume test_vol: %s' % ZAPI_ERROR + assert msg == expect_and_capture_ansible_exception(create_module(vol_module, MINIMUM_ARGS, module_args).set_efficiency_config, 'fail')['msg'] + + +def test_error_modify_efficiency_policy(): + ''' Test error modify efficiency_policy ''' + register_responses([ + ('ZAPI', 
'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'sis-enable', ZRR['success']), + ('ZAPI', 'sis-set-config', ZRR['error']), + ]) + module_args = { + 'efficiency_policy': 'test', + } + error = 'Error setting up efficiency attributes on volume test_vol: %s' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_error_set_efficiency_policy_async(): + register_responses([ + ('ZAPI', 'sis-enable-async', ZRR['error']), + ]) + module_args = {'efficiency_policy': 'test_policy'} + msg = 'Error enable efficiency on volume test_vol: %s' % ZAPI_ERROR + assert msg == expect_and_capture_ansible_exception(create_module(vol_module, MINIMUM_ARGS, module_args).set_efficiency_config_async, 'fail')['msg'] + + +def test_error_modify_efficiency_policy_async(): + ''' Test error modify efficiency_policy ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexgroup']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'sis-enable-async', ZRR['success']), + ('ZAPI', 'sis-set-config-async', ZRR['error']), + ]) + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'efficiency_policy': 'test', + } + error = 'Error setting up efficiency attributes on volume test_vol: %s' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_successful_modify_percent_snapshot_space(): + ''' Test successful modify percent_snapshot_space ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + module_args = { + 'percent_snapshot_space': 90, + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('90', 2) + + +def test_successful_modify_qos_policy_group(): + ''' Test successful 
modify qos_policy_group ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + module_args = { + 'qos_policy_group': 'extreme', + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('extreme', 2) + + +def test_successful_modify_qos_adaptive_policy_group(): + ''' Test successful modify qos_adaptive_policy_group ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + module_args = { + 'qos_adaptive_policy_group': 'extreme', + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('extreme', 2) + + +def test_successful_move(): + ''' Test successful modify aggregate ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-move-start', ZRR['success']), + ('ZAPI', 'volume-move-get-iter', ZRR['vol_move_status_idle']), + ]) + module_args = { + 'aggregate_name': 'different_aggr', + 'cutover_action': 'abort_on_failure', + 'encrypt': True, + 'wait_for_completion': True + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_unencrypt_volume(): + ''' Test successful modify aggregate ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol_encrypted']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-move-start', ZRR['success']), + ('ZAPI', 'volume-move-get-iter', ZRR['vol_move_status_idle']), + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol_encrypted']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-move-start', ZRR['success']), + ('ZAPI', 'volume-move-get-iter', 
ZRR['vol_move_status_idle']), + ]) + # without aggregate + module_args = { + 'encrypt': False, + 'wait_for_completion': True + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + # with aggregate. + module_args['aggregate_name'] = 'test_aggr' + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_move(): + ''' Test error modify aggregate ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-move-start', ZRR['error']), + ]) + module_args = { + 'aggregate_name': 'different_aggr', + } + error = 'Error moving volume test_vol: %s - Retry failed with REST error: False' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def setup_rename(is_isinfinite=None): + module_args = { + 'from_name': MOCK_VOL['name'], + 'name': 'new_name', + 'time_out': 20 + } + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': MOCK_VOL['name'], + 'uuid': MOCK_VOL['uuid'], + 'vserver': MOCK_VOL['vserver'], + } + if is_isinfinite is not None: + module_args['is_infinite'] = is_isinfinite + current['is_infinite'] = is_isinfinite + return module_args, current + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_rename(get_volume): + ''' Test successful rename volume ''' + register_responses([ + ('ZAPI', 'volume-rename', ZRR['success']), + ]) + module_args, current = setup_rename() + get_volume.side_effect = [ + None, + current + ] + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_error_rename(get_volume): + ''' Test error rename volume ''' + register_responses([ + ('ZAPI', 'volume-rename', ZRR['error']), + ]) + module_args, 
current = setup_rename() + get_volume.side_effect = [ + None, + current + ] + error = 'Error renaming volume new_name: %s' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_error_rename_no_from(get_volume): + ''' Test error rename volume ''' + register_responses([ + ]) + module_args, current = setup_rename() + get_volume.side_effect = [ + None, + None + ] + error = 'Error renaming volume: cannot find %s' % MOCK_VOL['name'] + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_rename_async(get_volume): + ''' Test successful rename volume ''' + register_responses([ + ('ZAPI', 'volume-rename-async', ZRR['success']), + ]) + module_args, current = setup_rename(is_isinfinite=True) + get_volume.side_effect = [ + None, + current + ] + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_modify_helper(): + register_responses([ + ('ZAPI', 'volume-unmount', ZRR['success']), + ('ZAPI', 'volume-offline', ZRR['success']), + ]) + module_args = {'is_online': False} + modify = {'is_online': False} + assert create_module(vol_module, DEFAULT_ARGS, module_args).take_modify_actions(modify) is None + + +def test_compare_chmod_value_true_1(): + module_args = {'unix_permissions': '------------'} + current = { + 'unix_permissions': '0' + } + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert vol_obj.na_helper.compare_chmod_value(current['unix_permissions'], module_args['unix_permissions']) + + +def test_compare_chmod_value_true_2(): + module_args = {'unix_permissions': '---rwxrwxrwx'} + current = { + 'unix_permissions': '777' + } + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + 
assert vol_obj.na_helper.compare_chmod_value(current['unix_permissions'], module_args['unix_permissions']) + + +def test_compare_chmod_value_true_3(): + module_args = {'unix_permissions': '---rwxr-xr-x'} + current = { + 'unix_permissions': '755' + } + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert vol_obj.na_helper.compare_chmod_value(current['unix_permissions'], module_args['unix_permissions']) + + +def test_compare_chmod_value_true_4(): + module_args = {'unix_permissions': '755'} + current = { + 'unix_permissions': '755' + } + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert vol_obj.na_helper.compare_chmod_value(current['unix_permissions'], module_args['unix_permissions']) + + +def test_compare_chmod_value_false_1(): + module_args = {'unix_permissions': '---rwxrwxrwx'} + current = { + 'unix_permissions': '0' + } + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert not vol_obj.na_helper.compare_chmod_value(current['unix_permissions'], module_args['unix_permissions']) + + +def test_compare_chmod_value_false_2(): + module_args = {'unix_permissions': '---rwxrwxrwx'} + current = None + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert not vol_obj.na_helper.compare_chmod_value(current, module_args['unix_permissions']) + + +def test_compare_chmod_value_invalid_input_1(): + module_args = {'unix_permissions': '---xwrxwrxwr'} + current = { + 'unix_permissions': '777' + } + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert not vol_obj.na_helper.compare_chmod_value(current['unix_permissions'], module_args['unix_permissions']) + + +def test_compare_chmod_value_invalid_input_2(): + module_args = {'unix_permissions': '---rwx-wx--a'} + current = { + 'unix_permissions': '0' + } + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert not vol_obj.na_helper.compare_chmod_value(current['unix_permissions'], module_args['unix_permissions']) + + +def 
test_compare_chmod_value_invalid_input_3(): + module_args = {'unix_permissions': '---'} + current = { + 'unix_permissions': '0' + } + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert not vol_obj.na_helper.compare_chmod_value(current['unix_permissions'], module_args['unix_permissions']) + + +def test_compare_chmod_value_invalid_input_4(): + module_args = {'unix_permissions': 'rwx---rwxrwx'} + current = { + 'unix_permissions': '0' + } + vol_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert not vol_obj.na_helper.compare_chmod_value(current['unix_permissions'], module_args['unix_permissions']) + + +def test_successful_create_flex_group_manually(): + ''' Test successful create flexGroup manually ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['empty']), + ('ZAPI', 'volume-create-async', ZRR['success']), + ('ZAPI', 'volume-get-iter', ZRR['get_flexgroup']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter-async', ZRR['modify_async_result_success']), + ('ZAPI', 'job-get', ZRR['job_success']), + ]) + args = copy.deepcopy(DEFAULT_ARGS) + del args['space_slo'] + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'space_guarantee': 'file', + 'time_out': 20 + } + assert create_and_apply(vol_module, args, module_args)['changed'] + + +def test_error_create_flex_group_manually(): + ''' Test error create flexGroup manually ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['empty']), + ('ZAPI', 'volume-create-async', ZRR['error']), + ]) + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'time_out': 20 + } + error = 'Error provisioning volume test_vol of size 20971520: %s' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_partial_error_create_flex_group_manually(): + ''' Test error create flexGroup manually ''' + register_responses([ + ('GET', 'cluster', 
SRR['is_rest']), + ('ZAPI', 'volume-get-iter', ZRR['empty']), + ('ZAPI', 'volume-create-async', ZRR['success']), + ('ZAPI', 'volume-get-iter', ZRR['get_flexgroup']), + ('ZAPI', 'sis-get-iter', ZRR['insufficient_privileges']), # ignored but raises a warning + ('ZAPI', 'volume-modify-iter-async', ZRR['modify_async_result_failure']), + ]) + args = copy.deepcopy(DEFAULT_ARGS) + del args['space_slo'] + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'space_guarantee': 'file', + 'time_out': 20, + 'use_rest': 'auto' + } + error = 'Volume created with success, with missing attributes: Error modifying volume test_vol: error_in_modify' + assert create_and_apply(vol_module, args, module_args, fail=True)['msg'] == error + print_warnings() + assert_warning_was_raised('cannot read volume efficiency options (as expected when running as vserver): ' + 'NetApp API failed. Reason - 12346:Insufficient privileges: user USERID does not have read access to this resource') + + +def test_successful_create_flex_group_auto_provision(): + ''' Test successful create flexGroup auto provision ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['empty']), + ('ZAPI', 'volume-create-async', ZRR['success']), + ('ZAPI', 'volume-get-iter', ZRR['get_flexgroup']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + module_args = { + 'auto_provision_as': 'flexgroup', + 'time_out': 20 + } + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_delete_flex_group(get_volume): + ''' Test successful delete flexGroup ''' + register_responses([ + ('ZAPI', 'volume-unmount', ZRR['success']), + ('ZAPI', 'volume-offline-async', ZRR['job_success']), + ('ZAPI', 'volume-destroy-async', ZRR['job_success']), + ]) + module_args = { + 'state': 'absent', + } + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 
'test_pass!', + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'style_extended': 'flexgroup', + 'unix_permissions': '755', + 'is_online': True, + 'uuid': 'uuid' + } + get_volume.return_value = current + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +def setup_resize(): + module_args = { + 'size': 400, + 'size_unit': 'mb' + } + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'style_extended': 'flexgroup', + 'size': 20971520, + 'unix_permissions': '755', + 'uuid': '1234' + } + return module_args, current + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_resize_flex_group(get_volume): + ''' Test successful reszie flexGroup ''' + register_responses([ + ('ZAPI', 'volume-size-async', ZRR['job_success']), + ]) + module_args, current = setup_resize() + get_volume.return_value = current + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_error_resize_flex_group(get_volume): + ''' Test error reszie flexGroup ''' + register_responses([ + ('ZAPI', 'volume-size-async', ZRR['error']), + ]) + module_args, current = setup_resize() + get_volume.return_value = current + error = 'Error re-sizing volume test_vol: %s' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.check_job_status') +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_modify_unix_permissions_flex_group(get_volume, check_job_status): + ''' Test successful modify unix permissions flexGroup ''' + register_responses([ + ('ZAPI', 
'volume-modify-iter-async', ZRR['modify_async_result_success']), + ]) + module_args = { + 'unix_permissions': '---rw-r-xr-x', + 'time_out': 20 + } + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'style_extended': 'flexgroup', + 'unix_permissions': '777', + 'uuid': '1234' + } + get_volume.return_value = current + check_job_status.return_value = None + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_modify_unix_permissions_flex_group_0_time_out(get_volume): + ''' Test successful modify unix permissions flexGroup ''' + register_responses([ + ('ZAPI', 'volume-modify-iter-async', ZRR['modify_async_result_success']), + ]) + module_args = { + 'unix_permissions': '---rw-r-xr-x', + 'time_out': 0 + } + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'style_extended': 'flexgroup', + 'unix_permissions': '777', + 'uuid': '1234' + } + get_volume.return_value = current + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_modify_unix_permissions_flex_group_0_missing_result(get_volume): + ''' Test successful modify unix permissions flexGroup ''' + register_responses([ + ('ZAPI', 'volume-modify-iter-async', ZRR['job_running']), # bad response + ]) + module_args = { + 'unix_permissions': '---rw-r-xr-x', + 'time_out': 0 + } + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'style_extended': 'flexgroup', + 'unix_permissions': '777', + 'uuid': '1234' + } + 
get_volume.return_value = current + # check_job_status.side_effect = ['job_error'] + error = create_and_apply(vol_module, DEFAULT_ARGS, module_args, 'fail') + assert error['msg'].startswith('Unexpected error when modifying volume: result is:') + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.check_job_status') +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_error_modify_unix_permissions_flex_group(get_volume, check_job_status): + ''' Test error modify unix permissions flexGroup ''' + register_responses([ + ('ZAPI', 'volume-modify-iter-async', ZRR['modify_async_result_success']), + ]) + module_args = { + 'unix_permissions': '---rw-r-xr-x', + 'time_out': 20 + } + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'style_extended': 'flexgroup', + 'unix_permissions': '777', + 'uuid': '1234' + } + get_volume.return_value = current + check_job_status.side_effect = ['job_error'] + error = create_and_apply(vol_module, DEFAULT_ARGS, module_args, 'fail') + assert error['msg'] == 'Error when modifying volume: job_error' + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_failure_modify_unix_permissions_flex_group(get_volume): + ''' Test failure modify unix permissions flexGroup ''' + register_responses([ + ('ZAPI', 'volume-modify-iter-async', ZRR['modify_async_result_failure']), + ]) + module_args = { + 'unix_permissions': '---rw-r-xr-x', + 'time_out': 20 + } + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'style_extended': 'flexgroup', + 'unix_permissions': '777', + 'uuid': '1234' + } + get_volume.return_value = current + error = create_and_apply(vol_module, DEFAULT_ARGS, module_args, 'fail') 
+ assert error['msg'] == 'Error modifying volume test_vol: error_in_modify' + + +def setup_offline_state(): + module_args = {'is_online': False} + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'style_extended': 'flexgroup', + 'is_online': True, + 'junction_path': '/test', + 'unix_permissions': '755', + 'uuid': '1234' + } + return module_args, current + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_offline_state_flex_group(get_volume): + ''' Test successful offline flexGroup state ''' + register_responses([ + ('ZAPI', 'volume-unmount', ZRR['success']), + ('ZAPI', 'volume-offline-async', ZRR['async_results']), + ('ZAPI', 'job-get', ZRR['job_success']), + ]) + module_args, current = setup_offline_state() + get_volume.return_value = current + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_error_offline_state_flex_group(get_volume): + ''' Test error offline flexGroup state ''' + register_responses([ + ('ZAPI', 'volume-unmount', ZRR['success']), + ('ZAPI', 'volume-offline-async', ZRR['error']), + ]) + module_args, current = setup_offline_state() + get_volume.return_value = current + error = 'Error changing the state of volume test_vol to offline: %s' % ZAPI_ERROR + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_error_unmounting_offline_state_flex_group(get_volume): + ''' Test error offline flexGroup state ''' + register_responses([ + ('ZAPI', 'volume-unmount', ZRR['error']), + ('ZAPI', 'volume-offline-async', ZRR['error']), + ]) + module_args, current = setup_offline_state() + 
get_volume.return_value = current + error = 'Error changing the state of volume test_vol to offline: %s' % ZAPI_ERROR + msg = create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert error in msg + errpr = 'Error unmounting volume test_vol: %s' % ZAPI_ERROR + assert error in msg + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_online_state_flex_group(get_volume): + ''' Test successful online flexGroup state ''' + register_responses([ + ('ZAPI', 'volume-online-async', ZRR['async_results']), + ('ZAPI', 'job-get', ZRR['job_success']), + ('ZAPI', 'volume-modify-iter-async', ZRR['modify_async_result_success']), + ('ZAPI', 'job-get', ZRR['job_success']), + ('ZAPI', 'volume-mount', ZRR['success']), + ]) + current = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'style_extended': 'flexgroup', + 'is_online': False, + 'junction_path': 'anything', + 'unix_permissions': '755', + 'uuid': '1234' + } + get_volume.return_value = current + assert create_and_apply(vol_module, DEFAULT_ARGS)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('', 2) + assert get_mock_record().is_text_in_zapi_request('', 2) + assert get_mock_record().is_text_in_zapi_request('', 2) + assert get_mock_record().is_text_in_zapi_request('/test', 4) + + +def test_check_job_status_error(): + ''' Test check job status error ''' + register_responses([ + ('ZAPI', 'job-get', ZRR['error']), + ]) + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'time_out': 0 + } + error = 'Error fetching job info: %s' % ZAPI_ERROR + assert expect_and_capture_ansible_exception(create_module(vol_module, MINIMUM_ARGS, module_args).check_job_status, 'fail', '123')['msg'] == error + + +@patch('time.sleep') +def test_check_job_status_not_found(skip_sleep): + ''' Test check job 
status error ''' + register_responses([ + ('ZAPI', 'job-get', ZRR['error_15661']), + ('ZAPI', 'vserver-get-iter', ZRR['no_records']), + ('ZAPI', 'job-get', ZRR['error_15661']), + ]) + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'time_out': 50 + } + error = 'cannot locate job with id: 123' + assert create_module(vol_module, MINIMUM_ARGS, module_args).check_job_status('123') == error + + +@patch('time.sleep') +def test_check_job_status_failure(skip_sleep): + ''' Test check job status error ''' + register_responses([ + ('ZAPI', 'job-get', ZRR['job_running']), + ('ZAPI', 'job-get', ZRR['job_running']), + ('ZAPI', 'job-get', ZRR['job_failure']), + ('ZAPI', 'job-get', ZRR['job_running']), + ('ZAPI', 'job-get', ZRR['job_running']), + ('ZAPI', 'job-get', ZRR['job_no_completion']), + ]) + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'time_out': 20 + } + msg = 'failure' + assert msg == create_module(vol_module, MINIMUM_ARGS, module_args).check_job_status('123') + msg = 'progress' + assert msg == create_module(vol_module, MINIMUM_ARGS, module_args).check_job_status('123') + + +def test_check_job_status_time_out_is_0(): + ''' Test check job status time out is 0''' + register_responses([ + ('ZAPI', 'job-get', ZRR['job_time_out']), + ]) + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'time_out': 0 + } + msg = 'job completion exceeded expected timer of: 0 seconds' + assert msg == create_module(vol_module, MINIMUM_ARGS, module_args).check_job_status('123') + + +def test_check_job_status_unexpected(): + ''' Test check job status unexpected state ''' + register_responses([ + ('ZAPI', 'job-get', ZRR['job_other']), + ]) + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'time_out': 20 + } + msg = 'Unexpected job status in:' + assert msg in expect_and_capture_ansible_exception(create_module(vol_module, MINIMUM_ARGS, module_args).check_job_status, 'fail', 
'123')['msg'] + + +def test_successful_modify_tiering_policy(): + ''' Test successful modify tiering policy ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + module_args = {'tiering_policy': 'auto'} + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('auto', 2) + + +def test_error_modify_tiering_policy(): + ''' Test successful modify tiering policy ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['error']), + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['error_tiering_94']), + ]) + module_args = {'tiering_policy': 'auto'} + error = zapi_error_message('Error modifying volume test_vol') + assert error in create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + error = zapi_error_message('Error modifying volume test_vol', 94, 'volume-comp-aggr-attributes', '. 
Added info: tiering option requires 9.4 or later.') + assert error in create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_successful_modify_vserver_dr_protection(): + ''' Test successful modify vserver_dr_protection ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + module_args = {'vserver_dr_protection': 'protected'} + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('protected', 2) + + +def test_successful_group_id(): + ''' Test successful modify group_id ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + module_args = {'group_id': 1001} + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('1001', 2) + + +def test_successful_modify_user_id(): + ''' Test successful modify user_id ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ]) + module_args = {'user_id': 101} + assert create_and_apply(vol_module, DEFAULT_ARGS, module_args)['changed'] + print_requests() + assert get_mock_record().is_text_in_zapi_request('101', 2) + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume.NetAppOntapVolume.get_volume') +def test_successful_modify_snapshot_auto_delete(get_volume): + ''' Test successful modify unix permissions flexGroup ''' + register_responses([ + # One ZAPI call for each option! 
+ ('ZAPI', 'snapshot-autodelete-set-option', ZRR['success']), + ('ZAPI', 'snapshot-autodelete-set-option', ZRR['success']), + ('ZAPI', 'snapshot-autodelete-set-option', ZRR['success']), + ('ZAPI', 'snapshot-autodelete-set-option', ZRR['success']), + ('ZAPI', 'snapshot-autodelete-set-option', ZRR['success']), + ('ZAPI', 'snapshot-autodelete-set-option', ZRR['success']), + ('ZAPI', 'snapshot-autodelete-set-option', ZRR['success']), + ('ZAPI', 'snapshot-autodelete-set-option', ZRR['success']), + ]) + module_args = { + 'snapshot_auto_delete': { + 'delete_order': 'oldest_first', 'destroy_list': 'lun_clone,vol_clone', + 'target_free_space': 20, 'prefix': 'test', 'commitment': 'try', + 'state': 'on', 'trigger': 'snap_reserve', 'defer_delete': 'scheduled'}} + current = { + 'name': MOCK_VOL['name'], + 'vserver': MOCK_VOL['vserver'], + 'snapshot_auto_delete': { + 'delete_order': 'newest_first', 'destroy_list': 'lun_clone,vol_clone', + 'target_free_space': 30, 'prefix': 'test', 'commitment': 'try', + 'state': 'on', 'trigger': 'snap_reserve', 'defer_delete': 'scheduled'}, + 'uuid': '1234' + } + get_volume.return_value = current + assert create_and_apply(vol_module, MINIMUM_ARGS, module_args)['changed'] + + +def test_error_modify_snapshot_auto_delete(): + register_responses([ + ('ZAPI', 'snapshot-autodelete-set-option', ZRR['error']), + ]) + module_args = {'snapshot_auto_delete': { + 'delete_order': 'oldest_first', 'destroy_list': 'lun_clone,vol_clone', + 'target_free_space': 20, 'prefix': 'test', 'commitment': 'try', + 'state': 'on', 'trigger': 'snap_reserve', 'defer_delete': 'scheduled'}} + msg = 'Error setting snapshot auto delete options for volume test_vol: %s' % ZAPI_ERROR + assert msg == expect_and_capture_ansible_exception(create_module(vol_module, MINIMUM_ARGS, module_args).set_snapshot_auto_delete, 'fail')['msg'] + + +def test_successful_volume_rehost(): + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-rehost', 
ZRR['success']), + ]) + module_args = { + 'from_vserver': 'source_vserver', + 'auto_remap_luns': False, + } + assert create_and_apply(vol_module, MINIMUM_ARGS, module_args)['changed'] + + +def test_error_volume_rehost(): + register_responses([ + ('ZAPI', 'volume-rehost', ZRR['error']), + ]) + module_args = { + 'from_vserver': 'source_vserver', + 'force_unmap_luns': False, + } + msg = 'Error rehosting volume test_vol: %s' % ZAPI_ERROR + assert msg == expect_and_capture_ansible_exception(create_module(vol_module, MINIMUM_ARGS, module_args).rehost_volume, 'fail')['msg'] + + +def test_successful_volume_restore(): + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'snapshot-restore-volume', ZRR['success']), + ]) + module_args = { + 'snapshot_restore': 'snapshot_copy', + 'force_restore': True, + 'preserve_lun_ids': True + } + assert create_and_apply(vol_module, MINIMUM_ARGS, module_args)['changed'] + + +def test_error_volume_restore(): + register_responses([ + ('ZAPI', 'snapshot-restore-volume', ZRR['error']), + ]) + module_args = {'snapshot_restore': 'snapshot_copy'} + msg = 'Error restoring volume test_vol: %s' % ZAPI_ERROR + assert msg == expect_and_capture_ansible_exception(create_module(vol_module, MINIMUM_ARGS, module_args).snapshot_restore_volume, 'fail')['msg'] + + +def test_error_modify_flexvol_to_flexgroup(): + ''' Test successful modify vserver_dr_protection ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + module_args = {'auto_provision_as': 'flexgroup'} + msg = 'Error: changing a volume from one backend to another is not allowed. Current: flexvol, desired: flexgroup.' 
+ assert msg == create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_error_modify_flexgroup_to_flexvol(): + ''' Changing the style from flexgroup to flexvol is not allowed ''' + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexgroup']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + module_args = {'aggregate_name': 'nothing'} + msg = 'Error: aggregate_name option cannot be used with FlexGroups.' + assert msg == create_and_apply(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_error_snaplock_not_supported_with_zapi(): + ''' Test successful modify vserver_dr_protection ''' + module_args = {'snaplock': {'retention': {'default': 'P30TM'}}} + msg = 'Error: snaplock option is not supported with ZAPI. It can only be used with REST. use_rest: never.' + assert msg == create_module(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_wait_for_task_completion_no_records(): + register_responses([ + ('ZAPI', 'results', ZRR['no_records']), + ]) + # using response to build a request + zapi_iter, valid = build_zapi_response({'fake-iter': 'any'}) + my_obj = create_module(vol_module, DEFAULT_ARGS) + assert my_obj.wait_for_task_completion(zapi_iter, lambda: True) is None + + +def test_wait_for_task_completion_one_response(): + register_responses([ + ('ZAPI', 'results', ZRR['one_record_no_data']), + ]) + # using response to build a request + zapi_iter, valid = build_zapi_response({'fake-iter': 'any'}) + my_obj = create_module(vol_module, DEFAULT_ARGS) + assert my_obj.wait_for_task_completion(zapi_iter, lambda x: False) is None + + +@patch('time.sleep') +def test_wait_for_task_completion_loop(skip_sleep): + register_responses([ + ('ZAPI', 'results', ZRR['one_record_no_data']), + ('ZAPI', 'results', ZRR['one_record_no_data']), + ('ZAPI', 'results', ZRR['one_record_no_data']), + ]) + + def check_state(x): + check_state.counter += 1 + # True continues the wait loop + # False exits the loop + 
return (True, True, False)[check_state.counter - 1] + + check_state.counter = 0 + + # using response to build a request + zapi_iter, valid = build_zapi_response({'fake-iter': 'any'}) + my_obj = create_module(vol_module, DEFAULT_ARGS) + assert my_obj.wait_for_task_completion(zapi_iter, check_state) is None + + +@patch('time.sleep') +def test_wait_for_task_completion_loop_with_recoverable_error(skip_sleep): + register_responses([ + ('ZAPI', 'results', ZRR['one_record_no_data']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['one_record_no_data']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['one_record_no_data']), + ]) + + def check_state(x): + check_state.counter += 1 + return (True, True, False)[check_state.counter - 1] + + check_state.counter = 0 + + # using response to build a request + zapi_iter, valid = build_zapi_response({'fake-iter': 'any'}) + my_obj = create_module(vol_module, DEFAULT_ARGS) + assert my_obj.wait_for_task_completion(zapi_iter, check_state) is None + + +@patch('time.sleep') +def test_wait_for_task_completion_loop_with_non_recoverable_error(skip_sleep): + register_responses([ + ('ZAPI', 'results', ZRR['one_record_no_data']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['one_record_no_data']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['error']), + ('ZAPI', 'results', ZRR['error']), + ]) + + # using response to build a request + zapi_iter, valid = build_zapi_response({'fake-iter': 'any'}) + my_obj = create_module(vol_module, DEFAULT_ARGS) + assert str(my_obj.wait_for_task_completion(zapi_iter, lambda x: True)) == ZAPI_ERROR + + +@patch('time.sleep') +def test_start_encryption_conversion(skip_sleep): + 
register_responses([ + ('ZAPI', 'volume-encryption-conversion-start', ZRR['success']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_running']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_running']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_idle']), + ]) + module_args = { + 'wait_for_completion': True, + 'max_wait_time': 120 + } + my_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert my_obj.start_encryption_conversion(True) is None + + +@patch('time.sleep') +def test_error_on_wait_for_start_encryption_conversion(skip_sleep): + register_responses([ + ('ZAPI', 'volume-encryption-conversion-start', ZRR['success']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_running']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_running']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ]) + module_args = { + 'wait_for_completion': True, + 'max_wait_time': 280 + } + my_obj = create_module(vol_module, DEFAULT_ARGS, module_args) + error = expect_and_capture_ansible_exception(my_obj.start_encryption_conversion, 'fail', True)['msg'] + assert error == 'Error getting volume encryption_conversion status: %s' % ZAPI_ERROR + + +def test_error_start_encryption_conversion(): + register_responses([ + ('ZAPI', 'volume-encryption-conversion-start', ZRR['error']), + ]) + module_args = { + 'wait_for_completion': True + } + my_obj = 
create_module(vol_module, DEFAULT_ARGS, module_args) + error = expect_and_capture_ansible_exception(my_obj.start_encryption_conversion, 'fail', True)['msg'] + assert error == 'Error enabling encryption for volume test_vol: %s' % ZAPI_ERROR + + +@patch('time.sleep') +def test_wait_for_volume_encryption_conversion_with_non_recoverable_error(skip_sleep): + register_responses([ + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_running']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_running']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ]) + my_obj = create_module(vol_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.wait_for_volume_encryption_conversion, 'fail')['msg'] + assert error == 'Error getting volume encryption_conversion status: %s' % ZAPI_ERROR + + +@patch('time.sleep') +def test_wait_for_volume_encryption_conversion(skip_sleep): + register_responses([ + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_running']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['error']), + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_idle']), + ]) + my_obj = create_module(vol_module, DEFAULT_ARGS) + assert my_obj.wait_for_volume_encryption_conversion() is None + + +def test_wait_for_volume_encryption_conversion_bad_status(): 
+ register_responses([ + ('ZAPI', 'volume-encryption-conversion-get-iter', ZRR['vol_encryption_conversion_status_error']), + ]) + my_obj = create_module(vol_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.wait_for_volume_encryption_conversion, 'fail')['msg'] + assert error == 'Error converting encryption for volume test_vol: other' + + +@patch('time.sleep') +def test_wait_for_volume_move_with_non_recoverable_error(skip_sleep): + register_responses([ + ('ZAPI', 'volume-move-get-iter', ZRR['vol_move_status_running']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ('ZAPI', 'volume-move-get-iter', ZRR['vol_move_status_running']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ]) + my_obj = create_module(vol_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.wait_for_volume_move, 'fail')['msg'] + assert error == 'Error getting volume move status: %s' % ZAPI_ERROR + + +@patch('time.sleep') +def test_wait_for_volume_move(skip_sleep): + register_responses([ + ('ZAPI', 'volume-move-get-iter', ZRR['vol_move_status_running']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ('ZAPI', 'volume-move-get-iter', ZRR['error']), + ('ZAPI', 'volume-move-get-iter', ZRR['vol_move_status_idle']), + ]) + my_obj = create_module(vol_module, DEFAULT_ARGS) + assert my_obj.wait_for_volume_move() is None + + +def test_wait_for_volume_move_bad_status(): + register_responses([ + ('ZAPI', 'volume-move-get-iter', ZRR['vol_move_status_error']), + ]) + my_obj = create_module(vol_module, DEFAULT_ARGS) + error = expect_and_capture_ansible_exception(my_obj.wait_for_volume_move, 'fail')['msg'] + assert error == 'Error moving volume test_vol: 
some info' + + +def test_error_validate_snapshot_auto_delete(): + module_args = { + 'snapshot_auto_delete': { + 'commitment': 'whatever', + 'unknown': 'unexpected option' + } + } + error = "snapshot_auto_delete option 'unknown' is not valid." + assert create_module(vol_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_get_snapshot_auto_delete_attributes(): + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexgroup']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + result = create_module(vol_module, DEFAULT_ARGS).get_volume() + assert 'snapshot_auto_delete' in result + assert 'is_autodelete_enabled' not in result['snapshot_auto_delete'] + assert result['snapshot_auto_delete']['state'] == 'on' + + +def test_error_on_get_efficiency_info(): + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['error']), + ]) + error = 'Error fetching efficiency policy for volume test_vol: %s' % ZAPI_ERROR + assert call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] == error + + +def test_create_volume_from_main(): + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-create', ZRR['success']), + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-modify-iter', ZRR['success']), + ('ZAPI', 'volume-unmount', ZRR['success']), + ('ZAPI', 'volume-offline', ZRR['success']) + ]) + args = dict(DEFAULT_ARGS) + del args['space_slo'] + module_args = { + 'aggregate_name': MOCK_VOL['aggregate'], + 'comment': 'some comment', + 'is_online': False, + 'space_guarantee': 'file', + 'tiering_policy': 'snapshot-only', + 'volume_security_style': 'unix', + 'vserver_dr_protection': 'unprotected', + } + assert call_main(my_main, args, module_args)['changed'] + + +def test_error_create_volume_change_in_type(): + register_responses([ + ('ZAPI', 'volume-get-iter', ZRR['no_records']), + ('ZAPI', 'volume-create', 
ZRR['success']), + ('ZAPI', 'volume-get-iter', ZRR['get_flexvol']), + ('ZAPI', 'sis-get-iter', ZRR['no_records']), + ]) + args = dict(DEFAULT_ARGS) + module_args = { + 'aggregate_name': MOCK_VOL['aggregate'], + 'type': 'dp', + } + error = 'Error: volume type was not set properly at creation time. Current: rw, desired: dp.' + assert call_main(my_main, args, module_args, fail=True)['msg'] == error + + +def test_create_volume_attribute(): + obj = create_module(vol_module, DEFAULT_ARGS) + # str + obj.parameters['option_name'] = 'my_option' + parent = netapp_utils.zapi.NaElement('results') + obj.create_volume_attribute(None, parent, 'zapi_name', 'option_name') + print(parent.to_string()) + assert parent['zapi_name'] == 'my_option' + # int - fail, unless converted + obj.parameters['option_name'] = 123 + expect_and_capture_ansible_exception(obj.create_volume_attribute, TypeError, None, parent, 'zapi_name', 'option_name') + parent = netapp_utils.zapi.NaElement('results') + obj.create_volume_attribute(None, parent, 'zapi_name', 'option_name', int) + assert parent['zapi_name'] == '123' + # boolmodify_volume_efficiency_config + obj.parameters['option_name'] = False + parent = netapp_utils.zapi.NaElement('results') + obj.create_volume_attribute(None, parent, 'zapi_name', 'option_name', bool) + assert parent['zapi_name'] == 'false' + # parent->attrs->attr + # create child + parent = netapp_utils.zapi.NaElement('results') + obj.create_volume_attribute('child', parent, 'zapi_name', 'option_name', bool) + assert parent['child']['zapi_name'] == 'false' + # use existing child in parent + obj.create_volume_attribute('child', parent, 'zapi_name2', 'option_name', bool) + assert parent['child']['zapi_name2'] == 'false' + # pass child + parent = netapp_utils.zapi.NaElement('results') + child = netapp_utils.zapi.NaElement('child') + obj.create_volume_attribute(child, parent, 'zapi_name', 'option_name', bool) + assert parent['child']['zapi_name'] == 'false' + + +def 
test_check_invoke_result(): + register_responses([ + # 3rd run + ('ZAPI', 'job-get', ZRR['job_success']), + # 3th run + ('ZAPI', 'job-get', ZRR['job_failure']), + ]) + module_args = { + 'time_out': 0 + } + obj = create_module(vol_module, DEFAULT_ARGS, module_args) + # 1 - operation failed immediately + error = 'Operation failed when testing volume.' + assert error in expect_and_capture_ansible_exception(obj.check_invoke_result, 'fail', ZRR['failed_results'][0], 'testing')['msg'] + # 2 - operation in progress - exit immediately as time_out is 0 + assert obj.check_invoke_result(ZRR['async_results'][0], 'testing') is None + module_args = { + 'time_out': 10 + } + # 3 - operation in progress - job reported success + obj = create_module(vol_module, DEFAULT_ARGS, module_args) + assert obj.check_invoke_result(ZRR['async_results'][0], 'testing') is None + # 4 - operation in progress - job reported a failure + obj = create_module(vol_module, DEFAULT_ARGS, module_args) + error = 'Error when testing volume: failure' + assert error in expect_and_capture_ansible_exception(obj.check_invoke_result, 'fail', ZRR['async_results'][0], 'testing')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_autosize.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_autosize.py new file mode 100644 index 000000000..662d95bfe --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_autosize.py @@ -0,0 +1,367 @@ +# (c) 2019, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_volume_autosize ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: 
disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import call_main, patch_ansible, create_module, create_and_apply +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_autosize \ + import NetAppOntapVolumeAutosize as autosize_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'get_uuid': (200, {'records': [{'uuid': 'testuuid'}]}, None), + 'get_autosize': (200, + {'uuid': 'testuuid', + 'name': 'testname', + 'autosize': {"maximum": 10737418240, + "minimum": 22020096, + "grow_threshold": 99, + "shrink_threshold": 40, + "mode": "grow" + } + }, None), + 'get_autosize_empty': (200, { + 'uuid': 'testuuid', + 'name': 'testname', + 'autosize': {} + }, None) +}) + + +MOCK_AUTOSIZE = { + 'grow_threshold_percent': 99, + 'maximum_size': '10g', + 'minimum_size': '21m', + 'increment_size': '10m', + 'mode': 'grow', + 'shrink_threshold_percent': 40, + 'vserver': 'test_vserver', + 'volume': 'test_volume' +} + + +autosize_info = { + 'grow-threshold-percent': MOCK_AUTOSIZE['grow_threshold_percent'], + 'maximum-size': '10485760', + 'minimum-size': '21504', + 'increment-size': '10240', + 'mode': 
MOCK_AUTOSIZE['mode'], + 'shrink-threshold-percent': MOCK_AUTOSIZE['shrink_threshold_percent'] +} + + +ZRR = zapi_responses({ + 'get_autosize': build_zapi_response(autosize_info) +}) + + +DEFAULT_ARGS = { + 'vserver': MOCK_AUTOSIZE['vserver'], + 'volume': MOCK_AUTOSIZE['volume'], + 'grow_threshold_percent': MOCK_AUTOSIZE['grow_threshold_percent'], + 'maximum_size': MOCK_AUTOSIZE['maximum_size'], + 'minimum_size': MOCK_AUTOSIZE['minimum_size'], + 'mode': MOCK_AUTOSIZE['mode'], + 'shrink_threshold_percent': MOCK_AUTOSIZE['shrink_threshold_percent'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!' +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + args = dict(DEFAULT_ARGS) + args.pop('vserver') + error = 'missing required arguments: vserver' + assert create_module(autosize_module, args, fail=True)['msg'] == error + + +def test_idempotent_modify(): + register_responses([ + ('ZAPI', 'volume-autosize-get', ZRR['get_autosize']), + ]) + module_args = { + 'use_rest': 'never' + } + assert not create_and_apply(autosize_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_modify(): + register_responses([ + ('ZAPI', 'volume-autosize-get', ZRR['get_autosize']), + ('ZAPI', 'volume-autosize-set', ZRR['success']), + ]) + module_args = { + 'increment_size': MOCK_AUTOSIZE['increment_size'], + 'maximum_size': '11g', + 'use_rest': 'never' + } + assert create_and_apply(autosize_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_zapi__create_get_volume_return_no_data(): + module_args = { + 'use_rest': 'never' + } + my_obj = create_module(autosize_module, DEFAULT_ARGS, module_args) + assert my_obj._create_get_volume_return(build_zapi_response({'unsupported_key': 'value'})[0]) is None + + +def test_error_get(): + register_responses([ + ('ZAPI', 'volume-autosize-get', ZRR['error']), + ]) + module_args = { + 'use_rest': 'never' + } + error = 'Error fetching volume autosize 
info for test_volume: NetApp API failed. Reason - 12345:synthetic error for UT purpose.' + assert create_and_apply(autosize_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_error_modify(): + register_responses([ + ('ZAPI', 'volume-autosize-get', ZRR['get_autosize']), + ('ZAPI', 'volume-autosize-set', ZRR['error']), + ]) + module_args = { + 'increment_size': MOCK_AUTOSIZE['increment_size'], + 'maximum_size': '11g', + 'use_rest': 'never' + } + error = 'Error modifying volume autosize for test_volume: NetApp API failed. Reason - 12345:synthetic error for UT purpose.' + assert create_and_apply(autosize_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_successful_reset(): + register_responses([ + ('ZAPI', 'volume-autosize-get', ZRR['get_autosize']), + ('ZAPI', 'volume-autosize-set', ZRR['success']), + ]) + args = dict(DEFAULT_ARGS) + for arg in ('maximum_size', 'minimum_size', 'grow_threshold_percent', 'shrink_threshold_percent', 'mode'): + # remove args that are eclusive with reset + args.pop(arg) + module_args = { + 'reset': True, + 'use_rest': 'never' + } + assert create_and_apply(autosize_module, args, module_args)['changed'] + + +def test_rest_error_volume_not_found(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['zero_records']), + ]) + error = 'Error fetching volume autosize info for test_volume: volume not found for vserver test_vserver.' + assert create_and_apply(autosize_module, DEFAULT_ARGS, fail=True)['msg'] == error + + +def test_rest_error_get(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['generic_error']), + ]) + module_args = { + 'maximum_size': '11g' + } + error = 'Error fetching volume autosize info for test_volume: calling: storage/volumes: got Expected error.' 
+ assert create_and_apply(autosize_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_rest_error_patch(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_autosize']), + ('PATCH', 'storage/volumes/testuuid', SRR['generic_error']), + ]) + module_args = { + 'maximum_size': '11g' + } + error = 'Error modifying volume autosize for test_volume: calling: storage/volumes/testuuid: got Expected error.' + assert create_and_apply(autosize_module, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_rest_successful_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_autosize']), + ('PATCH', 'storage/volumes/testuuid', SRR['success']), + ]) + module_args = { + 'maximum_size': '11g' + } + assert create_and_apply(autosize_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_rest_idempotent_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_autosize']), + ]) + assert not create_and_apply(autosize_module, DEFAULT_ARGS)['changed'] + + +def test_rest_idempotent_modify_no_attributes(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_autosize_empty']), + ]) + module_args = { + 'maximum_size': '11g' + } + assert not create_and_apply(autosize_module, DEFAULT_ARGS)['changed'] + + +def test_rest__create_get_volume_return_no_data(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(autosize_module, DEFAULT_ARGS) + assert my_obj._create_get_volume_return({'unsupported_key': 'value'}) == {'uuid': None} + + +def test_rest_modify_no_data(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(autosize_module, DEFAULT_ARGS) + # remove all attributes + for arg in ('maximum_size', 'minimum_size', 'grow_threshold_percent', 'shrink_threshold_percent', 'mode'): + 
my_obj.parameters.pop(arg) + assert my_obj.modify_volume_autosize('uuid') is None + + +def test_rest_convert_to_bytes(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(autosize_module, DEFAULT_ARGS) + + module_args = { + 'minimum_size': '11k' + } + assert my_obj.convert_to_byte('minimum_size', module_args) == 11 * 1024 + + module_args = { + 'minimum_size': '11g' + } + assert my_obj.convert_to_byte('minimum_size', module_args) == 11 * 1024 * 1024 * 1024 + + +def test_rest_convert_to_kb(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + my_obj = create_module(autosize_module, DEFAULT_ARGS) + + module_args = { + 'minimum_size': '11k' + } + assert my_obj.convert_to_kb('minimum_size', module_args) == 11 + + module_args = { + 'minimum_size': '11g' + } + assert my_obj.convert_to_kb('minimum_size', module_args) == 11 * 1024 * 1024 + + +def test_rest_invalid_values(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_autosize']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_autosize']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_autosize']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_autosize']) + ]) + module_args = { + 'minimum_size': '11kb' + } + error = 'minimum_size must end with a k, m, g or t, found b in 11kb.' + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + module_args = { + 'minimum_size': '11kk' + } + error = 'minimum_size must start with a number, found 11k in 11kk.' + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + module_args = { + 'minimum_size': '' + } + error = "minimum_size must start with a number, and must end with a k, m, g or t, found ''." 
+ assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + module_args = { + 'minimum_size': 10 + } + error = 'minimum_size must end with a k, m, g or t, found 0 in 10.' + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + +def test_rest_unsupported_parameters(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_autosize']) + ]) + module_args = { + 'increment_size': '11k' + } + error = 'Rest API does not support increment size, please switch to ZAPI' + assert call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] == error + + # reset is not supported - when set to True + module_args = { + 'reset': True + } + args = dict(DEFAULT_ARGS) + for arg in ('maximum_size', 'minimum_size', 'grow_threshold_percent', 'shrink_threshold_percent', 'mode'): + # remove args that are eclusive with reset + args.pop(arg) + error = 'Rest API does not support reset, please switch to ZAPI' + assert call_main(my_main, args, module_args, fail=True)['msg'] == error + + # reset is ignored when False + module_args = { + 'reset': False + } + assert not call_main(my_main, args, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_missing_netapp_lib(mock_has_netapp_lib): + module_args = { + 'use_rest': 'never', + } + mock_has_netapp_lib.return_value = False + msg = 'Error: the python NetApp-Lib module is required. 
Import error: None' + assert msg == create_module(autosize_module, DEFAULT_ARGS, module_args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone.py new file mode 100644 index 000000000..f68401348 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone.py @@ -0,0 +1,210 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_volume_clone''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + call_main, create_and_apply, create_module, expect_and_capture_ansible_exception, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import zapi_responses, build_zapi_response, build_zapi_error + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_clone \ + import NetAppONTAPVolumeClone as my_module, main as my_main + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +clone_info = { + 'attributes': { + 'volume-clone-info': { + 'volume': 'ansible', + 'parent-volume': 'ansible'}}} + +clone_info_split_in_progress = { + 'attributes': { + 'volume-clone-info': { + 'volume': 'ansible', + 'parent-volume': 'ansible', + 'block-percentage-complete': 20, + 'blocks-scanned': 
56676, + 'blocks-updated': 54588}}} + +ZRR = zapi_responses({ + 'clone_info': build_zapi_response(clone_info, 1), + 'clone_info_split_in_progress': build_zapi_response(clone_info_split_in_progress, 1), + 'error_no_clone': build_zapi_error(15661, 'flexclone not found.') +}) + +DEFAULT_ARGS = { + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'vserver': 'ansible', + 'volume': 'ansible', + 'parent_volume': 'ansible', + 'split': None, + 'use_rest': 'never' +} + + +def test_module_fail_when_required_args_missing(): + ''' test required arguments are reported as errors ''' + msg = create_module(my_module, fail=True)['msg'] + print('Info: %s' % msg) + + +def test_ensure_get_called(): + ''' test get_volume_clone() for non-existent volume clone''' + register_responses([ + ('volume-clone-get', ZRR['empty']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + assert my_obj.get_volume_clone() is None + + +def test_ensure_get_called_existing(): + ''' test get_volume_clone() for existing volume clone''' + register_responses([ + ('volume-clone-get', ZRR['clone_info']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + current = {'split': False} + assert my_obj.get_volume_clone() == current + + +def test_ensure_get_called_no_clone_error(): + ''' test get_volume_clone() for existing volume clone''' + register_responses([ + ('volume-clone-get', ZRR['error_no_clone']) + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + current = {'split': False} + assert my_obj.get_volume_clone() is None + + +def test_successful_create(): + ''' test creating volume_clone without split and testing idempotency ''' + register_responses([ + ('volume-clone-get', ZRR['empty']), + ('volume-clone-create', ZRR['success']), + ('volume-clone-get', ZRR['clone_info']), + ]) + module_args = { + 'parent_snapshot': 'abc', + 'volume_type': 'dp', + 'qos_policy_group_name': 'abc', + 'junction_path': 'abc', + 'uid': '1', + 'gid': '1' + } + assert 
create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_create_with_split(): + ''' test creating volume_clone with split and testing idempotency ''' + register_responses([ + # first test, create and split + ('volume-clone-get', ZRR['empty']), + ('volume-clone-create', ZRR['success']), + ('volume-clone-split-start', ZRR['success']), + # second test, clone already exists but is not split + ('volume-clone-get', ZRR['clone_info']), + ('volume-clone-split-start', ZRR['success']), + # third test, clone already exists, split already in progress + ('volume-clone-get', ZRR['clone_info_split_in_progress']), + ]) + module_args = { + 'parent_snapshot': 'abc', + 'volume_type': 'dp', + 'qos_policy_group_name': 'abc', + 'junction_path': 'abc', + 'uid': '1', + 'gid': '1', + 'split': True + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successful_create_with_parent_vserver(): + ''' test creating volume_clone with split and testing idempotency ''' + register_responses([ + # first test, create and split + ('volume-clone-get', ZRR['empty']), + ('volume-clone-create', ZRR['success']), + ('volume-clone-split-start', ZRR['success']), + # second test, clone already exists but is not split + ('volume-clone-get', ZRR['clone_info']), + ('volume-clone-split-start', ZRR['success']), + # third test, clone already exists, split already in progress + ('volume-clone-get', ZRR['clone_info_split_in_progress']), + ]) + module_args = { + 'parent_snapshot': 'abc', + 'parent_vserver': 'abc', + 'volume_type': 'dp', + 'qos_policy_group_name': 'abc', + 'space_reserve': 'volume', + 'split': True + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert 
create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_vserver_cluster_options_give_error(): + module_args = { + 'parent_snapshot': 'abc', + 'parent_vserver': 'abc', + 'volume_type': 'dp', + 'qos_policy_group_name': 'abc', + 'junction_path': 'abc', + 'uid': '1', + 'gid': '1' + } + msg = create_module(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert "parameters are mutually exclusive: " in msg + print('Info: %s' % msg) + + +def test_if_all_methods_catch_exception(): + ''' test if all methods catch exception ''' + register_responses([ + ('volume-clone-get', ZRR['error']), + ('volume-clone-create', ZRR['error']), + ('volume-clone-split-start', ZRR['error']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + msg = expect_and_capture_ansible_exception(my_obj.get_volume_clone, 'fail')['msg'] + assert 'Error fetching volume clone information ' in msg + msg = expect_and_capture_ansible_exception(my_obj.create_volume_clone, 'fail')['msg'] + assert 'Error creating volume clone: ' in msg + msg = expect_and_capture_ansible_exception(my_obj.start_volume_clone_split, 'fail')['msg'] + assert 'Error starting volume clone split: ' in msg + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_missing_netapp_lib(mock_has_netapp_lib): + ''' test error when netapp_lib is missing ''' + mock_has_netapp_lib.return_value = False + msg = create_module(my_module, DEFAULT_ARGS, fail=True)['msg'] + assert 'Error: the python NetApp-Lib module is required. 
Import error: None' == msg + + +def test_main(): + ''' validate call to main() ''' + register_responses([ + ('volume-clone-get', ZRR['empty']), + ('volume-clone-create', ZRR['success']), + ]) + assert call_main(my_main, DEFAULT_ARGS)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone_rest.py new file mode 100644 index 000000000..ba0767d42 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_clone_rest.py @@ -0,0 +1,244 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_clone \ + import NetAppONTAPVolumeClone as my_module # module under test + +# needed for get and modify/delete as they still use ZAPI +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request + +clone_info = { + "clone": { + "is_flexclone": True, + "parent_snapshot": { + "name": "clone_ansibleVolume12_.2022-01-25_211704.0" + }, + "parent_svm": { + "name": "ansibleSVM" + }, + "parent_volume": { + "name": 
"ansibleVolume12" + } + }, + "name": "ansibleVolume12_clone", + "nas": { + "gid": 0, + "uid": 0 + }, + "svm": { + "name": "ansibleSVM" + }, + "uuid": "2458688d-7e24-11ec-a267-005056b30cfa" +} + +clone_info_no_uuid = dict(clone_info) +clone_info_no_uuid.pop('uuid') +clone_info_not_a_clone = copy.deepcopy(clone_info) +clone_info_not_a_clone['clone']['is_flexclone'] = False + +SRR = rest_responses({ + 'volume_clone': ( + 200, + {'records': [ + clone_info, + ]}, None + ), + 'volume_clone_no_uuid': ( + 200, + {'records': [ + clone_info_no_uuid, + ]}, None + ), + 'volume_clone_not_a_clone': ( + 200, + {'records': [ + clone_info_not_a_clone, + ]}, None + ), + 'two_records': ( + 200, + {'records': [ + clone_info, + clone_info_no_uuid, + ]}, None + ) +}) + + +DEFAULT_ARGS = { + 'vserver': 'ansibleSVM', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always', + 'name': 'clone_of_parent_volume', + 'parent_volume': 'parent_volume' +} + + +def test_successfully_create_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('POST', 'storage/volumes', SRR['volume_clone']), + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {})['changed'] + + +def test_error_getting_volume_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['generic_error']), + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error getting volume clone clone_of_parent_volume: calling: storage/volumes: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_module_object.get_volume_clone_rest, 'fail')['msg'] + + +def test_error_creating_volume_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'storage/volumes', SRR['generic_error']), + ]) + my_module_object = create_module(my_module, DEFAULT_ARGS) + msg = 'Error creating volume clone clone_of_parent_volume: calling: storage/volumes: got Expected error.' + assert msg in expect_and_capture_ansible_exception(my_module_object.create_volume_clone_rest, 'fail')['msg'] + + +def test_error_space_reserve_volume_clone(): + error = create_module(my_module, fail=True)['msg'] + print('Info: %s' % error) + assert 'missing required arguments:' in error + assert 'name' in error + + +def test_successfully_create_with_optional_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('POST', 'storage/volumes', SRR['volume_clone']), + ]) + module_args = { + 'qos_policy_group_name': 'test_policy_name', + 'parent_snapshot': 'test_snapshot', + 'volume_type': 'rw', + 'junction_path': '/test_junction_path', + 'uid': 10, + 'gid': 20, + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_create_with_parent_vserver_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('POST', 'storage/volumes', SRR['volume_clone']), + ]) + module_args = { + 'qos_policy_group_name': 'test_policy_name', + 'parent_snapshot': 'test_snapshot', + 'volume_type': 'rw', + 'parent_vserver': 'test_vserver', + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_create_and_split_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('POST', 'storage/volumes', SRR['volume_clone']), + ('PATCH', 
'storage/volumes/2458688d-7e24-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = {'split': True} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_create_no_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('POST', 'storage/volumes', SRR['empty_records']), + ]) + module_args = {'split': True} + msg = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg == 'Error starting volume clone split clone_of_parent_volume: clone UUID is not set' + + +def test_negative_create_no_uuid_in_response(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('POST', 'storage/volumes', SRR['volume_clone_no_uuid']), + ]) + module_args = {'split': True} + msg = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg.startswith('Error: failed to parse create clone response: uuid key not present in') + + +def test_negative_create_bad_response(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('POST', 'storage/volumes', SRR['two_records']), + ]) + module_args = {'split': True} + msg = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg.startswith('Error: failed to parse create clone response: calling: storage/volumes: unexpected response ') + + +def test_successfully_split_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['volume_clone']), + ('PATCH', 'storage/volumes/2458688d-7e24-11ec-a267-005056b30cfa', SRR['empty_good']), + ]) + module_args = {'split': True} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_split_volume_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('PATCH', 
'storage/volumes/2458688d-7e24-11ec-a267-005056b30cfa', SRR['generic_error']), + ]) + my_obj = create_module(my_module, DEFAULT_ARGS) + my_obj.uuid = '2458688d-7e24-11ec-a267-005056b30cfa' + my_obj.parameters['split'] = True + msg = "Error starting volume clone split clone_of_parent_volume: calling: storage/volumes/2458688d-7e24-11ec-a267-005056b30cfa: got Expected error." + assert msg == expect_and_capture_ansible_exception(my_obj.start_volume_clone_split_rest, 'fail')['msg'] + + +def test_volume_not_a_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['volume_clone_not_a_clone']), + ]) + module_args = {'split': True} + assert not create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_volume_not_a_clone(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['volume_clone_not_a_clone']), + ]) + module_args = {'split': False} + msg = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + assert msg == 'Error: a volume clone_of_parent_volume which is not a FlexClone already exists, and split not requested.' 
diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_efficiency.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_efficiency.py new file mode 100644 index 000000000..104cc8e51 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_efficiency.py @@ -0,0 +1,346 @@ +''' unit tests ONTAP Ansible module: na_ontap_volume_efficiency ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_efficiency \ + import NetAppOntapVolumeEfficiency as volume_efficiency_module, main # module under test + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +DEFAULT_ARGS = { + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'vserver': 'vs1', + 'path': '/vol/volTest', + 'policy': 'auto', + 'use_rest': 'never', + 'enable_compression': True, + 'enable_inline_compression': True, + 'enable_cross_volume_inline_dedupe': True, + 'enable_inline_dedupe': True, + 'enable_data_compaction': True, + 'enable_cross_volume_background_dedupe': True +} + +DEFAULT_ARGS_REST = { + 'hostname': 
'10.10.10.10', + 'username': 'username', + 'password': 'password', + 'vserver': 'vs1', + 'path': '/vol/volTest', + 'policy': 'auto', + 'use_rest': 'always' +} + + +def return_vol_info(state='enabled', status='idle', policy='auto'): + return { + 'num-records': 1, + 'attributes-list': { + 'sis-status-info': { + 'path': '/vol/volTest', + 'state': state, + 'schedule': None, + 'status': status, + 'policy': policy, + 'is-inline-compression-enabled': 'true', + 'is-compression-enabled': 'true', + 'is-inline-dedupe-enabled': 'true', + 'is-data-compaction-enabled': 'true', + 'is-cross-volume-inline-dedupe-enabled': 'true', + 'is-cross-volume-background-dedupe-enabled': 'true' + } + } + } + + +ZRR = zapi_responses({ + 'vol_eff_info': build_zapi_response(return_vol_info()), + 'vol_eff_info_disabled': build_zapi_response(return_vol_info(state='disabled')), + 'vol_eff_info_running': build_zapi_response(return_vol_info(status='running')), + 'vol_eff_info_policy': build_zapi_response(return_vol_info(policy='default')) +}) + + +def return_vol_info_rest(state='enabled', status='idle', policy='auto', compaction='inline'): + return { + "records": [{ + "uuid": "25311eff", + "name": "test_e", + "efficiency": { + "compression": "both", + "storage_efficiency_mode": "default", + "dedupe": "both", + "cross_volume_dedupe": "both", + "compaction": compaction, + "schedule": "-", + "volume_path": "/vol/test_e", + "state": state, + "op_state": status, + "type": "regular", + "progress": "Idle for 02:06:26", + "last_op_begin": "Mon Jan 02 00:10:00 2023", + "last_op_end": "Mon Jan 02 00:10:00 2023", + "last_op_size": 0, + "last_op_state": "Success", + "policy": {"name": policy} + } + }], + "num_records": 1 + } + + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'volume_efficiency_info': (200, return_vol_info_rest(), None), + 'volume_efficiency_status_running': (200, return_vol_info_rest(status='active'), None), + 'volume_efficiency_disabled': (200, 
return_vol_info_rest(state='disabled'), None), + 'volume_efficiency_modify': (200, return_vol_info_rest(compaction='none'), None), + "unauthorized": (403, None, {'code': 6, 'message': 'Unexpected argument "storage_efficiency_mode".'}), + "unexpected_arg": (403, None, {'code': 6, 'message': "not authorized for that command"}) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "vserver"] + error = create_module(volume_efficiency_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['path'] + assert 'one of the following is required: path, volume_name' in create_module(volume_efficiency_module, DEFAULT_ARGS_COPY, fail=True)['msg'] + + +def test_ensure_get_called_existing(): + ''' test get_volume_efficiency for existing config ''' + register_responses([ + ('sis-get-iter', ZRR['vol_eff_info']) + ]) + my_obj = create_module(volume_efficiency_module, DEFAULT_ARGS) + assert my_obj.get_volume_efficiency() + + +def test_successful_enable(): + ''' enable volume_efficiency and testing idempotency ''' + register_responses([ + ('sis-get-iter', ZRR['vol_eff_info_disabled']), + ('sis-enable', ZRR['success']), + ('sis-get-iter', ZRR['vol_eff_info']), + # idempotency check + ('sis-get-iter', ZRR['vol_eff_info']), + + ]) + DEFAULT_ARGS_COPY = DEFAULT_ARGS.copy() + del DEFAULT_ARGS_COPY['path'] + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS_COPY, {'volume_name': 'volTest'})['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS)['changed'] + + +def test_successful_disable(): + ''' disable volume_efficiency and testing idempotency ''' + register_responses([ + ('sis-get-iter', ZRR['vol_eff_info']), + ('sis-disable', ZRR['success']), + # idempotency check + ('sis-get-iter', 
ZRR['vol_eff_info_disabled']), + + ]) + args = { + 'state': 'absent', + 'use_rest': 'never' + } + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, args)['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_successful_modify(): + ''' modifying volume_efficiency config and testing idempotency ''' + register_responses([ + ('sis-get-iter', ZRR['vol_eff_info']), + ('sis-set-config', ZRR['success']), + # idempotency check + ('sis-get-iter', ZRR['vol_eff_info_policy']), + + ]) + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS, {'policy': 'default'})['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS, {'policy': 'default'})['changed'] + + +def test_successful_start(): + ''' start volume_efficiency and testing idempotency ''' + register_responses([ + ('sis-get-iter', ZRR['vol_eff_info']), + ('sis-start', ZRR['success']), + # idempotency check + ('sis-get-iter', ZRR['vol_eff_info_running']), + + ]) + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS, {'volume_efficiency': 'start'})['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS, {'volume_efficiency': 'start'})['changed'] + + +def test_successful_stop(): + ''' stop volume_efficiency and testing idempotency ''' + register_responses([ + ('sis-get-iter', ZRR['vol_eff_info_running']), + ('sis-stop', ZRR['success']), + # idempotency check + ('sis-get-iter', ZRR['vol_eff_info']), + + ]) + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS, {'volume_efficiency': 'stop'})['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS, {'volume_efficiency': 'stop'})['changed'] + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('sis-get-iter', ZRR['error']), + ('sis-set-config', ZRR['error']), + ('sis-start', ZRR['error']), + ('sis-stop', ZRR['error']), + ('sis-enable', ZRR['error']), + ('sis-disable', 
ZRR['error']), + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('PATCH', 'storage/volumes', SRR['generic_error']), + ('PATCH', 'storage/volumes', SRR['unauthorized']), + ('PATCH', 'storage/volumes', SRR['unexpected_arg']) + ]) + vol_eff_obj = create_module(volume_efficiency_module, DEFAULT_ARGS) + assert 'Error getting volume efficiency' in expect_and_capture_ansible_exception(vol_eff_obj.get_volume_efficiency, 'fail')['msg'] + assert 'Error modifying storage efficiency' in expect_and_capture_ansible_exception(vol_eff_obj.modify_volume_efficiency, 'fail', {})['msg'] + assert 'Error starting storage efficiency' in expect_and_capture_ansible_exception(vol_eff_obj.start_volume_efficiency, 'fail')['msg'] + assert 'Error stopping storage efficiency' in expect_and_capture_ansible_exception(vol_eff_obj.stop_volume_efficiency, 'fail')['msg'] + assert 'Error enabling storage efficiency' in expect_and_capture_ansible_exception(vol_eff_obj.enable_volume_efficiency, 'fail')['msg'] + assert 'Error disabling storage efficiency' in expect_and_capture_ansible_exception(vol_eff_obj.disable_volume_efficiency, 'fail')['msg'] + + args = {'state': 'absent', 'enable_compression': True} + modify = {'enabled': 'disabled'} + vol_eff_obj = create_module(volume_efficiency_module, DEFAULT_ARGS_REST, args) + assert 'Error getting volume efficiency' in expect_and_capture_ansible_exception(vol_eff_obj.get_volume_efficiency, 'fail')['msg'] + assert 'Error in volume/efficiency patch' in expect_and_capture_ansible_exception(vol_eff_obj.modify_volume_efficiency, 'fail', {'arg': 1})['msg'] + assert 'cannot modify storage_efficiency' in expect_and_capture_ansible_exception(vol_eff_obj.modify_volume_efficiency, 'fail', {'arg': 1})['msg'] + assert 'user is not authorized' in expect_and_capture_ansible_exception(vol_eff_obj.modify_volume_efficiency, 'fail', {'arg': 1})['msg'] + # Error: cannot set compression keys: ['enable_compression'] + assert 'when 
volume efficiency already disabled' in expect_and_capture_ansible_exception(vol_eff_obj.validate_efficiency_compression, 'fail', {})['msg'] + assert 'when trying to disable volume' in expect_and_capture_ansible_exception(vol_eff_obj.validate_efficiency_compression, 'fail', modify)['msg'] + + +def test_successful_enable_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_disabled']), + ('PATCH', 'storage/volumes/25311eff', SRR['success']), + # idempotency check + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_info']), + ]) + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, {'use_rest': 'always'})['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, {'use_rest': 'always'})['changed'] + + +def test_successful_disable_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_info']), + ('PATCH', 'storage/volumes/25311eff', SRR['success']), + # idempotency check + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_disabled']), + ]) + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, {'state': 'absent'})['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, {'state': 'absent'})['changed'] + + +def test_successful_modify_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_info']), + ('PATCH', 'storage/volumes/25311eff', SRR['success']), + # idempotency check + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_modify']), + ]) + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, {'enable_data_compaction': False})['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, 
{'enable_data_compaction': False})['changed'] + + +def test_successful_enable_vol_efficiency_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_disabled']), + ('PATCH', 'storage/volumes/25311eff', SRR['success']), + # idempotency check + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_info']), + ]) + DEFAULT_ARGS_REST_COPY = DEFAULT_ARGS_REST.copy() + del DEFAULT_ARGS_REST_COPY['path'] + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST_COPY, {'volume_name': 'vol1'})['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST)['changed'] + + +def test_successful_start_rest_all_options(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_info']), + ('PATCH', 'storage/volumes/25311eff', SRR['success']), + # idempotency check + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_status_running']), + ]) + args = { + 'volume_efficiency': 'start', + 'start_ve_scan_old_data': True + } + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, args)['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_successful_stop_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_status_running']), + ('PATCH', 'storage/volumes/25311eff', SRR['success']), + # idempotency check + ('GET', 'cluster', SRR['is_rest_9_11_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_info']), + ]) + args = {'volume_efficiency': 'stop'} + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, args)['changed'] + assert not create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_negative_modify_rest_se_mode_no_version(): + 
register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + error = 'Error: Minimum version of ONTAP for storage_efficiency_mode is (9, 10, 1)' + assert error in create_module(volume_efficiency_module, DEFAULT_ARGS_REST, {'storage_efficiency_mode': 'default'}, fail=True)['msg'] + error = 'Error: cannot set storage_efficiency_mode in ZAPI' + assert error in create_module(volume_efficiency_module, DEFAULT_ARGS, {'storage_efficiency_mode': 'default'}, fail=True)['msg'] + + +def test_modify_rest_se_mode(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['volume_efficiency_info']), + ('PATCH', 'storage/volumes/25311eff', SRR['success']) + ]) + assert create_and_apply(volume_efficiency_module, DEFAULT_ARGS_REST, {'storage_efficiency_mode': 'efficient'})['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_rest.py new file mode 100644 index 000000000..47525beec --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_rest.py @@ -0,0 +1,1440 @@ +# (c) 2020-2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import copy +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + assert_no_warnings, assert_warning_was_raised, print_warnings, call_main, create_and_apply,\ + create_module, expect_and_capture_ansible_exception, patch_ansible +from 
ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume \ + import NetAppOntapVolume as volume_module, main as my_main # module under test + +# needed for get and modify/delete as they still use ZAPI +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +volume_info = { + "uuid": "7882901a-1aef-11ec-a267-005056b30cfa", + "comment": "carchi8py", + "name": "test_svm", + "state": "online", + "style": "flexvol", + "tiering": { + "policy": "backup", + "min_cooling_days": 0 + }, + "type": "rw", + "aggregates": [ + { + "name": "aggr1", + "uuid": "aggr1_uuid" + } + ], + "encryption": { + "enabled": True + }, + "efficiency": { + "compression": "none", + "policy": { + "name": "-" + } + }, + "files": { + "maximum": 2000 + }, + "nas": { + "gid": 0, + "security_style": "unix", + "uid": 0, + "unix_permissions": 654, + "path": '/this/path', + "export_policy": { + "name": "default" + } + }, + "snapshot_policy": { + "name": "default", + "uuid": "0a42a3d9-0c29-11ec-a267-005056b30cfa" + }, + "space": { + "logical_space": { + "enforcement": False, + "reporting": False, + }, + "size": 10737418240, + "snapshot": { + "reserve_percent": 5 + } + }, + "guarantee": { + "type": "volume" + }, + "snaplock": { + "type": "non_snaplock" + }, + "analytics": { + "state": "on" + } +} + +volume_info_mount = copy.deepcopy(volume_info) +volume_info_mount['nas']['path'] = '' +del volume_info_mount['nas']['path'] +volume_info_encrypt_off = copy.deepcopy(volume_info) +volume_info_encrypt_off['encryption']['enabled'] = False +volume_info_sl_enterprise = copy.deepcopy(volume_info) +volume_info_sl_enterprise['snaplock']['type'] = 'enterprise' +volume_info_sl_enterprise['snaplock']['retention'] = 
{'default': 'P30Y'} +volume_analytics_disabled = copy.deepcopy(volume_info) +volume_analytics_disabled['analytics']['state'] = 'off' +volume_analytics_initializing = copy.deepcopy(volume_info) +volume_analytics_initializing['analytics']['state'] = 'initializing' +volume_info_offline = copy.deepcopy(volume_info) +volume_info_offline['state'] = 'offline' +volume_info_tags = copy.deepcopy(volume_info) +volume_info_tags['_tags'] = ["team:csi", "environment:test"] + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_rest_96': (200, dict(version=dict(generation=9, major=6, minor=0, full='dummy_9_6_0')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'no_record': (200, {'num_records': 0, 'records': []}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + # Volume + 'get_volume': (200, {'records': [volume_info]}, None), + 'get_volume_sl_enterprise': (200, {'records': [volume_info_sl_enterprise]}, None), + 'get_volume_mount': (200, {'records': [volume_info_mount]}, None), + 'get_volume_encrypt_off': (200, {'records': [volume_info_encrypt_off]}, None), + # module specific responses + 'nas_app_record': (200, + {'records': [{"uuid": "09e9fd5e-8ebd-11e9-b162-005056b39fe7", + "name": "test_app", + "nas": { + "application_components": [{'xxx': 1}], + }}]}, None), + 'nas_app_record_by_uuid': (200, + {"uuid": "09e9fd5e-8ebd-11e9-b162-005056b39fe7", + "name": "test_app", + "nas": { + "application_components": [{'xxx': 1}], + "flexcache": { + "origin": {'svm': {'name': 'org_name'}} + } + }}, None), + 'get_aggr_one_object_store': (200, + {'records': ['one']}, None), + 'get_aggr_two_object_stores': (200, + {'records': ['two']}, None), + 'move_state_replicating': (200, {'movement': {'state': 
'replicating'}}, None), + 'move_state_success': (200, {'movement': {'state': 'success'}}, None), + 'encrypting': (200, {'encryption': {'status': {'message': 'initializing'}}}, None), + 'encrypted': (200, {'encryption': {'state': 'encrypted'}}, None), + 'analytics_off': (200, {'records': [volume_analytics_disabled]}, None), + 'analytics_initializing': (200, {'records': [volume_analytics_initializing]}, None), + 'one_svm_record': (200, {'records': [{'uuid': 'svm_uuid'}]}, None), + 'volume_info_offline': (200, {'records': [volume_info_offline]}, None), + 'volume_info_tags': (200, {'records': [volume_info_tags]}, None) +}) + +DEFAULT_APP_ARGS = { + 'name': 'test_svm', + 'vserver': 'ansibleSVM', + 'nas_application_template': dict( + tiering=None + ), + # 'aggregate_name': 'whatever', # not used for create when using REST application/applications + 'size': 10, + 'size_unit': 'gb', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always' +} + +DEFAULT_VOLUME_ARGS = { + 'name': 'test_svm', + 'vserver': 'ansibleSVM', + 'aggregate_name': 'aggr1', + 'size': 10, + 'size_unit': 'gb', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'always' +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + exc = create_module(volume_module, fail=True) + print('Info: %s' % exc['msg']) + assert 'missing required arguments:' in exc['msg'] + + +def test_fail_if_aggr_is_set(): + module_args = {'aggregate_name': 'should_fail'} + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + error = 'Conflict: aggregate_name is not supported when application template is enabled. 
Found: aggregate_name: should_fail' + assert create_module(volume_module, DEFAULT_APP_ARGS, module_args, fail=True)['msg'] == error + + +def test_missing_size(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # GET volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + ]) + data = dict(DEFAULT_APP_ARGS) + data.pop('size') + error = 'Error: "size" is required to create nas application.' + assert create_and_apply(volume_module, data, fail=True)['msg'] == error + + +def test_mismatched_tiering_policies(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + module_args = { + 'tiering_policy': 'none', + 'nas_application_template': {'tiering': {'policy': 'auto'}} + } + error = 'Conflict: if tiering_policy and nas_application_template tiering policy are both set, they must match.'\ + ' Found "none" and "auto".' + assert create_module(volume_module, DEFAULT_APP_ARGS, module_args, fail=True)['msg'] == error + + +def test_rest_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # GET volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + ('POST', 'application/applications', SRR['generic_error']), # POST application/applications + ]) + error = 'Error in create_nas_application: calling: application/applications: got %s.' 
% SRR['generic_error'][2] + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, fail=True)['msg'] == error + + +def test_rest_successfully_created(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + ('POST', 'application/applications', SRR['empty_good']), # POST application/applications + ('GET', 'storage/volumes', SRR['get_volume']), + ]) + assert create_and_apply(volume_module, DEFAULT_APP_ARGS)['changed'] + + +def test_rest_create_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + + ]) + assert not create_and_apply(volume_module, DEFAULT_APP_ARGS)['changed'] + + +def test_rest_successfully_created_with_modify(): + ''' since language is not supported in application, the module is expected to: + 1. create the volume using application REST API + 2. immediately modify the volume to update options which are not available in the nas template. 
+ ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['empty_good']), # set unix_permissions + ]) + module_args = { + 'language': 'fr', + 'unix_permissions': '---rw-r-xr-x' + } + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + + +def test_rest_successfully_resized(): + ''' make sure resize if using RESP API if sizing_method is present + ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['empty_good']), # PATCH storage/volumes + ]) + module_args = { + 'sizing_method': 'add_new_resources', + 'size': 20737418240 + } + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + + +def test_rest_volume_create_modify_tags(): + ''' volume create, modify with tags + ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_13_1']), + ('GET', 'storage/volumes', SRR['no_record']), + ('GET', 'svm/svms', SRR['one_svm_record']), + ('POST', 'storage/volumes', SRR['success']), + ('GET', 'storage/volumes', SRR['volume_info_tags']), + # idempotent check + ('GET', 'cluster', SRR['is_rest_9_13_1']), + ('GET', 'storage/volumes', SRR['volume_info_tags']), + # modify tags + ('GET', 'cluster', SRR['is_rest_9_13_1']), + ('GET', 'storage/volumes', SRR['volume_info_tags']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['success']), + ]) + module_args = {'tags': ["team:csi", "environment:test"]} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + assert not create_and_apply(volume_module, 
DEFAULT_VOLUME_ARGS, module_args)['changed'] + module_args = {'tags': ["team:csi"]} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_successfully_deleted(): + ''' delete volume using REST - no app + ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['empty_good']), # PATCH storage/volumes - unmount + ('DELETE', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['empty_good']), # DELETE storage/volumes + ]) + module_args = {'state': 'absent'} + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + assert_no_warnings() + + +def test_rest_successfully_deleted_with_warning(): + ''' delete volume using REST - no app - unmount failed + ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # PATCH storage/volumes - unmount + ('DELETE', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['empty_good']), # DELETE storage/volumes + ]) + module_args = {'state': 'absent'} + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + print_warnings() + assert_warning_was_raised('Volume was successfully deleted though unmount failed with: calling: ' + 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error.') + + +def test_rest_successfully_deleted_with_app(): + ''' delete app + ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['empty_good']), # PATCH storage/volumes - unmount + ('DELETE', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['empty_good']), # DELETE 
storage/volumes + ]) + module_args = {'state': 'absent'} + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + + +def test_rest_successfully_move_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Move volume + ]) + module_args = {'aggregate_name': 'aggr2'} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_error_move_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Move volume + ]) + module_args = {'aggregate_name': 'aggr2'} + msg = "Error moving volume test_svm: calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error." + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_error_rehost_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['zero_records']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ]) + module_args = {'from_vserver': 'svm_orig'} + msg = "Error: ONTAP REST API does not support Rehosting Volumes" + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_successfully_volume_unmount_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Mount Volume + ]) + module_args = {'junction_path': ''} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_error_volume_unmount_rest(): + 
register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Mount Volume + ]) + module_args = {'junction_path': ''} + msg = 'Error unmounting volume test_svm with path "": calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error.' + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_successfully_volume_mount_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_mount']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Mount Volume + ]) + module_args = {'junction_path': '/this/path'} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_successfully_volume_mount_do_nothing_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_mount']), # Get Volume + ]) + module_args = {'junction_path': ''} + assert not create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_error_volume_mount_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_mount']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Mount Volume + ]) + module_args = {'junction_path': '/this/path'} + msg = 'Error mounting volume test_svm with path "/this/path": calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error.' 
+ assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_successfully_change_volume_state(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['success']), # Move volume + ]) + module_args = {'is_online': False} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_error_change_volume_state(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Move volume + ]) + module_args = {'is_online': False} + msg = "Error changing state of volume test_svm: calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error." + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_successfully_modify_attributes(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Modify + ]) + module_args = { + 'space_guarantee': 'volume', + 'percent_snapshot_space': 10, + 'snapshot_policy': 'default2', + 'export_policy': 'default2', + 'group_id': 5, + 'user_id': 5, + 'volume_security_style': 'mixed', + 'comment': 'carchi8py was here', + 'tiering_minimum_cooling_days': 10, + 'logical_space_enforcement': True, + 'logical_space_reporting': True + } + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_error_modify_attributes(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 
'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Modify + ]) + module_args = { + 'space_guarantee': 'volume', + 'percent_snapshot_space': 10, + 'snapshot_policy': 'default2', + 'export_policy': 'default2', + 'group_id': 5, + 'user_id': 5, + 'volume_security_style': 'mixed', + 'comment': 'carchi8py was here', + } + msg = "Error modifying volume test_svm: calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error." + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_successfully_create_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('POST', 'storage/volumes', SRR['no_record']), # Create Volume + ('GET', 'storage/volumes', SRR['get_volume']), + ]) + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS)['changed'] + + +def test_rest_error_get_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['generic_error']), # Get Volume + ]) + msg = "calling: storage/volumes: got Expected error." + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, fail=True)['msg'] == msg + + +def test_rest_error_create_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('POST', 'storage/volumes', SRR['generic_error']), # Create Volume + ]) + msg = "Error creating volume test_svm: calling: storage/volumes: got Expected error." 
+ assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, fail=True)['msg'] == msg + + +def test_rest_successfully_create_volume_with_options(): + module_args = { + 'space_guarantee': 'volume', + 'percent_snapshot_space': 5, + 'snapshot_policy': 'default', + 'export_policy': 'default', + 'group_id': 0, + 'user_id': 0, + 'volume_security_style': 'unix', + 'comment': 'carchi8py', + 'type': 'RW', + 'language': 'en', + 'encrypt': True, + 'junction_path': '/this/path', + 'tiering_policy': 'backup', + 'tiering_minimum_cooling_days': 10, + } + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('POST', 'storage/volumes', SRR['no_record']), # Create Volume + ('GET', 'storage/volumes', SRR['get_volume']), + # TODO - force a patch after create + # ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # modify Volume + ]) + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS)['changed'] + + +def test_rest_successfully_snapshot_restore_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Modify Snapshot restore + ]) + module_args = {'snapshot_restore': 'snapshot_copy'} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_error_snapshot_restore_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Modify Snapshot restore + ]) + module_args = {'snapshot_restore': 'snapshot_copy'} + msg = "Error restoring 
snapshot snapshot_copy in volume test_svm: calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error." + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_error_snapshot_restore_volume_no_parent(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['zero_records']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ]) + module_args = {'snapshot_restore': 'snapshot_copy'} + msg = "Error restoring volume: cannot find parent: test_svm" + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_successfully_rename_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume name + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume from + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume from + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Patch + ]) + module_args = { + 'from_name': 'test_svm', + 'name': 'new_name' + } + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_error_rename_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume name + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume from + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume from + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Patch + ]) + module_args = { + 'from_name': 'test_svm', + 'name': 'new_name' + } + msg = "Error changing name of volume new_name: calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error." 
+ assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_error_resizing_volume(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), # Get Volume name + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Resize volume + ]) + module_args = { + 'sizing_method': 'add_new_resources', + 'size': 20737418240 + } + msg = "Error resizing volume test_svm: calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error." + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_successfully_create_volume_with_unix_permissions(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('POST', 'storage/volumes', SRR['no_record']), # Create Volume + ('GET', 'storage/volumes', SRR['get_volume']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # add unix permissions + ]) + module_args = {'unix_permissions': '---rw-r-xr-x'} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_successfully_create_volume_with_qos_policy(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('POST', 'storage/volumes', SRR['no_record']), # Create Volume + ('GET', 'storage/volumes', SRR['get_volume']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Set policy name + ]) + module_args = {'qos_policy_group': 'policy-name'} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_successfully_create_volume_with_qos_adaptive_policy_group(): + 
register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('POST', 'storage/volumes', SRR['no_record']), # Create Volume + ('GET', 'storage/volumes', SRR['get_volume']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Set policy name + ]) + module_args = {'qos_adaptive_policy_group': 'policy-name'} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_successfully_create_volume_with_qos_adaptive_policy_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + module_args = { + 'qos_adaptive_policy_group': 'policy-name', + 'qos_policy_group': 'policy-name' + } + msg = "Error: With Rest API qos_policy_group and qos_adaptive_policy_group are now the same thing, and cannot be set at the same time" + assert create_module(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_successfully_create_volume_with_tiering_policy(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('POST', 'storage/volumes', SRR['no_record']), # Create Volume + ('GET', 'storage/volumes', SRR['get_volume']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Set Tiering_policy + ]) + module_args = {'tiering_policy': 'all'} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_successfully_create_volume_encrypt(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('POST', 'storage/volumes', SRR['no_record']), # Create Volume + ('GET', 'storage/volumes', SRR['get_volume']), + ('PATCH', 
'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Set Encryption + ]) + module_args = {'encrypt': False} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +@patch('time.sleep') +def test_rest_successfully_modify_volume_encrypt(sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_encrypt_off']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # Set Encryption + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_encrypt_off']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), + ('GET', 'storage/volumes', SRR['encrypting']), + ('GET', 'storage/volumes', SRR['encrypting']), + ('GET', 'storage/volumes', SRR['encrypting']), + ('GET', 'storage/volumes', SRR['encrypted']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_encrypt_off']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']) + ]) + module_args = {'encrypt': True} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + module_args = {'encrypt': True, 'wait_for_completion': True} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + error = 'Error getting volume encryption_conversion status' + assert error in create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] + error = 'unencrypting volume is only supported when moving the volume to another aggregate in REST' + assert error in create_and_apply(volume_module, 
DEFAULT_VOLUME_ARGS, {'encrypt': False}, fail=True)['msg'] + + +def test_rest_error_modify_volume_encrypt(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_encrypt_off']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Set Encryption + ]) + module_args = {'encrypt': True} + msg = "Error enabling encryption for volume test_svm: calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error." + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_successfully_modify_volume_compression(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_encrypt_off']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # compression + ]) + module_args = { + 'efficiency_policy': 'test', + 'compression': True + } + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_successfully_modify_volume_inline_compression(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_encrypt_off']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # compression + ]) + module_args = { + 'efficiency_policy': 'test', + 'inline_compression': True + } + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_error_modify_volume_efficiency_policy(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_encrypt_off']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Set Encryption + ]) + module_args = {'efficiency_policy': 'test'} + msg = "Error setting efficiency for volume test_svm: calling: 
storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error." + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_error_volume_compression_both(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume_encrypt_off']), # Get Volume + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['generic_error']), # Set Encryption + ]) + module_args = { + 'compression': True, + 'inline_compression': True + } + msg = "Error setting efficiency for volume test_svm: calling: storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa: got Expected error." + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_error_modify_volume_efficiency_policy_with_ontap_96(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + module_args = {'efficiency_policy': 'test'} + msg = "Error: Minimum version of ONTAP for efficiency_policy is (9, 7)." + assert msg in create_module(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] + + +def test_rest_error_modify_volume_tiering_minimum_cooling_days_98(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + module_args = {'tiering_minimum_cooling_days': 2} + msg = "Error: Minimum version of ONTAP for tiering_minimum_cooling_days is (9, 8)." 
+ assert msg in create_module(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] + + +def test_rest_successfully_created_with_logical_space(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), # GET svm + ('POST', 'storage/volumes', SRR['no_record']), # Create Volume + ('GET', 'storage/volumes', SRR['get_volume']), + ]) + module_args = { + 'logical_space_enforcement': False, + 'logical_space_reporting': False + } + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_rest_error_modify_backend_fabricpool(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), + ('GET', 'storage/aggregates/aggr1_uuid/cloud-stores', SRR['no_record']), # get_aggr_object_stores + ]) + module_args = { + 'nas_application_template': {'tiering': {'control': 'required'}}, + 'feature_flags': {'warn_or_fail_on_fabricpool_backend_change': 'fail'} + } + + msg = "Error: changing a volume from one backend to another is not allowed. Current tiering control: disallowed, desired: required." 
+ assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args, fail=True)['msg'] == msg + + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), + ('GET', 'application/applications', SRR['no_record']), # TODO: modify + ]) + module_args['feature_flags'] = {'warn_or_fail_on_fabricpool_backend_change': 'invalid'} + assert not create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + print_warnings() + warning = "Unexpected value 'invalid' for warn_or_fail_on_fabricpool_backend_change, expecting: None, 'ignore', 'fail', 'warn'" + assert_warning_was_raised(warning) + + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), + ('GET', 'storage/aggregates/aggr1_uuid/cloud-stores', SRR['no_record']), # get_aggr_object_stores + ('GET', 'application/applications', SRR['no_record']), # TODO: modify + ]) + module_args['feature_flags'] = {'warn_or_fail_on_fabricpool_backend_change': 'warn'} + assert not create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + warning = "Ignored %s" % msg + print_warnings() + assert_warning_was_raised(warning) + + +def test_rest_negative_modify_backend_fabricpool(): + ''' fail to get aggregate object store''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), + ('GET', 'storage/aggregates/aggr1_uuid/cloud-stores', SRR['generic_error']), + ]) + module_args = { + 'nas_application_template': {'tiering': {'control': 'required'}}, + 'feature_flags': {'warn_or_fail_on_fabricpool_backend_change': 'fail'} + } + msg = "Error getting object store for aggregate: aggr1: calling: storage/aggregates/aggr1_uuid/cloud-stores: got Expected error." 
+ assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args, fail=True)['msg'] == msg + + +def test_rest_tiering_control(): + ''' The volume is supported by one or more aggregates + If all aggregates are associated with one or more object stores, the volume has a FabricPool backend. + If all aggregates are not associated with one or more object stores, the volume meets the 'disallowed' criteria. + ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/aggregates/uuid1/cloud-stores', SRR['no_record']), # get_aggr_object_stores aggr1 + ('GET', 'storage/aggregates/uuid2/cloud-stores', SRR['no_record']), # get_aggr_object_stores aggr2 + ('GET', 'storage/aggregates/uuid1/cloud-stores', SRR['get_aggr_one_object_store']), # get_aggr_object_stores aggr1 + ('GET', 'storage/aggregates/uuid2/cloud-stores', SRR['no_record']), # get_aggr_object_stores aggr2 + ('GET', 'storage/aggregates/uuid1/cloud-stores', SRR['get_aggr_two_object_stores']), # get_aggr_object_stores aggr1 + ('GET', 'storage/aggregates/uuid2/cloud-stores', SRR['get_aggr_one_object_store']), # get_aggr_object_stores aggr2 + ]) + module_args = { + 'nas_application_template': {'tiering': {'control': 'required'}}, + 'feature_flags': {'warn_or_fail_on_fabricpool_backend_change': 'fail'} + } + current = {'aggregates': [{'name': 'aggr1', 'uuid': 'uuid1'}, {'name': 'aggr2', 'uuid': 'uuid2'}]} + vol_object = create_module(volume_module, DEFAULT_APP_ARGS, module_args) + result = vol_object.tiering_control(current) + assert result == 'disallowed' + result = vol_object.tiering_control(current) + assert result == 'best_effort' + result = vol_object.tiering_control(current) + assert result == 'required' + current = {'aggregates': []} + result = vol_object.tiering_control(current) + assert result is None + + +def test_error_snaplock_volume_create_sl_type_not_changed(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['empty_records']), 
+ ('GET', 'svm/svms', SRR['one_svm_record']), + ('POST', 'storage/volumes', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['get_volume']), + ]) + module_args = {'snaplock': {'type': 'enterprise'}} + error = 'Error: volume snaplock type was not set properly at creation time. Current: non_snaplock, desired: enterprise.' + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == error + + +def test_error_snaplock_volume_create_sl_type_not_supported(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('GET', 'svm/svms', SRR['one_svm_record']), + ]) + module_args = {'snaplock': {'type': 'enterprise'}} + error = 'Error: using snaplock type requires ONTAP 9.10.1 or later and REST must be enabled - ONTAP version: 9.6.0 - using REST.' + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == error + + +def test_error_snaplock_volume_create_sl_options_not_supported_when_non_snaplock(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('GET', 'svm/svms', SRR['one_svm_record']), + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('GET', 'svm/svms', SRR['one_svm_record']), + ]) + module_args = {'snaplock': { + 'type': 'non_snaplock', + 'retention': {'default': 'P30Y'} + }} + error = "Error: snaplock options are not supported for non_snaplock volume, found: {'retention': {'default': 'P30Y'}}." 
+ assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == error + + # 'non_snaplock' is the default too + module_args = {'snaplock': { + 'retention': {'default': 'P30Y'} + }} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == error + + +def test_snaplock_volume_create(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['empty_records']), + ('GET', 'svm/svms', SRR['one_svm_record']), + ('POST', 'storage/volumes', SRR['empty_records']), + ('GET', 'storage/volumes', SRR['get_volume_sl_enterprise']), + ]) + module_args = {'snaplock': {'type': 'enterprise', 'retention': {'maximum': 'P5D'}}} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_error_snaplock_volume_modify_type(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['get_volume_sl_enterprise']), + ]) + module_args = {'snaplock': {'type': 'compliance'}} + error = 'Error: changing a volume snaplock type after creation is not allowed. Current: enterprise, desired: compliance.' 
+ assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == error + + +def test_snaplock_volume_modify_other_options(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['get_volume_sl_enterprise']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['success']), + ]) + module_args = {'snaplock': { + 'retention': {'default': 'P20Y'} + }} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_snaplock_volume_modify_other_options_idempotent(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['get_volume_sl_enterprise']), + ]) + module_args = {'snaplock': { + 'retention': {'default': 'P30Y'} + }} + assert not create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +def test_max_files_volume_modify(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['get_volume_sl_enterprise']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['success']), + ]) + module_args = {'max_files': 3000} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, module_args)['changed'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_use_zapi_and_netapp_lib_missing(mock_has_netapp_lib): + """ZAPI requires netapp_lib""" + register_responses([ + ]) + mock_has_netapp_lib.return_value = False + module_args = {'use_rest': 'never'} + error = 'Error: the python NetApp-Lib module is required. 
Import error: None' + assert create_module(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == error + + +def test_fallback_to_zapi_and_nas_application_is_used(): + """fallback to ZAPI when use_rest: auto and some ZAPI only options are used""" + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + module_args = {'use_rest': 'auto', 'cutover_action': 'wait', 'nas_application_template': {'storage_service': 'value'}} + error = "Error: nas_application_template requires REST support. use_rest: auto. "\ + "Conflict because of unsupported option(s) or option value(s) in REST: ['cutover_action']." + assert create_module(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == error + assert_warning_was_raised("Falling back to ZAPI because of unsupported option(s) or option value(s) in REST: ['cutover_action']") + + +def test_fallback_to_zapi_and_rest_option_is_used(): + """fallback to ZAPI when use_rest: auto and some ZAPI only options are used""" + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + module_args = {'use_rest': 'auto', 'cutover_action': 'wait', 'sizing_method': 'use_existing_resources'} + error = "Error: sizing_method option is not supported with ZAPI. It can only be used with REST. use_rest: auto. "\ + "Conflict because of unsupported option(s) or option value(s) in REST: ['cutover_action']." 
+ assert create_module(volume_module, DEFAULT_VOLUME_ARGS, module_args, fail=True)['msg'] == error + assert_warning_was_raised("Falling back to ZAPI because of unsupported option(s) or option value(s) in REST: ['cutover_action']") + + +def test_error_conflict_export_policy_and_nfs_access(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + module_args = { + 'export_policy': 'auto', + 'nas_application_template': { + 'tiering': None, + 'nfs_access': [{'access': 'ro'}] + }, + 'tiering_policy': 'backup' + } + error = 'Conflict: export_policy option and nfs_access suboption in nas_application_template are mutually exclusive.' + assert create_module(volume_module, DEFAULT_APP_ARGS, module_args, fail=True)['msg'] == error + + +def test_create_nas_app_nfs_access(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + ('POST', 'application/applications', SRR['empty_good']), # POST application/applications + ('GET', 'storage/volumes', SRR['get_volume']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['get_volume']), + ]) + module_args = { + 'nas_application_template': { + 'exclude_aggregates': ['aggr_ex'], + 'nfs_access': [{'access': 'ro'}], + 'tiering': None, + }, + 'snapshot_policy': 'snspol' + } + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + + +def test_create_nas_app_tiering_object_store(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + ('POST', 'application/applications', SRR['empty_good']), # POST application/applications + ('GET', 'storage/volumes', 
SRR['get_volume']), + ('GET', 'storage/aggregates/aggr1_uuid/cloud-stores', SRR['get_aggr_one_object_store']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['get_volume']), + ]) + module_args = { + 'nas_application_template': { + 'flexcache': { + 'dr_cache': True, + 'origin_component_name': 'ocn', + 'origin_svm_name': 'osn', + }, + 'storage_service': 'extreme', + 'tiering': { + 'control': 'required', + 'object_stores': ['obs1'] + }, + }, + 'export_policy': 'exppol', + 'qos_policy_group': 'qospol', + 'snapshot_policy': 'snspol' + } + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + + +def test_create_nas_app_tiering_policy_flexcache(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + ('POST', 'application/applications', SRR['empty_good']), # POST application/applications + ('GET', 'storage/volumes', SRR['get_volume']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['get_volume']), + ]) + module_args = { + 'nas_application_template': { + 'flexcache': { + 'dr_cache': True, + 'origin_component_name': 'ocn', + 'origin_svm_name': 'osn', + }, + 'storage_service': 'extreme', + }, + 'qos_policy_group': 'qospol', + 'snapshot_policy': 'snspol', + 'tiering_policy': 'snapshot-only', + } + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + + +def test_create_nas_app_tiering_flexcache(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), + ('GET', 'application/applications', SRR['no_record']), # GET application/applications + ('POST', 'application/applications', SRR['empty_good']), # POST application/applications + ('GET', 
'storage/volumes', SRR['get_volume']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['get_volume']), + ]) + module_args = { + 'nas_application_template': { + 'flexcache': { + 'dr_cache': True, + 'origin_component_name': 'ocn', + 'origin_svm_name': 'osn', + }, + 'storage_service': 'extreme', + 'tiering': { + 'control': 'best_effort' + }, + }, + 'qos_policy_group': 'qospol', + 'snapshot_policy': 'snspol' + } + assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + + +def test_version_error_nas_app(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ]) + module_args = { + 'nas_application_template': { + 'flexcache': { + 'dr_cache': True, + 'origin_component_name': 'ocn', + 'origin_svm_name': 'osn', + }, + }, + } + error = 'Error: using nas_application_template requires ONTAP 9.7 or later and REST must be enabled - ONTAP version: 9.6.0.' + assert create_module(volume_module, DEFAULT_APP_ARGS, module_args, fail=True)['msg'] == error + + +def test_version_error_nas_app_dr_cache(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + module_args = { + 'nas_application_template': { + 'flexcache': { + 'dr_cache': True, + 'origin_component_name': 'ocn', + 'origin_svm_name': 'osn', + }, + }, + } + error = 'Error: using flexcache: dr_cache requires ONTAP 9.9 or later and REST must be enabled - ONTAP version: 9.8.0.' + assert create_module(volume_module, DEFAULT_APP_ARGS, module_args, fail=True)['msg'] == error + + +def test_error_volume_rest_patch(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + my_obj = create_module(volume_module, DEFAULT_APP_ARGS) + my_obj.parameters['uuid'] = None + error = 'Could not read UUID for volume test_svm in patch.' 
+ assert expect_and_capture_ansible_exception(my_obj.volume_rest_patch, 'fail', {})['msg'] == error + + +def test_error_volume_rest_delete(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ]) + my_obj = create_module(volume_module, DEFAULT_APP_ARGS) + my_obj.parameters['uuid'] = None + error = 'Could not read UUID for volume test_svm in delete.' + assert expect_and_capture_ansible_exception(my_obj.rest_delete_volume, 'fail', '')['msg'] == error + + +def test_error_modify_app_not_supported_no_volume_but_app(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['no_record']), + ('GET', 'svm/svms', SRR['one_svm_record']), + ('GET', 'application/applications', SRR['nas_app_record']), + ('GET', 'application/applications/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['nas_app_record_by_uuid']), + ]) + module_args = {} + # TODO: we need to handle this error case with a better error message + error = \ + 'Error in create_nas_application: function create_application should not be called when application uuid is set: 09e9fd5e-8ebd-11e9-b162-005056b39fe7.' 
+ assert create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args, fail=True)['msg'] == error + + +def test_warning_modify_app_not_supported(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_10_1']), + ('GET', 'storage/volumes', SRR['get_volume']), + ('GET', 'application/applications', SRR['nas_app_record']), + ('GET', 'application/applications/09e9fd5e-8ebd-11e9-b162-005056b39fe7', SRR['nas_app_record_by_uuid']), + ]) + module_args = { + 'nas_application_template': { + 'flexcache': { + 'dr_cache': True, + 'origin_component_name': 'ocn', + 'origin_svm_name': 'osn', + }, + }, + } + assert not create_and_apply(volume_module, DEFAULT_APP_ARGS, module_args)['changed'] + assert_warning_was_raised("Modifying an app is not supported at present: ignoring: {'flexcache': {'origin': {'svm': {'name': 'osn'}}}}") + + +def test_create_flexgroup_volume_from_main(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), # Get Volume + ('GET', 'svm/svms', SRR['one_svm_record']), + ('POST', 'storage/volumes', SRR['no_record']), # Create Volume + ('GET', 'storage/volumes', SRR['get_volume']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # eff policy + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # modify + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['no_record']), # offline + ]) + args = copy.deepcopy(DEFAULT_VOLUME_ARGS) + del args['aggregate_name'] + module_args = { + 'aggr_list': 'aggr_0,aggr_1', + 'aggr_list_multiplier': 2, + 'comment': 'some comment', + 'compression': False, + 'efficiency_policy': 'effpol', + 'export_policy': 'exppol', + 'group_id': 1001, + 'junction_path': '/this/path', + 'inline_compression': False, + 'is_online': False, + 'language': 'us', + 'percent_snapshot_space': 10, + 'snapshot_policy': 'snspol', + 'space_guarantee': 'file', + 'tiering_minimum_cooling_days': 30, + 
'tiering_policy': 'snapshot-only', + 'type': 'rw', + 'user_id': 123, + 'volume_security_style': 'unix', + } + assert call_main(my_main, args, module_args)['changed'] + + +def test_get_volume_style(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ]) + args = copy.deepcopy(DEFAULT_VOLUME_ARGS) + del args['aggregate_name'] + module_args = { + 'auto_provision_as': 'flexgroup', + } + my_obj = create_module(volume_module, args, module_args) + assert my_obj.get_volume_style(None) == 'flexgroup' + assert my_obj.parameters.get('aggr_list_multiplier') == 1 + + +def test_move_volume_with_rest_passthrough(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('PATCH', 'private/cli/volume/move/start', SRR['success']), + ('PATCH', 'private/cli/volume/move/start', SRR['generic_error']), + ]) + module_args = { + 'aggregate_name': 'aggr2' + } + obj = create_module(volume_module, DEFAULT_VOLUME_ARGS, module_args) + error = obj.move_volume_with_rest_passthrough(True) + assert error is None + error = obj.move_volume_with_rest_passthrough(True) + assert 'Expected error' in error + + +def test_ignore_small_change(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + obj = create_module(volume_module, DEFAULT_VOLUME_ARGS) + obj.parameters['attribute'] = 51 + assert obj.ignore_small_change({'attribute': 50}, 'attribute', .5) is None + assert obj.parameters['attribute'] == 51 + assert_no_warnings() + obj.parameters['attribute'] = 50.2 + assert obj.ignore_small_change({'attribute': 50}, 'attribute', .5) is None + assert obj.parameters['attribute'] == 50 + print_warnings() + assert_warning_was_raised('resize request for attribute ignored: 0.4% is below the threshold: 0.5%') + + +def test_set_efficiency_rest_empty_body(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ]) + obj = create_module(volume_module, DEFAULT_VOLUME_ARGS) + # no action + assert obj.set_efficiency_rest() is None + + +@patch('time.sleep') +def 
test_volume_move_rest(sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'storage/volumes', SRR['get_volume_mount']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['success']), + ('GET', 'storage/volumes', SRR['move_state_replicating']), + ('GET', 'storage/volumes', SRR['move_state_success']), + # error when trying to get volume status + ('GET', 'cluster', SRR['is_rest_9_8_0']), + ('GET', 'storage/volumes', SRR['get_volume_mount']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['success']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['generic_error']), + ('GET', 'storage/volumes', SRR['generic_error']) + ]) + args = {'aggregate_name': 'aggr2', 'wait_for_completion': True, 'max_wait_time': 280} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, args)['changed'] + error = "Error getting volume move status" + assert error in create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, args, fail=True)['msg'] + + +def test_analytics_option(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['no_record']), + ('GET', 'svm/svms', SRR['one_svm_record']), + ('POST', 'storage/volumes', SRR['success']), + ('GET', 'storage/volumes', SRR['get_volume']), + # idempotency check + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), + # Disable analytics + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['get_volume']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['success']), + # Enable analytics + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['analytics_off']), + ('PATCH', 'storage/volumes/7882901a-1aef-11ec-a267-005056b30cfa', SRR['success']), + # Try to Enable analytics which is initializing(no change required.) 
+ ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['analytics_initializing']) + ]) + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, {'analytics': 'on'})['changed'] + assert not create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, {'analytics': 'on'})['changed'] + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, {'analytics': 'off'})['changed'] + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, {'analytics': 'on'})['changed'] + assert not create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, {'analytics': 'on'})['changed'] + + +def test_warn_rest_modify(): + """ Test skip snapshot_restore and modify when volume is offline """ + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'storage/volumes', SRR['volume_info_offline']) + ]) + args = {'is_online': False, 'junction_path': '/test', 'use_rest': 'always', 'snapshot_restore': 'restore1'} + assert create_and_apply(volume_module, DEFAULT_VOLUME_ARGS, args)['changed'] is False + assert_warning_was_raised("Cannot perform action(s): ['snapshot_restore'] and modify: ['junction_path']", partial_match=True) diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py new file mode 100644 index 000000000..0c836233f --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_volume_snaplock.py @@ -0,0 +1,131 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_ontap_volume_snaplock """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import 
ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_snaplock \ + import NetAppOntapVolumeSnaplock as snaplock_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.type = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.type == 'snaplock': + xml = self.build_snaplock_info(self.params) + elif self.type == 'zapi_error': + error = netapp_utils.zapi.NaApiError('test', 'error') + raise error + self.xml_out = xml + return xml + + @staticmethod + def build_snaplock_info(data): + ''' build xml data for snaplock-attrs info ''' + xml = netapp_utils.zapi.NaElement('xml') + attributes = {'snaplock-attrs': { + 'snaplock-attrs-info': { + 'autocommit-period': data['autocommit_period'], + 'default-retention-period': data['default_retention_period'], + 'maximum-retention-period': data['maximum_retention_period'], + 'minimum-retention-period': data['minimum_retention_period'], + 'is-volume-append-mode-enabled': data['is_volume_append_mode_enabled'] + } + }} + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_snaplock = { + 'autocommit_period': '10days', + 'default_retention_period': '1years', + 'maximum_retention_period': '2years', + 
'minimum_retention_period': '6months', + 'is_volume_append_mode_enabled': 'false' + } + + def mock_args(self): + return { + 'name': 'test_volume', + 'autocommit_period': self.mock_snaplock['autocommit_period'], + 'default_retention_period': self.mock_snaplock['default_retention_period'], + 'maximum_retention_period': self.mock_snaplock['maximum_retention_period'], + 'minimum_retention_period': self.mock_snaplock['minimum_retention_period'], + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'test_vserver' + } + + def get_snaplock_mock_object(self, kind=None): + """ + Helper method to return an na_ontap_volume_snaplock object + :param kind: passes this param to MockONTAPConnection() + :return: na_ontap_volume_snaplock object + """ + snaplock_obj = snaplock_module() + if kind is None: + snaplock_obj.server = MockONTAPConnection() + else: + snaplock_obj.server = MockONTAPConnection(kind=kind, data=self.mock_snaplock) + return snaplock_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + snaplock_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_get_existing_snaplock(self): + set_module_args(self.mock_args()) + result = self.get_snaplock_mock_object(kind='snaplock').get_volume_snaplock_attrs() + assert result['autocommit_period'] == self.mock_snaplock['autocommit_period'] + assert result['default_retention_period'] == self.mock_snaplock['default_retention_period'] + assert result['is_volume_append_mode_enabled'] is False + assert result['maximum_retention_period'] == self.mock_snaplock['maximum_retention_period'] + + def test_modify_snaplock(self): + data = self.mock_args() + data['maximum_retention_period'] = '5years' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_snaplock_mock_object('snaplock').apply() + assert exc.value.args[0]['changed'] + + 
@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_volume_snaplock.NetAppOntapVolumeSnaplock.get_volume_snaplock_attrs') + def test_modify_snaplock_error(self, get_volume_snaplock_attrs): + data = self.mock_args() + data['maximum_retention_period'] = '5years' + set_module_args(data) + get_volume_snaplock_attrs.side_effect = [self.mock_snaplock] + with pytest.raises(AnsibleFailJson) as exc: + self.get_snaplock_mock_object('zapi_error').apply() + assert exc.value.args[0]['msg'] == 'Error setting snaplock attributes for volume test_volume : NetApp API failed. Reason - test:error' diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan.py new file mode 100644 index 000000000..924865507 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan.py @@ -0,0 +1,200 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_vscan''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vscan \ + import NetAppOntapVscan as vscan_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') +HAS_NETAPP_ZAPI_MSG = "pip install netapp_lib is required" + + +# REST API canned 
responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'enabled': (200, {'records': [{'enabled': True, 'svm': {'uuid': 'testuuid'}}]}, None), + 'disabled': (200, {'records': [{'enabled': False, 'svm': {'uuid': 'testuuid'}}]}, None), +} + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.kind == 'enable': + xml = self.build_vscan_status_info(self.params) + self.xml_out = xml + return xml + + @staticmethod + def build_vscan_status_info(status): + xml = netapp_utils.zapi.NaElement('xml') + attributes = {'num-records': 1, + 'attributes-list': {'vscan-status-info': {'is-vscan-enabled': status}}} + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_vscan ''' + + def mock_args(self): + return { + 'enable': False, + 'vserver': 'vserver', + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!' 
+ } + + def get_vscan_mock_object(self, cx_type='zapi', kind=None, status=None): + vscan_obj = vscan_module() + if cx_type == 'zapi': + if kind is None: + vscan_obj.server = MockONTAPConnection() + else: + vscan_obj.server = MockONTAPConnection(kind=kind, data=status) + # For rest, mocking is achieved through side_effect + return vscan_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + vscan_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_successfully_enable(self): + data = self.mock_args() + data['enable'] = True + data['use_rest'] = 'never' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_vscan_mock_object('zapi', 'enable', 'false').apply() + assert exc.value.args[0]['changed'] + + def test_idempotently_enable(self): + data = self.mock_args() + data['enable'] = True + data['use_rest'] = 'never' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_vscan_mock_object('zapi', 'enable', 'true').apply() + assert not exc.value.args[0]['changed'] + + def test_successfully_disable(self): + data = self.mock_args() + data['enable'] = False + data['use_rest'] = 'never' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_vscan_mock_object('zapi', 'enable', 'true').apply() + assert exc.value.args[0]['changed'] + + def test_idempotently_disable(self): + data = self.mock_args() + data['enable'] = False + data['use_rest'] = 'never' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_vscan_mock_object('zapi', 'enable', 'false').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_error(self, mock_request): + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + 
SRR['is_rest'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_vscan_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['msg'] == SRR['generic_error'][2] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successly_enable(self, mock_request): + data = self.mock_args() + data['enable'] = True + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['disabled'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_vscan_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_idempotently_enable(self, mock_request): + data = self.mock_args() + data['enable'] = True + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['enabled'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_vscan_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successly_disable(self, mock_request): + data = self.mock_args() + data['enable'] = False + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['enabled'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_vscan_mock_object(cx_type='rest').apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_idempotently_disable(self, mock_request): + data = self.mock_args() + data['enable'] = False + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['disabled'], + SRR['end_of_sequence'] + ] + 
with pytest.raises(AnsibleExitJson) as exc: + self.get_vscan_mock_object(cx_type='rest').apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py new file mode 100644 index 000000000..d5228c1cc --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_access_policy.py @@ -0,0 +1,348 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_vscan_on_access_policy ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import patch_ansible,\ + create_module, create_and_apply, expect_and_capture_ansible_exception +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vscan_on_access_policy \ + import NetAppOntapVscanOnAccessPolicy as policy_module # module under test +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +DEFAULT_ARGS = { + 'state': 'present', 
'vserver': 'test_vserver', + 'policy_name': 'test_carchi', + 'max_file_size': 2147483648 + 1, # 2GB + 1 + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never' +} + + +vscan_info = { + 'num-records': 1, + 'attributes-list': { + 'vscan-on-access-policy-info': { + 'policy-name': 'test_carchi', + 'vserver': 'test_vserver', + 'max-file-size': 2147483648 + 1, + 'is-scan-mandatory': 'false', + 'scan-files-with-no-ext': 'true', + 'is-policy-enabled': 'true', + 'file-ext-to-include': ['py'] + } + } +} + + +ZRR = zapi_responses({ + 'vscan_info': build_zapi_response(vscan_info) +}) + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + # with python 2.6, dictionaries are not ordered + fragments = ["missing required arguments:", "hostname", "policy_name", "vserver"] + error = create_module(policy_module, {}, fail=True)['msg'] + for fragment in fragments: + assert fragment in error + + +def test_get_nonexistent_policy(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['empty']) + ]) + policy_obj = create_module(policy_module, DEFAULT_ARGS) + result = policy_obj.get_on_access_policy() + assert result is None + + +def test_get_existing_scanner(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['vscan_info']) + ]) + policy_obj = create_module(policy_module, DEFAULT_ARGS) + result = policy_obj.get_on_access_policy() + assert result + + +def test_successfully_create(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['empty']), + ('vscan-on-access-policy-create', ZRR['success']) + ]) + assert create_and_apply(policy_module, DEFAULT_ARGS)['changed'] + + +def test_create_idempotency(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['vscan_info']) + ]) + assert create_and_apply(policy_module, DEFAULT_ARGS)['changed'] is False + + +def test_successfully_delete(): + register_responses([ + ('vscan-on-access-policy-get-iter', 
ZRR['vscan_info']), + ('vscan-on-access-policy-delete', ZRR['success']) + ]) + assert create_and_apply(policy_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] + + +def test_delete_idempotency(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['empty']) + ]) + assert create_and_apply(policy_module, DEFAULT_ARGS, {'state': 'absent'})['changed'] is False + + +def test_successfully_create_and_enable_policy(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['empty']), + ('vscan-on-access-policy-create', ZRR['success']), + ('vscan-on-access-policy-status-modify', ZRR['success']) + ]) + args = {'policy_status': True} + assert create_and_apply(policy_module, DEFAULT_ARGS, args)['changed'] + + +def test_disable_policy_and_delete(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['vscan_info']), + ('vscan-on-access-policy-status-modify', ZRR['success']), + ('vscan-on-access-policy-delete', ZRR['success']) + ]) + args = {'policy_status': False, 'state': 'absent'} + assert create_and_apply(policy_module, DEFAULT_ARGS, args)['changed'] + + +def test_modify_policy(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['vscan_info']), + ('vscan-on-access-policy-modify', ZRR['success']) + ]) + args = {'max_file_size': 2147483650} + assert create_and_apply(policy_module, DEFAULT_ARGS, args)['changed'] + + +def test_modify_files_to_incluse_empty_error(): + args = {'file_ext_to_include': []} + msg = 'Error: The value for file_ext_include cannot be empty' + assert msg in create_module(policy_module, DEFAULT_ARGS, args, fail=True)['msg'] + + +def module_error_disable_policy(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['vscan_info']), + ('vscan-on-access-policy-status-modify', ZRR['error']) + ]) + args = {'policy_status': False} + error = create_and_apply(policy_module, DEFAULT_ARGS, args, fail=True)['msg'] + assert 'Error modifying status Vscan on Access Policy' in error + + +def 
test_if_all_methods_catch_exception(): + register_responses([ + ('vscan-on-access-policy-get-iter', ZRR['error']), + ('vscan-on-access-policy-create', ZRR['error']), + ('vscan-on-access-policy-modify', ZRR['error']), + ('vscan-on-access-policy-delete', ZRR['error']), + ]) + + policy_obj = create_module(policy_module, DEFAULT_ARGS) + + error = expect_and_capture_ansible_exception(policy_obj.get_on_access_policy, 'fail')['msg'] + assert 'Error searching Vscan on Access Policy' in error + + error = expect_and_capture_ansible_exception(policy_obj.create_on_access_policy, 'fail')['msg'] + assert 'Error creating Vscan on Access Policy' in error + + error = expect_and_capture_ansible_exception(policy_obj.modify_on_access_policy, 'fail')['msg'] + assert 'Error Modifying Vscan on Access Policy' in error + + error = expect_and_capture_ansible_exception(policy_obj.delete_on_access_policy, 'fail')['msg'] + assert 'Error Deleting Vscan on Access Policy' in error + + +DEFAULT_ARGS_REST = { + "policy_name": "custom_CIFS", + "policy_status": True, + "file_ext_to_exclude": ["exe", "yml", "py"], + "file_ext_to_include": ['txt', 'json'], + "scan_readonly_volumes": True, + "only_execute_access": False, + "is_scan_mandatory": True, + "paths_to_exclude": ['\folder1', '\folder2'], + "scan_files_with_no_ext": True, + "max_file_size": 2147483648, + "vserver": "vscan-test", + "hostname": "test", + "username": "test_user", + "password": "test_pass", + "use_rest": "always" +} + + +SRR = rest_responses({ + 'vscan_on_access_policy': (200, {"records": [ + { + "svm": {"name": "vscan-test"}, + "name": "custom_CIFS", + "enabled": True, + "mandatory": True, + "scope": { + "max_file_size": 2147483648, + "exclude_paths": ["\folder1", "\folder2"], + "include_extensions": ["txt", "json"], + "exclude_extensions": ["exe", "yml", "py"], + "scan_without_extension": True, + "scan_readonly_volumes": True, + "only_execute_access": False + } + } + ], "num_records": 1}, None), + 'svm_uuid': (200, {"records": [ + 
{ + 'uuid': 'e3cb5c7f-cd20' + }], "num_records": 1}, None) +}) + + +def test_successfully_create_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies', SRR['empty_records']), + ('POST', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies', SRR['success']) + ]) + assert create_and_apply(policy_module, DEFAULT_ARGS_REST)['changed'] + + +def test_successfully_create_rest_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies', SRR['vscan_on_access_policy']) + ]) + assert create_and_apply(policy_module, DEFAULT_ARGS_REST)['changed'] is False + + +def test_modify_policy_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies', SRR['vscan_on_access_policy']), + ('PATCH', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies/custom_CIFS', SRR['success']) + ]) + args = { + "policy_status": False, + "file_ext_to_exclude": ['yml'], + "file_ext_to_include": ['json'], + "scan_readonly_volumes": False, + "only_execute_access": True, + "is_scan_mandatory": False, + "paths_to_exclude": ['\folder1'], + "scan_files_with_no_ext": False, + "max_file_size": 2147483649 + } + assert create_and_apply(policy_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_disable_and_delete_policy_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies', SRR['vscan_on_access_policy']), + ('PATCH', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies/custom_CIFS', SRR['success']), + ('DELETE', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies/custom_CIFS', SRR['success']) + ]) + args = { + 'state': 'absent', + 'policy_status': 
False + } + assert create_and_apply(policy_module, DEFAULT_ARGS_REST, args)['changed'] + + +def test_delete_idempotent(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies', SRR['empty_records']) + ]) + args = { + 'state': 'absent' + } + assert create_and_apply(policy_module, DEFAULT_ARGS_REST, args)['changed'] is False + + +def test_get_vserver_not_found(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['empty_records']) + ]) + msg = 'Error vserver vscan-test does not exist or is not a data vserver.' + assert msg in create_and_apply(policy_module, DEFAULT_ARGS_REST, fail=True)['msg'] + + +def test_invalid_option_error_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']) + ]) + args = {'paths_to_exclude': [""]} + msg = 'Error: Invalid value specified for option(s)' + assert msg in create_module(policy_module, DEFAULT_ARGS_REST, args, fail=True)['msg'] + + +def test_get_error_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['svm_uuid']), + ('GET', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies', SRR['generic_error']) + ]) + msg = 'Error searching Vscan on Access Policy' + assert msg in create_and_apply(policy_module, DEFAULT_ARGS_REST, fail=True)['msg'] + + +def test_if_all_methods_catch_exception_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/svms', SRR['generic_error']), + ('POST', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies', SRR['generic_error']), + ('PATCH', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies/custom_CIFS', SRR['generic_error']), + ('DELETE', 'protocols/vscan/e3cb5c7f-cd20/on-access-policies/custom_CIFS', SRR['generic_error']) + ]) + + policy_obj = create_module(policy_module, DEFAULT_ARGS_REST) + policy_obj.svm_uuid = "e3cb5c7f-cd20" + + msg = 'calling: svm/svms: 
got Expected error.' + assert msg in expect_and_capture_ansible_exception(policy_obj.get_svm_uuid, 'fail')['msg'] + + msg = 'Error creating Vscan on Access Policy' + assert msg in expect_and_capture_ansible_exception(policy_obj.create_on_access_policy_rest, 'fail')['msg'] + + msg = 'Error Modifying Vscan on Access Policy' + assert msg in expect_and_capture_ansible_exception(policy_obj.modify_on_access_policy_rest, 'fail', {"policy_status": False})['msg'] + + msg = 'Error Deleting Vscan on Access Policy' + assert msg in expect_and_capture_ansible_exception(policy_obj.delete_on_access_policy_rest, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py new file mode 100644 index 000000000..8060cef9a --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task.py @@ -0,0 +1,135 @@ +''' unit tests for Ansible module: na_ontap_vscan_on_demand_task ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vscan_on_demand_task \ + import NetAppOntapVscanOnDemandTask as onDemand_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def 
__init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.kind == 'task': + xml = self.build_onDemand_pool_info(self.params) + self.xml_out = xml + return xml + + @staticmethod + def build_onDemand_pool_info(onDemand_details): + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'num-records': 1, + 'attributes-list': { + 'vscan-on-demand-task-info': { + 'task-name': onDemand_details['task_name'], + 'report-directory': onDemand_details['report_directory'], + 'scan-paths': { + 'string': onDemand_details['scan_paths'] + } + } + } + } + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_job_schedule ''' + + def setUp(self): + self.mock_onDemand = { + 'state': 'present', + 'vserver': 'test_vserver', + 'report_directory': '/', + 'task_name': '/', + 'scan_paths': '/' + } + + def mock_args(self): + return { + 'state': self.mock_onDemand['state'], + 'vserver': self.mock_onDemand['vserver'], + 'report_directory': self.mock_onDemand['report_directory'], + 'task_name': self.mock_onDemand['task_name'], + 'scan_paths': self.mock_onDemand['scan_paths'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'use_rest': 'never' + } + + def get_demand_mock_object(self, kind=None): + scanner_obj = onDemand_module() + scanner_obj.asup_log_for_cserver = Mock(return_value=None) + if kind is None: + scanner_obj.server = MockONTAPConnection() + else: + scanner_obj.server = MockONTAPConnection(kind='task', data=self.mock_onDemand) + return scanner_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + 
onDemand_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def test_get_nonexistent_demand_task(self): + set_module_args(self.mock_args()) + result = self.get_demand_mock_object().get_demand_task() + assert not result + + def test_get_existing_demand_task(self): + set_module_args(self.mock_args()) + result = self.get_demand_mock_object('task').get_demand_task() + assert result + + def test_successfully_create(self): + set_module_args(self.mock_args()) + with pytest.raises(AnsibleExitJson) as exc: + self.get_demand_mock_object().apply() + assert exc.value.args[0]['changed'] + + def test_create_idempotency(self): + set_module_args(self.mock_args()) + with pytest.raises(AnsibleExitJson) as exc: + self.get_demand_mock_object('task').apply() + assert not exc.value.args[0]['changed'] + + def test_successfully_delete(self): + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_demand_mock_object('task').apply() + assert exc.value.args[0]['changed'] + + def test_delete_idempotency(self): + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_demand_mock_object().apply() + assert not exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task_rest.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task_rest.py new file mode 100644 index 000000000..0630bdff7 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_on_demand_task_rest.py @@ -0,0 +1,184 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import 
ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception, AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import get_mock_record, \ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vscan_on_demand_task \ + import NetAppOntapVscanOnDemandTask as my_module, main as my_main # module under test + +# needed for get and modify/delete as they still use ZAPI +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + 'on_demand_task': (200, {"records": [ + { + "log_path": "/vol0/report_dir", + "scan_paths": [ + "/vol1/", + "/vol2/cifs/" + ], + "name": "task-1", + "svm": { + "name": "svm1", + "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" + }, + "scope": { + "exclude_paths": [ + "/vol1/cold-files/", + "/vol1/cifs/names" + ], + "scan_without_extension": True, + "include_extensions": [ + "vmdk", + "mp*" + ], + "exclude_extensions": [ + "mp3", + "mp4" + ], + "max_file_size": "10737418240" + }, + "schedule": { + "name": "weekly", + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" + } + } + ]}, None), + 'svm_info': (200, { + "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", + "name": "svm1", + }, None), +}) + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'svm1', + 'use_rest': 
'always', + 'task_name': 'carchi8pytask', + 'scan_paths': ['/vol/vol1/'], + 'report_directory': '/', +} + + +def test_get_svm_uuid(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['svm_info']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + assert my_obj.get_svm_uuid() is None + + +def test_get_svm_uuid_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['generic_error']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + msg = 'Error fetching svm uuid: calling: svm/svms: got Expected error.' + assert msg in expect_and_capture_ansible_exception(my_obj.get_svm_uuid, 'fail')['msg'] + + +def test_get_vscan_on_demand_task_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies', SRR['empty_records']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + my_obj.svm_uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + assert my_obj.get_demand_task_rest() is None + + +def test_get_vscan_on_demand_task_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies', SRR['generic_error']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + my_obj.svm_uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + msg = 'Error fetching on demand task carchi8pytask: calling: protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_obj.get_demand_task_rest, 'fail')['msg'] + + +def test_create_vscan_on_demand_task(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['svm_info']), + ('GET', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies', SRR['empty_records']), + ('POST', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies', SRR['empty_good']) + ]) + assert create_and_apply(my_module, DEFAULT_ARGS, {})['changed'] + + +def test_create_vscan_on_demand_task_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('POST', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies', SRR['generic_error']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + my_obj.svm_uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + msg = 'Error creating on demand task carchi8pytask: calling: protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_obj.create_demand_task_rest, 'fail')['msg'] + + +def test_create_vscan_on_demand_task_with_all_options(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['svm_info']), + ('GET', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies', SRR['empty_records']), + ('POST', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies', SRR['empty_good']) + ]) + module_args = {'file_ext_to_exclude': ['mp3', 'mp4'], + 'file_ext_to_include': ['vmdk', 'mp*'], + 'max_file_size': '10737418240', + 'paths_to_exclude': ['/vol1/cold-files/', '/vol1/cifs/names'], + 'schedule': 'weekly'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_vscan_on_demand_task(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('GET', 'svm/svms', SRR['svm_info']), + ('GET', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies', SRR['on_demand_task']), + ('DELETE', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies/carchi8pytask', SRR['empty_good']) + ]) + module_args = {'state': 'absent'} + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_delete_vscan_on_demand_task_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest']), + ('DELETE', 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies/carchi8pytask', SRR['generic_error']) + ]) + set_module_args(DEFAULT_ARGS) + my_obj = my_module() + my_obj.svm_uuid = '1cd8a442-86d1-11e0-ae1c-123478563412' + msg = 'Error deleting on demand task carchi8pytask: calling: ' + \ + 'protocols/vscan/1cd8a442-86d1-11e0-ae1c-123478563412/on-demand-policies/carchi8pytask: got Expected error.' 
+ assert msg in expect_and_capture_ansible_exception(my_obj.delete_demand_task_rest, 'fail')['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py new file mode 100644 index 000000000..b80e01e82 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vscan_scanner_pool.py @@ -0,0 +1,154 @@ +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_vscan_scanner_pool ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import Mock +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vscan_scanner_pool \ + import NetAppOntapVscanScannerPool as scanner_module # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +class MockONTAPConnection(object): + ''' mock server connection to ONTAP host ''' + + def __init__(self, kind=None, data=None): + ''' save arguments ''' + self.kind = kind + self.params = data + self.xml_in = None + self.xml_out = None + + def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument + ''' mock invoke_successfully returning xml data ''' + self.xml_in = xml + if self.kind == 'scanner': + xml = self.build_scanner_pool_info(self.params) + self.xml_out = xml + 
return xml + + @staticmethod + def build_scanner_pool_info(sanner_details): + xml = netapp_utils.zapi.NaElement('xml') + attributes = { + 'num-records': 1, + 'attributes-list': { + 'vscan-scanner-pool-info': { + 'scanner-pool': sanner_details['scanner_pool'], + 'scanner-policy': sanner_details['scanner_policy'], + 'hostnames': [ + {'hostname': sanner_details['hostnames'][0]}, + {'hostname': sanner_details['hostnames'][1]} + ], + 'privileged-users': [ + {"privileged-user": sanner_details['privileged_users'][0]}, + {"privileged-user": sanner_details['privileged_users'][1]} + ] + } + } + } + xml.translate_struct(attributes) + return xml + + +class TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_job_schedule ''' + + def setUp(self): + self.mock_scanner = { + 'state': 'present', + 'scanner_pool': 'test_pool', + 'vserver': 'test_vserver', + 'hostnames': ['host1', 'host2'], + 'privileged_users': ['domain\\admin', 'domain\\carchi8py'], + 'scanner_policy': 'primary' + } + + def mock_args(self): + return { + 'state': self.mock_scanner['state'], + 'scanner_pool': self.mock_scanner['scanner_pool'], + 'vserver': self.mock_scanner['vserver'], + 'hostnames': self.mock_scanner['hostnames'], + 'privileged_users': self.mock_scanner['privileged_users'], + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + 'scanner_policy': self.mock_scanner['scanner_policy'] + } + + def get_scanner_mock_object(self, kind=None): + scanner_obj = scanner_module() + scanner_obj.asup_log_for_cserver = Mock(return_value=None) + if kind is None: + scanner_obj.server = MockONTAPConnection() + else: + scanner_obj.server = MockONTAPConnection(kind='scanner', data=self.mock_scanner) + return scanner_obj + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + scanner_module() + print('Info: %s' % exc.value.args[0]['msg']) + + def 
test_get_nonexistent_scanner(self): + ''' Test if get_scanner_pool returns None for non-existent job ''' + set_module_args(self.mock_args()) + result = self.get_scanner_mock_object().get_scanner_pool() + assert not result + + def test_get_existing_scanner(self): + ''' Test if get_scanner_pool returns None for non-existent job ''' + set_module_args(self.mock_args()) + result = self.get_scanner_mock_object('scanner').get_scanner_pool() + assert result + + def test_successfully_create(self): + set_module_args(self.mock_args()) + with pytest.raises(AnsibleExitJson) as exc: + self.get_scanner_mock_object().apply() + assert exc.value.args[0]['changed'] + + def test_create_idempotency(self): + set_module_args(self.mock_args()) + with pytest.raises(AnsibleExitJson) as exc: + self.get_scanner_mock_object('scanner').apply() + assert not exc.value.args[0]['changed'] + + def test_successfully_delete(self): + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_scanner_mock_object('scanner').apply() + assert exc.value.args[0]['changed'] + + def test_delete_idempotency(self): + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_scanner_mock_object().apply() + assert not exc.value.args[0]['changed'] + + def test_successfully_modify(self): + data = self.mock_args() + data['hostnames'] = "host1" + set_module_args(data) + with pytest.raises(AnsibleExitJson) as exc: + self.get_scanner_mock_object('scanner').apply() + assert exc.value.args[0]['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_audit.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_audit.py new file mode 100644 index 000000000..9a4ec6f91 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_audit.py @@ -0,0 +1,354 @@ +# (c) 
2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_vserver_audit ''' + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + patch_ansible, call_main, create_module, expect_and_capture_ansible_exception, AnsibleFailJson +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_audit \ + import NetAppONTAPVserverAudit as my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +# REST API canned responses when mocking send_request +SRR = rest_responses({ + # module specific responses + 'audit_record': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": "vserver" + }, + "enabled": True, + "events": { + "authorization_policy": True, + "cap_staging": True, + "cifs_logon_logoff": False, + "file_operations": False, + "file_share": True, + "security_group": True, + "user_account": True + }, + "log_path": "/", + "log": { + "format": "xml", + "retention": {"count": 4}, + "rotation": {"size": 1048576} + }, + "guarantee": False + } + ], + "num_records": 1 + }, None + ), + 'audit_record_modified': ( + 200, + { + "records": [ + { + "svm": { + "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa", + "name": 
# NOTE(review): the opening of this SRR table was truncated in the hunk under
# review; the leading fields of 'audit_record' and the whole
# 'audit_record_modified' entry are reconstructed from the UUIDs and flows the
# tests below rely on — TODO confirm against the committed file.
SRR = rest_responses({
    'audit_record': (
        200,
        {
            "records": [
                {
                    # uuid inferred from the PATCH/DELETE paths below — TODO confirm
                    "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa",
                    "svm": {"name": "vserver"},
                    "enabled": False,
                    "events": {
                        "authorization_policy": True,
                        "cap_staging": True,
                        "cifs_logon_logoff": False,
                        "file_operations": False,
                        "file_share": True,
                        "security_group": True,
                        "user_account": True
                    },
                    "log_path": "/",
                    "log": {
                        "format": "xml",
                        "retention": {"count": 4},
                        "rotation": {"size": 1048576}
                    },
                    "guarantee": False
                }
            ],
            "num_records": 1
        }, None
    ),
    # record returned after auditing was disabled, while the module polls
    # before deleting — reconstructed, TODO confirm
    'audit_record_modified': (
        200,
        {
            "records": [
                {
                    "uuid": "671aa46e-11ad-11ec-a267-005056b30cfa",
                    "svm": {"name": "vserver"},
                    "enabled": False
                }
            ],
            "num_records": 1
        }, None
    ),
    'no_record': (
        200,
        {"num_records": 0},
        None)
})

# Minimal module arguments shared by every REST test below.
ARGS_REST = {
    'hostname': 'test',
    'username': 'test_user',
    'password': 'test_pass!',
    'use_rest': 'always',
    'vserver': 'vserver',
}


def test_get_nonexistent_audit_config_rest():
    ''' GET returning no records maps to None (no audit configuration). '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['empty_records']),
    ])
    audit_obj = create_module(my_module, ARGS_REST)
    result = audit_obj.get_vserver_audit_configuration_rest()
    assert result is None


def test_get_existent_audit_config_rest():
    ''' GET returning a record is surfaced to the caller. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['audit_record']),
    ])
    audit_obj = create_module(my_module, ARGS_REST)
    result = audit_obj.get_vserver_audit_configuration_rest()
    assert result


def test_error_get_existent_audit_config_rest():
    ''' A REST error on GET is reported through module fail_json. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['generic_error']),
    ])
    error = call_main(my_main, ARGS_REST, fail=True)['msg']
    msg = "Error on fetching vserver audit configuration"
    assert msg in error


def test_create_audit_config_rest():
    ''' No existing configuration: a POST is issued and changed is reported. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['empty_records']),
        ('POST', 'protocols/audit', SRR['empty_good']),
    ])
    module_args = {
        "enabled": False,
        "events": {
            "authorization_policy": False,
            "cap_staging": False,
            "cifs_logon_logoff": True,
            "file_operations": True,
            "file_share": False,
            "security_group": False,
            "user_account": False
        },
        "log_path": "/",
        "log": {
            "format": "xml",
            "retention": {"count": 4},
            "rotation": {"size": 1048576}
        }
    }
    assert call_main(my_main, ARGS_REST, module_args)['changed']


# Renamed from test_create_audit_config_rest: the original file defined two
# functions with that name, so this error-path test silently shadowed the
# success-path test above and pytest never collected the first one.
def test_error_create_audit_config_rest():
    ''' A REST error on POST is reported through module fail_json. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['empty_records']),
        ('POST', 'protocols/audit', SRR['generic_error']),
    ])
    module_args = {
        "enabled": False,
        "events": {
            "authorization_policy": False,
            "cap_staging": False,
            "cifs_logon_logoff": True,
            "file_operations": True,
            "file_share": False,
            "security_group": False,
            "user_account": False
        },
        "log_path": "/",
        "log": {
            "format": "xml",
            "retention": {"count": 4},
            "rotation": {"size": 1048576}
        },
        "guarantee": False
    }
    error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg']
    msg = "Error on creating vserver audit configuration"
    assert msg in error


def test_modify_audit_config_rest():
    ''' Differences against the current record drive a PATCH. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['audit_record']),
        ('PATCH', 'protocols/audit/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']),
    ])
    module_args = {
        "enabled": True,
        "events": {
            "authorization_policy": True,
            "cap_staging": True,
            "cifs_logon_logoff": False,
            "file_operations": False,
            "file_share": True,
            "security_group": True,
            "user_account": True
        },
        "log_path": "/tmp",
        "log": {
            "format": "evtx",
            "retention": {"count": 5},
            "rotation": {"size": 10485760}
        }

    }
    assert call_main(my_main, ARGS_REST, module_args)['changed']


def test_enable_audit_config_rest():
    ''' Toggling only the enabled flag drives a PATCH. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['audit_record']),
        ('PATCH', 'protocols/audit/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']),
    ])
    module_args = {
        "enabled": False
    }
    assert call_main(my_main, ARGS_REST, module_args)['changed']


def test_error_modify_audit_config_rest():
    ''' A REST error on PATCH is reported through module fail_json. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['audit_record']),
        ('PATCH', 'protocols/audit/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']),
    ])
    module_args = {
        "enabled": True,
        "events": {
            "authorization_policy": True,
            "cap_staging": True,
            "cifs_logon_logoff": False,
            "file_operations": False,
            "file_share": True,
            "security_group": True,
            "user_account": True
        },
        "log_path": "/tmp",
        "log": {
            "format": "evtx",
            "retention": {"count": 5},
            "rotation": {"size": 10485760}
        }
    }
    error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg']
    msg = "Error on modifying vserver audit configuration"
    assert msg in error


# NOTE(review): despite the "error" in its name, this test registers two
# successful PATCH responses and asserts success — it exercises the two-step
# path (events modified first, then enabled flag) rather than an error.
# Consider renaming to match its intent.
def test_error_enabling_audit_config_rest():
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['audit_record']),
        ('PATCH', 'protocols/audit/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']),
        ('PATCH', 'protocols/audit/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']),
    ])
    module_args = {
        "enabled": False,
        "events": {
            "authorization_policy": False,
            "cap_staging": False,
            "cifs_logon_logoff": False,
            "file_operations": False,
            "file_share": True,
            "security_group": True,
            "user_account": True
        },
    }
    assert call_main(my_main, ARGS_REST, module_args)['changed']


def test_error_disabling_events_audit_config_rest():
    ''' All events disabled is rejected by the module before any audit call. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
    ])
    module_args = {
        "events": {
            "authorization_policy": False,
            "cap_staging": False,
            "cifs_logon_logoff": False,
            "file_operations": False,
            "file_share": False,
            "security_group": False,
            "user_account": False
        },
    }
    error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg']
    msg = "At least one event should be enabled"
    assert msg in error


@patch('time.sleep')
def test_delete_audit_config_rest(sleep):
    ''' Delete flow: disable via PATCH, poll via GET, then DELETE. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['audit_record']),
        ('PATCH', 'protocols/audit/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']),
        ('GET', 'protocols/audit', SRR['audit_record_modified']),
        ('DELETE', 'protocols/audit/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']),
    ])
    module_args = {
        "state": "absent"
    }
    assert call_main(my_main, ARGS_REST, module_args)['changed']


@patch('time.sleep')
def test_error_delete_audit_config_rest(sleep):
    ''' A REST error on DELETE is reported through module fail_json. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['audit_record']),
        ('PATCH', 'protocols/audit/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['empty_good']),
        ('GET', 'protocols/audit', SRR['audit_record_modified']),
        ('DELETE', 'protocols/audit/671aa46e-11ad-11ec-a267-005056b30cfa', SRR['generic_error']),
    ])
    module_args = {
        "state": "absent"
    }
    error = call_main(my_main, ARGS_REST, module_args, fail=True)['msg']
    msg = "Error on deleting vserver audit configuration"
    assert msg in error


def test_create_idempotent_rest():
    ''' Present + matching record: no change. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['audit_record']),
    ])
    module_args = {
        'state': 'present'
    }
    assert not call_main(my_main, ARGS_REST, module_args)['changed']


def test_delete_idempotent_rest():
    ''' Absent + no record: no change. '''
    register_responses([
        ('GET', 'cluster', SRR['is_rest_9_10_1']),
        ('GET', 'protocols/audit', SRR['empty_records'])
    ])
    module_args = {
        'state': 'absent'
    }
    assert not call_main(my_main, ARGS_REST, module_args)['changed']
# (c) 2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

''' unit test template for ONTAP Ansible module '''

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest

import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
# pylint: disable=unused-import
from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\
    patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception, AnsibleFailJson
from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record
from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses

from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_cifs_security \
    import NetAppONTAPCifsSecurity as cifs_security_module  # module under test

if not netapp_utils.has_netapp_lib():
    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')


# Canned ZAPI body: one existing cifs-security record matching DEFAULT_ARGS,
# so a test only triggers a modify when its module_args differ from these.
cifs_security_info = {
    'num-records': 1,
    'attributes-list': {
        'cifs-security': {
            'is_aes_encryption_enabled': False,
            'lm_compatibility_level': 'krb',
            'kerberos_clock_skew': 20
        }
    }
}

ZRR = zapi_responses({
    'cifs_security_info': build_zapi_response(cifs_security_info)
})

DEFAULT_ARGS = {
    'hostname': 'hostname',
    'username': 'username',
    'password': 'password',
    'vserver': 'vserver',
    'use_rest': 'never',
    'is_aes_encryption_enabled': False,
    'lm_compatibility_level': 'krb',
    'kerberos_clock_skew': 20
}


def test_module_fail_when_required_args_missing():
    ''' required arguments are reported as errors '''
    with pytest.raises(AnsibleFailJson) as exc:
        set_module_args({})
        cifs_security_module()
    print('Info: %s' % exc.value.args[0]['msg'])


def test_get():
    ''' an existing configuration is returned by cifs_security_get_iter '''
    register_responses([
        ('cifs-security-get-iter', ZRR['cifs_security_info'])
    ])
    cifs_obj = create_module(cifs_security_module, DEFAULT_ARGS)
    result = cifs_obj.cifs_security_get_iter()
    assert result


def test_modify_int_option():
    ''' changing an integer option triggers cifs-security-modify '''
    register_responses([
        ('cifs-security-get-iter', ZRR['cifs_security_info']),
        ('cifs-security-modify', ZRR['success']),
    ])
    module_args = {
        'kerberos_clock_skew': 15
    }
    assert create_and_apply(cifs_security_module, DEFAULT_ARGS, module_args)['changed']


def test_modify_bool_option():
    ''' changing a boolean option triggers cifs-security-modify '''
    register_responses([
        ('cifs-security-get-iter', ZRR['cifs_security_info']),
        ('cifs-security-modify', ZRR['success']),
    ])
    module_args = {
        'is_aes_encryption_enabled': True
    }
    assert create_and_apply(cifs_security_module, DEFAULT_ARGS, module_args)['changed']


def test_error_modify_bool_option():
    ''' a ZAPI error on modify is reported through fail_json '''
    register_responses([
        ('cifs-security-get-iter', ZRR['cifs_security_info']),
        ('cifs-security-modify', ZRR['error']),
    ])
    module_args = {
        'is_aes_encryption_enabled': True
    }
    # Fix: module_args was built but not passed, so DEFAULT_ARGS matched the
    # current record, no modify was attempted and the expected failure could
    # never be raised.
    error = create_and_apply(cifs_security_module, DEFAULT_ARGS, module_args, fail=True)['msg']
    assert 'Error modifying cifs security' in error


def test_if_all_methods_catch_exception():
    register_responses([
        ('cifs-security-modify', ZRR['error'])
    ])
    module_args = {'use_rest': 'never', 'is_aes_encryption_enabled': True}
    current = {}
    my_obj = create_module(cifs_security_module, DEFAULT_ARGS, module_args)

    error = expect_and_capture_ansible_exception(my_obj.cifs_security_modify, 'fail', current)['msg']
    assert 'Error modifying cifs security on vserver: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error
Reason - 12345:synthetic error for UT purpose' in error diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer.py new file mode 100644 index 000000000..2af8a151f --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer.py @@ -0,0 +1,440 @@ +# (c) 2018-2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import call_main, set_module_args,\ + AnsibleFailJson, patch_ansible, create_module, create_and_apply, expect_and_capture_ansible_exception, assert_warning_was_raised, print_warnings +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke,\ + register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_peer \ + import NetAppONTAPVserverPeer as vserver_peer, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +DEFAULT_ARGS = 
{
    'vserver': 'test',
    'peer_vserver': 'test_peer',
    'peer_cluster': 'test_cluster_peer',
    'local_name_for_peer': 'peer_name',
    'local_name_for_source': 'source_name',
    'applications': ['snapmirror'],
    'hostname': 'hostname',
    'username': 'username',
    'password': 'password',
    'feature_flags': {'no_cserver_ems': True},
    'use_rest': 'never'
}

# Canned ZAPI body: one existing peer relationship in 'peered' state.
vserver_peer_info = {
    'num-records': 1,
    'attributes-list': {
        'vserver-peer-info': {
            'remote-vserver-name': 'test_peer',
            'vserver': 'test',
            'peer-vserver': 'test_peer',
            'peer-state': 'peered'
        }
    }
}

cluster_info = {
    'attributes': {
        'cluster-identity-info': {'cluster-name': 'test_cluster_peer'}
    }
}

ZRR = zapi_responses({
    'vserver_peer_info': build_zapi_response(vserver_peer_info),
    'cluster_info': build_zapi_response(cluster_info)
})


def test_module_fail_when_required_args_missing():
    ''' required arguments are reported as errors '''
    with pytest.raises(AnsibleFailJson) as exc:
        set_module_args({})
        my_obj = vserver_peer()
    print('Info: %s' % exc.value.args[0]['msg'])


def test_successful_create():
    ''' Test successful create '''
    register_responses([
        ('vserver-peer-get-iter', ZRR['empty']),
        ('vserver-peer-create', ZRR['success']),
        ('vserver-peer-get-iter', ZRR['vserver_peer_info']),
        ('vserver-peer-accept', ZRR['success'])
    ])
    args = {'dest_hostname': 'test_destination'}
    assert create_and_apply(vserver_peer, DEFAULT_ARGS, args)['changed']


def test_successful_create_new_style():
    ''' Test successful create without local names, using peer_options '''
    register_responses([
        ('vserver-peer-get-iter', ZRR['empty']),
        ('vserver-peer-create', ZRR['success']),
        ('vserver-peer-get-iter', ZRR['vserver_peer_info']),
        ('vserver-peer-accept', ZRR['success'])
    ])
    # Fix: the original aliased DEFAULT_ARGS (default_args = DEFAULT_ARGS) and
    # then deleted keys from it, mutating the shared module-level dict and
    # breaking any test that runs afterwards; copy first, like the other tests.
    default_args = DEFAULT_ARGS.copy()
    # test without local name
    del default_args['local_name_for_peer']
    del default_args['local_name_for_source']
    args = {'peer_options': {'hostname': 'test_destination'}}
    assert create_and_apply(vserver_peer, default_args, args)['changed']


def test_create_idempotency():
    ''' Test create idempotency '''
    register_responses([
        ('vserver-peer-get-iter', ZRR['vserver_peer_info'])
    ])
    args = {'peer_options': {'hostname': 'test_destination'}}
    assert create_and_apply(vserver_peer, DEFAULT_ARGS, args)['changed'] is False


def test_successful_delete():
    ''' Test successful delete peer '''
    register_responses([
        ('vserver-peer-get-iter', ZRR['vserver_peer_info']),
        ('vserver-peer-delete', ZRR['success'])
    ])
    args = {
        'peer_options': {'hostname': 'test_destination'},
        'state': 'absent'
    }
    assert create_and_apply(vserver_peer, DEFAULT_ARGS, args)['changed']


def test_delete_idempotency():
    ''' Test delete idempotency '''
    register_responses([
        ('vserver-peer-get-iter', ZRR['empty'])
    ])
    args = {'dest_hostname': 'test_destination', 'state': 'absent'}
    assert create_and_apply(vserver_peer, DEFAULT_ARGS, args)['changed'] is False


def test_helper_vserver_peer_get_iter():
    ''' Test vserver_peer_get_iter method '''
    args = {'dest_hostname': 'test_destination'}
    obj = create_module(vserver_peer, DEFAULT_ARGS, args)
    result = obj.vserver_peer_get_iter('source')
    print(result.to_string(pretty=True))
    assert result['query'] is not None
    assert result['query']['vserver-peer-info'] is not None
    info = result['query']['vserver-peer-info']
    assert info['vserver'] == DEFAULT_ARGS['vserver']
    assert info['remote-vserver-name'] == DEFAULT_ARGS['peer_vserver']


def test_dest_hostname_absent():
    # when dest_hostname is omitted it defaults to the local hostname
    my_obj = create_module(vserver_peer, DEFAULT_ARGS)
    assert my_obj.parameters['hostname'] == my_obj.parameters['dest_hostname']


def test_get_packet():
    ''' Test vserver_peer_get method '''
    register_responses([
        ('vserver-peer-get-iter', ZRR['vserver_peer_info'])
    ])
    args = {'dest_hostname': 'test_destination'}
    obj = create_module(vserver_peer, DEFAULT_ARGS, args)
    result = obj.vserver_peer_get()
    assert 'vserver' in result.keys()
    assert 'peer_vserver' in result.keys()
    assert 'peer_state' in result.keys()
'vserver' in result.keys() + assert 'peer_vserver' in result.keys() + assert 'peer_state' in result.keys() + + +def test_error_on_missing_params_create(): + ''' Test error thrown from vserver_peer_create ''' + register_responses([ + ('vserver-peer-get-iter', ZRR['empty']) + ]) + default_args = DEFAULT_ARGS.copy() + del default_args['applications'] + args = {'dest_hostname': 'test_destination'} + msg = create_and_apply(vserver_peer, default_args, args, fail=True)['msg'] + assert 'applications parameter is missing' in msg + + +def test_get_peer_cluster_called(): + ''' Test get_peer_cluster_name called if peer_cluster is missing ''' + register_responses([ + ('vserver-peer-get-iter', ZRR['empty']), + ('cluster-identity-get', ZRR['cluster_info']), + ('vserver-peer-create', ZRR['success']), + ('vserver-peer-get-iter', ZRR['vserver_peer_info']), + ('vserver-peer-accept', ZRR['success']) + ]) + default_args = DEFAULT_ARGS.copy() + del default_args['peer_cluster'] + args = {'dest_hostname': 'test_destination'} + assert create_and_apply(vserver_peer, default_args, args)['changed'] + + +def test_get_peer_cluster_packet(): + ''' Test get_peer_cluster_name xml packet ''' + register_responses([ + ('cluster-identity-get', ZRR['cluster_info']) + ]) + args = {'dest_hostname': 'test_destination'} + obj = create_module(vserver_peer, DEFAULT_ARGS, args) + result = obj.get_peer_cluster_name() + assert result == DEFAULT_ARGS['peer_cluster'] + + +def test_error_on_first_ZAPI_call(): + ''' Test error thrown from vserver_peer_get ''' + register_responses([ + ('vserver-peer-get-iter', ZRR['error']) + ]) + args = {'dest_hostname': 'test_destination'} + msg = create_and_apply(vserver_peer, DEFAULT_ARGS, args, fail=True)['msg'] + assert 'Error fetching vserver peer' in msg + + +def test_error_create_new_style(): + ''' Test error in create - peer not visible ''' + register_responses([ + ('vserver-peer-get-iter', ZRR['empty']), + ('vserver-peer-create', ZRR['success']), + 
('vserver-peer-get-iter', ZRR['empty']) + ]) + args = {'peer_options': {'hostname': 'test_destination'}} + msg = create_and_apply(vserver_peer, DEFAULT_ARGS, args, fail=True)['msg'] + assert 'Error retrieving vserver peer information while accepting' in msg + + +def test_if_all_methods_catch_exception(): + register_responses([ + ('vserver-peer-delete', ZRR['error']), + ('cluster-identity-get', ZRR['error']), + ('vserver-peer-create', ZRR['error']) + ]) + args = {'dest_hostname': 'test_destination'} + my_obj = create_module(vserver_peer, DEFAULT_ARGS, args) + + error = expect_and_capture_ansible_exception(my_obj.vserver_peer_delete, 'fail', current={'local_peer_vserver': 'test_peer'})['msg'] + assert 'Error deleting vserver peer test: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.get_peer_cluster_name, 'fail')['msg'] + assert 'Error fetching peer cluster name for peer vserver test_peer: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + error = expect_and_capture_ansible_exception(my_obj.vserver_peer_create, 'fail')['msg'] + assert 'Error creating vserver peer test: NetApp API failed. Reason - 12345:synthetic error for UT purpose' in error + + +def test_error_in_vserver_accept(): + register_responses([ + ('vserver-peer-get-iter', ZRR['empty']), + ('vserver-peer-create', ZRR['success']), + ('vserver-peer-get-iter', ZRR['vserver_peer_info']), + ('vserver-peer-accept', ZRR['error']) + ]) + args = {'dest_hostname': 'test_destination'} + msg = create_and_apply(vserver_peer, DEFAULT_ARGS, args, fail=True)['msg'] + assert 'Error accepting vserver peer test_peer: NetApp API failed. 
Reason - 12345:synthetic error for UT purpose' == msg + + +DEFAULT_ARGS_REST = { + "hostname": "10.193.177.97", + "username": "admin", + "password": "netapp123", + "https": "yes", + "validate_certs": "no", + "use_rest": "always", + "state": "present", + "dest_hostname": "0.0.0.0", + "vserver": "svmsrc3", + "peer_vserver": "svmdst3", + "applications": ['snapmirror'] +} + + +SRR = rest_responses({ + 'vserver_peer_info': (200, { + "records": [{ + "vserver": "svmsrc1", + "peer_vserver": "svmdst1", + "name": "svmdst1", + "state": "peered", + "local_peer_vserver_uuid": "545d2562-2fca-11ec-8016-005056b3f5d5" + }], + 'num_records': 1 + }, None), + 'cluster_info': (200, {"name": "mohanontap98cluster"}, None), + 'job_info': (200, { + "job": { + "uuid": "d78811c1-aebc-11ec-b4de-005056b30cfa", + "_links": {"self": {"href": "/api/cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa"}} + }}, None), + 'job_not_found': (404, "", {"message": "entry doesn't exist", "code": "4", "target": "uuid"}) +}) + + +def test_ensure_get_server_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['vserver_peer_info']) + ]) + assert create_and_apply(vserver_peer, DEFAULT_ARGS_REST)['changed'] is False + + +def test_ensure_create_server_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['empty_records']), + ('POST', 'svm/peers', SRR['success']), + ('GET', 'svm/peers', SRR['vserver_peer_info']), + ('PATCH', 'svm/peers', SRR['success']) + ]) + assert create_and_apply(vserver_peer, DEFAULT_ARGS_REST, {'peer_cluster': 'peer_cluster'})['changed'] + + +def test_ensure_delete_server_called(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['vserver_peer_info']), + ('DELETE', 'svm/peers', SRR['success']) + ]) + assert 
create_and_apply(vserver_peer, DEFAULT_ARGS_REST, {'state': 'absent'})['changed'] + + +def test_create_vserver_peer_without_cluster_name_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['empty_records']), + ('GET', 'cluster', SRR['cluster_info']), + ('POST', 'svm/peers', SRR['success']), + ('GET', 'svm/peers', SRR['vserver_peer_info']), + ('PATCH', 'svm/peers', SRR['success']) + ]) + assert create_and_apply(vserver_peer, DEFAULT_ARGS_REST)['changed'] + + +def test_create_vserver_peer_with_local_name_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['empty_records']), + ('GET', 'cluster', SRR['cluster_info']), + ('POST', 'svm/peers', SRR['success']), + ('GET', 'svm/peers', SRR['vserver_peer_info']), + ('PATCH', 'svm/peers', SRR['success']) + ]) + args = { + 'local_name_for_peer': 'peer', + 'local_name_for_source': 'source' + } + assert create_and_apply(vserver_peer, DEFAULT_ARGS_REST, args)['changed'] + + +def test_error_in_vserver_accept_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['empty_records']), + ('GET', 'cluster', SRR['cluster_info']), + ('POST', 'svm/peers', SRR['success']), + ('GET', 'svm/peers', SRR['vserver_peer_info']), + ('PATCH', 'svm/peers', SRR['generic_error']) + ]) + msg = create_and_apply(vserver_peer, DEFAULT_ARGS_REST, fail=True)['msg'] + assert 'Error accepting vserver peer relationship on svmdst3: calling: svm/peers: got Expected error.' 
== msg + + +def test_error_in_vserver_get_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['generic_error']) + ]) + msg = create_and_apply(vserver_peer, DEFAULT_ARGS_REST, fail=True)['msg'] + assert 'Error fetching vserver peer svmsrc3: calling: svm/peers: got Expected error.' == msg + + +def test_error_in_vserver_delete_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['vserver_peer_info']), + ('DELETE', 'svm/peers', SRR['generic_error']) + ]) + msg = create_and_apply(vserver_peer, DEFAULT_ARGS_REST, {'state': 'absent'}, fail=True)['msg'] + assert 'Error deleting vserver peer relationship on svmsrc3: calling: svm/peers: got Expected error.' == msg + + +def test_error_in_peer_cluster_get_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['empty_records']), + ('GET', 'cluster', SRR['generic_error']) + ]) + msg = create_and_apply(vserver_peer, DEFAULT_ARGS_REST, fail=True)['msg'] + assert 'Error fetching peer cluster name for peer vserver svmdst3: calling: cluster: got Expected error.' == msg + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_missing_netapp_lib(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + msg = 'Error: the python NetApp-Lib module is required. 
Import error: None' + assert msg == create_module(vserver_peer, DEFAULT_ARGS, fail=True)['msg'] + + +@patch('time.sleep') +def test_job_error_in_vserver_delete_rest(dont_sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['vserver_peer_info']), + ('DELETE', 'svm/peers', SRR['job_info']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']) + ]) + assert create_and_apply(vserver_peer, DEFAULT_ARGS_REST, {'state': 'absent'})['changed'] + print_warnings() + assert_warning_was_raised('Ignoring job status, assuming success - Issue #45.') + + +@patch('time.sleep') +def test_job_error_in_vserver_create_rest(dont_sleep): + register_responses([ + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'cluster', SRR['is_rest_9_9_0']), + ('GET', 'svm/peers', SRR['empty_records']), + ('GET', 'cluster', SRR['empty_records']), + ('POST', 'svm/peers', SRR['job_info']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'cluster/jobs/d78811c1-aebc-11ec-b4de-005056b30cfa', SRR['job_not_found']), + ('GET', 'svm/peers', SRR['empty_records']), + ]) + assert call_main(my_main, DEFAULT_ARGS_REST, fail=True)['msg'] == 'Error reading vserver peer information on peer svmdst3' + print_warnings() + assert_warning_was_raised('Ignoring job status, assuming success - Issue #45.') diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer_permissions.py 
b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer_permissions.py new file mode 100644 index 000000000..b9d5af9b4 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_vserver_peer_permissions.py @@ -0,0 +1,226 @@ +# (c) 2023, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +import sys + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args, \ + patch_ansible, create_and_apply, create_module, expect_and_capture_ansible_exception, call_main +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import patch_request_and_invoke, register_responses, get_mock_record +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_vserver_peer_permissions \ + import NetAppONTAPVserverPeerPermissions as my_module, main as my_main # module under test + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + +SRR = rest_responses({ + 'peer_record': (200, { + "records": [ + { + "svm": {"name": "ansibleSVM", "uuid": "e3cb5c7fcd20"}, + "cluster_peer": {"name": "test912-2", "uuid": "1e3cb5c7fcd20"}, + "applications": ['snapmirror', 'flexcache'], + }], + "num_records": 1 + }, None), + "no_record": ( + 200, + {"num_records": 0}, + None) +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + 'applications': ['snapmirror'], +} + + +def 
test_error_validate_vserver_name_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']) + ]) + module_args = { + 'vserver': '*', + 'cluster_peer': 'test912-2' + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + msg = 'As svm name * represents all svms and created by default, please provide a specific SVM name' + assert msg in error + + +def test_error_validate_vserver_apps_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']) + ]) + module_args = { + 'state': 'present', + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + 'applications': [''] + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Applications field cannot be empty, at least one application must be specified' + assert msg in error + + +def test_get_vserver_peer_permission_rest_none(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['empty_records']) + ]) + module_args = { + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + result = my_obj.get_vserver_peer_permission_rest() + assert result is None + + +def test_get_vserver_peer_permission_rest_error(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['generic_error']) + ]) + module_args = { + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + } + my_module_object = create_module(my_module, DEFAULT_ARGS, module_args) + msg = 'Error on fetching vserver peer permissions' + assert msg in expect_and_capture_ansible_exception(my_module_object.get_vserver_peer_permission_rest, 'fail')['msg'] + + +def test_create_vserver_peer_permission_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['empty_records']), + ('POST', 'svm/peer-permissions', SRR['empty_good']) + 
]) + module_args = { + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + 'applications': ['snapmirror'] + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_vserver_peer_permission_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['empty_records']), + ('POST', 'svm/peer-permissions', SRR['generic_error']) + ]) + module_args = { + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + 'applications': ['snapmirror'] + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error on creating vserver peer permissions' + assert msg in error + + +def test_modify_vserver_peer_permission_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['peer_record']), + ('PATCH', 'svm/peer-permissions/1e3cb5c7fcd20/e3cb5c7fcd20', SRR['empty_good']) + ]) + module_args = { + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + 'applications': ['snapmirror'] + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_modify_vserver_peer_permission_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['peer_record']), + ('PATCH', 'svm/peer-permissions/1e3cb5c7fcd20/e3cb5c7fcd20', SRR['generic_error']) + ]) + module_args = { + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + 'applications': ['snapmirror'] + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error on modifying vserver peer permissions' + assert msg in error + + +def test_delete_vserver_peer_permission_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['peer_record']), + ('DELETE', 'svm/peer-permissions/1e3cb5c7fcd20/e3cb5c7fcd20', SRR['empty_good']) + ]) + module_args = { + 'state': 
'absent', + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_error_delete_vserver_peer_permission_rest(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['peer_record']), + ('DELETE', 'svm/peer-permissions/1e3cb5c7fcd20/e3cb5c7fcd20', SRR['generic_error']) + ]) + module_args = { + 'state': 'absent', + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2' + } + error = call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + print('Info: %s' % error) + msg = 'Error on deleting vserver peer permissions' + assert msg in error + + +def test_successfully_vserver_peer_permission_rest_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['peer_record']), + ]) + module_args = { + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + 'applications': ['snapmirror', 'flexcache'] + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_successfully_delete_vserver_peer_permission_rest_idempotency(): + register_responses([ + ('GET', 'cluster', SRR['is_rest_96']), + ('GET', 'svm/peer-permissions', SRR['empty_records']), + ]) + module_args = { + 'state': 'absent', + 'vserver': 'ansibleSVM', + 'cluster_peer': 'test912-2', + 'applications': ['snapmirror', 'flexcache'] + } + assert not call_main(my_main, DEFAULT_ARGS, module_args)['changed'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wait_for_condition.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wait_for_condition.py new file mode 100644 index 000000000..b851a9842 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wait_for_condition.py @@ -0,0 +1,485 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test template for ONTAP Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + call_main, create_module, expect_and_capture_ansible_exception, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.rest_factory import rest_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_wait_for_condition \ + import NetAppONTAPWFC as my_module, main as my_main + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +def sp_image_update_progress_info(in_progress=True): + return { + 'attributes': { + 'service-processor-image-update-progress-info': { + 'is-in-progress': 'true' if in_progress else 'false', + } + } + } + + +def sp_info(version): + return { + 'attributes': { + 'service-processor--info': { + 'firmware-version': version, + } + } + } + + +ZRR = zapi_responses({ + 'sp_info_3_09': build_zapi_response(sp_info('3.09'), 1), + 'sp_info_3_10': build_zapi_response(sp_info('3.10'), 1), + 'sp_image_update_progress_info_in_progress': build_zapi_response(sp_image_update_progress_info(True), 1), + 'sp_image_update_progress_info_idle': 
build_zapi_response(sp_image_update_progress_info(False), 1), +}) + + +SRR = rest_responses({ + 'one_record_home_node': (200, {'records': [ + {'name': 'node2_abc_if', + 'uuid': '54321', + 'enabled': True, + 'location': {'home_port': {'name': 'e0c'}, 'home_node': {'name': 'node2'}, 'node': {'name': 'node2'}, 'port': {'name': 'e0c'}} + }]}, None), + 'one_record_vserver': (200, {'records': [{ + 'name': 'abc_if', + 'uuid': '54321', + 'svm': {'name': 'vserver', 'uuid': 'svm_uuid'}, + 'data_protocol': ['nfs'], + 'enabled': True, + 'ip': {'address': '10.11.12.13', 'netmask': '255.192.0.0'}, + 'location': { + 'home_port': {'name': 'e0c'}, + 'home_node': {'name': 'node2'}, + 'node': {'name': 'node2'}, + 'port': {'name': 'e0c'}, + 'auto_revert': True, + 'failover': True + }, + 'service_policy': {'name': 'data-mgmt'} + }]}, None), + 'two_records': (200, {'records': [{'name': 'node2_abc_if'}, {'name': 'node2_abc_if'}]}, None), + 'error_precluster': (500, None, {'message': 'are available in precluster.'}), + 'cluster_identity': (200, {'location': 'Oz', 'name': 'abc'}, None), + 'node_309_online': (200, {'records': [ + {'service_processor': {'firmware_version': '3.09', 'state': 'online'}} + ]}, None), + 'node_309_updating': (200, {'records': [ + {'service_processor': {'firmware_version': '3.09', 'state': 'updating'}} + ]}, None), + 'node_310_online': (200, {'records': [ + {'service_processor': {'firmware_version': '3.10', 'state': 'online'}} + ]}, None), + 'snapmirror_relationship': (200, {'records': [ + {'state': 'snapmirrored'} + ]}, None), +}, False) + +DEFAULT_ARGS = { + 'hostname': '10.10.10.10', + 'username': 'admin', + 'password': 'password', + 'attributes': { + 'node': 'node1', + 'expected_version': '3.10' + } +} + + +def test_module_fail_when_required_args_missing(): + ''' required arguments are reported as errors ''' + module_args = { + 'use_rest': 'never' + } + error = create_module(my_module, module_args, fail=True)['msg'] + assert 'missing required arguments:' in 
error + assert 'name' in error + assert 'conditions' in error + + +@patch('time.sleep') +def test_rest_successful_wait_for_sp_upgrade(dont_sleep): + ''' Test successful sp_upgrade check ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ('GET', 'cluster/nodes', SRR['node_309_updating']), + ]) + module_args = { + 'use_rest': 'always', + 'name': 'sp_upgrade', + 'conditions': 'is_in_progress', + } + results = call_main(my_main, DEFAULT_ARGS, module_args) + assert results['msg'] == 'matched condition: is_in_progress' + assert results['states'] == 'online*2,updating' + assert results['last_state'] == 'updating' + + +@patch('time.sleep') +def test_rest_successful_wait_for_snapmirror_relationship(dont_sleep): + ''' Test successful snapmirror_relationship check ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'snapmirror/relationships', SRR['snapmirror_relationship']), + ]) + module_args = { + 'use_rest': 'always', + 'name': 'snapmirror_relationship', + 'conditions': 'transfer_state', + 'attributes': { + 'destination_path': 'path', + 'expected_transfer_state': 'idle' + } + } + results = call_main(my_main, DEFAULT_ARGS, module_args) + assert results['msg'] == 'matched condition: transfer_state' + # these are generated from dictionaries keys, and sequence is not guaranteed with python 3.5 + assert results['states'] in ['snapmirrored,idle', 'idle'] + assert results['last_state'] == 'idle' + + +@patch('time.sleep') +def test_rest_successful_wait_for_sp_version(dont_sleep): + ''' Test successful sp_version check ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ('GET', 'cluster/nodes', SRR['node_309_updating']), + ('GET', 'cluster/nodes', SRR['generic_error']), + ('GET', 'cluster/nodes', 
SRR['zero_records']), + ('GET', 'cluster/nodes', SRR['node_309_updating']), + ('GET', 'cluster/nodes', SRR['node_310_online']), + ]) + module_args = { + 'use_rest': 'always', + 'name': 'sp_version', + 'conditions': 'firmware_version', + } + results = call_main(my_main, DEFAULT_ARGS, module_args) + assert results['msg'] == 'matched condition: firmware_version' + assert results['states'] == '3.09*4,3.10' + assert results['last_state'] == '3.10' + + +@patch('time.sleep') +def test_rest_successful_wait_for_sp_version_not_matched(dont_sleep): + ''' Test successful sp_version check ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ('GET', 'cluster/nodes', SRR['node_309_updating']), + ('GET', 'cluster/nodes', SRR['generic_error']), + ('GET', 'cluster/nodes', SRR['zero_records']), + ('GET', 'cluster/nodes', SRR['node_309_updating']), + ('GET', 'cluster/nodes', SRR['node_310_online']), + ]) + module_args = { + 'use_rest': 'always', + 'name': 'sp_version', + 'conditions': ['firmware_version'], + 'state': 'absent', + 'attributes': { + 'node': 'node1', + 'expected_version': '3.09' + } + } + results = call_main(my_main, DEFAULT_ARGS, module_args) + assert results['msg'] == 'conditions not matched' + assert results['states'] == '3.09*4,3.10' + assert results['last_state'] == '3.10' + + +@patch('time.sleep') +def test_rest_negative_wait_for_sp_version_error(dont_sleep): + ''' Test negative sp_version check ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster/nodes', SRR['zero_records']), + ('GET', 'cluster/nodes', SRR['zero_records']), + ('GET', 'cluster/nodes', SRR['zero_records']), + ]) + module_args = { + 'use_rest': 'always', + 'name': 'sp_version', + 'conditions': 'firmware_version', + } + error = 'Error: no record for node:' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + 
+@patch('time.sleep') +def test_rest_negative_wait_for_sp_version_timeout(dont_sleep): + ''' Test negative sp_version check ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ('GET', 'cluster/nodes', SRR['node_309_online']), + ]) + module_args = { + 'use_rest': 'always', + 'name': 'sp_version', + 'conditions': 'firmware_version', + 'timeout': 40, + 'polling_interval': 12, + } + error = 'Error: timeout waiting for condition: firmware_version==3.10.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_zapi_successful_wait_for_sp_upgrade(dont_sleep): + ''' Test successful sp_upgrade check ''' + register_responses([ + ('ZAPI', 'service-processor-image-update-progress-get', ZRR['sp_image_update_progress_info_idle']), + ('ZAPI', 'service-processor-image-update-progress-get', ZRR['sp_image_update_progress_info_idle']), + ('ZAPI', 'service-processor-image-update-progress-get', ZRR['sp_image_update_progress_info_in_progress']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'sp_upgrade', + 'conditions': 'is_in_progress', + } + results = call_main(my_main, DEFAULT_ARGS, module_args) + assert results['msg'] == 'matched condition: is_in_progress' + assert results['states'] == 'false*2,true' + assert results['last_state'] == 'true' + + +@patch('time.sleep') +def test_zapi_successful_wait_for_sp_version(dont_sleep): + ''' Test successful sp_version check ''' + register_responses([ + ('ZAPI', 'service-processor-get', ZRR['sp_info_3_09']), + ('ZAPI', 'service-processor-get', ZRR['error']), + ('ZAPI', 'service-processor-get', ZRR['sp_info_3_09']), + ('ZAPI', 'service-processor-get', ZRR['sp_info_3_10']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'sp_version', + 'conditions': 'firmware_version', + } + results = 
call_main(my_main, DEFAULT_ARGS, module_args) + assert results['msg'] == 'matched condition: firmware_version' + assert results['states'] == '3.09*2,3.10' + assert results['last_state'] == '3.10' + + +def test_zapi_negative_wait_for_snapmirror_relationship_error(): + ''' Test negative snapmirror_relationship check ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + 'name': 'snapmirror_relationship', + 'conditions': 'state', + 'attributes': { + 'destination_path': 'path', + 'expected_state': 'snapmirrored' + } + } + error = 'Error: event snapmirror_relationship is not supported with ZAPI. It requires REST.' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_zapi_negative_wait_for_sp_version_error(dont_sleep): + ''' Test negative sp_version check ''' + register_responses([ + ('ZAPI', 'service-processor-get', ZRR['no_records']), + ('ZAPI', 'service-processor-get', ZRR['no_records']), + ('ZAPI', 'service-processor-get', ZRR['no_records']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'sp_version', + 'conditions': 'firmware_version', + } + error = 'Error: Cannot find element with name: firmware-version in results:' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +@patch('time.sleep') +def test_zapi_negative_wait_for_sp_version_timeout(dont_sleep): + ''' Test negative sp_version check ''' + register_responses([ + ('ZAPI', 'service-processor-get', ZRR['sp_info_3_09']), + ('ZAPI', 'service-processor-get', ZRR['error']), + ('ZAPI', 'service-processor-get', ZRR['sp_info_3_09']), + ('ZAPI', 'service-processor-get', ZRR['sp_info_3_09']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'sp_version', + 'conditions': 'firmware_version', + 'timeout': 30, + 'polling_interval': 9, + } + error = 'Error: timeout waiting for condition: firmware_version==3.10.' 
+ assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_invalid_name(): + ''' Test that name is valid ''' + register_responses([ + ]) + module_args = { + 'use_rest': 'never', + 'name': 'some_name', + 'conditions': 'firmware_version', + } + error = 'value of name must be one of:' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['use_rest'] = 'always' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_validate_resource(): + ''' KeyError on unexpected name ''' + module_args = { + 'use_rest': 'never', + 'name': 'sp_version', + 'conditions': 'firmware_version', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert 'some_name' in expect_and_capture_ansible_exception(my_obj.validate_resource, KeyError, 'some_name') + module_args['use_rest'] = 'always' + assert 'some_name' in expect_and_capture_ansible_exception(my_obj.validate_resource, KeyError, 'some_name') + + +def test_negative_build_zapi(): + ''' KeyError on unexpected name ''' + module_args = { + 'use_rest': 'never', + 'name': 'sp_version', + 'conditions': 'firmware_version', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert 'some_name' in expect_and_capture_ansible_exception(my_obj.build_zapi, KeyError, 'some_name') + + +def test_negative_build_rest_api_kwargs(): + ''' KeyError on unexpected name ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'name': 'sp_version', + 'conditions': 'firmware_version', + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + assert 'some_name' in expect_and_capture_ansible_exception(my_obj.build_rest_api_kwargs, KeyError, 'some_name') + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_wait_for_condition.NetAppONTAPWFC.get_record_rest') 
+@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_wait_for_condition.NetAppONTAPWFC.extract_condition') +def test_get_condition_other(mock_extract_condition, mock_get_record_rest): + ''' condition not found, non expected condition ignored ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'always', + 'name': 'sp_version', + 'conditions': 'firmware_version', + 'state': 'absent' + } + my_obj = create_module(my_module, DEFAULT_ARGS, module_args) + condition = 'other_condition' + mock_get_record_rest.return_value = None, None + mock_extract_condition.side_effect = [ + (None, None), + (condition, None), + ] + assert my_obj.get_condition('name', 'dummy') == ('conditions not matched', None) + assert my_obj.get_condition('name', 'dummy') == ('conditions not matched: found other condition: %s' % condition, None) + + +def test_invalid_condition(): + ''' Test that condition is valid ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'sp_upgrade', + 'conditions': [ + 'firmware_version', + 'some_condition' + ] + } + error = 'firmware_version is not valid for resource name: sp_upgrade' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + module_args['use_rest'] = 'always' + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +# def test_invalid_attributes(): +def test_missing_attribute(): + ''' Test that required attributes are present ''' + register_responses([ + ('GET', 'cluster', SRR['is_rest_97']), + ]) + module_args = { + 'use_rest': 'never', + 'name': 'sp_version', + 'conditions': [ + 'firmware_version', + ] + } + args = dict(DEFAULT_ARGS) + del args['attributes'] + error = 'name is sp_version but all of the following are missing: attributes' + assert error in call_main(my_main, args, module_args, fail=True)['msg'] + module_args['use_rest'] = 'always' + assert error in 
call_main(my_main, args, module_args, fail=True)['msg'] + module_args['use_rest'] = 'never' + args['attributes'] = {'node': 'node1'} + error = 'Error: attributes: expected_version is required for resource name: sp_version' + assert error in call_main(my_main, args, module_args, fail=True)['msg'] + module_args['use_rest'] = 'always' + assert error in call_main(my_main, args, module_args, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_negative_missing_netapp_lib(mock_netapp_lib): + ''' create cluster ''' + module_args = { + 'use_rest': 'never', + 'name': 'sp_version', + 'conditions': 'firmware_version', + } + mock_netapp_lib.return_value = False + error = "the python NetApp-Lib module is required" + assert error in call_main(my_main, DEFAULT_ARGS, module_args, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py new file mode 100644 index 000000000..1ceece18c --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_wwpn_alias.py @@ -0,0 +1,192 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for Ansible module: na_ontap_wwpn_alias ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest + +from ansible_collections.netapp.ontap.tests.unit.compat import unittest +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_wwpn_alias \ + import NetAppOntapWwpnAlias as alias_module # module under test 
+ +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + # module specific responses + 'get_alias': ( + 200, + {"records": [{ + "svm": { + "uuid": "uuid", + "name": "svm"}, + "alias": "host1", + "wwpn": "01:02:03:04:0a:0b:0c:0d"}], + "num_records": 1}, None), + 'get_svm_uuid': ( + 200, + {"records": [{ + "uuid": "test_uuid" + }]}, None), + "no_record": ( + 200, + {"num_records": 0}, + None) +} + + +class TestMyModule(unittest.TestCase): + ''' Unit tests for na_ontap_wwpn_alias ''' + + def setUp(self): + self.mock_alias = { + 'name': 'host1', + 'vserver': 'test_vserver' + } + + def mock_args(self): + return { + 'vserver': self.mock_alias['vserver'], + 'name': self.mock_alias['name'], + "wwpn": "01:02:03:04:0a:0b:0c:0d", + 'hostname': 'test_host', + 'username': 'test_user', + 'password': 'test_pass!' 
+ } + + def get_alias_mock_object(self): + alias_obj = alias_module() + return alias_obj + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_successful_create(self, mock_request): + '''Test successful rest create''' + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_svm_uuid'], + SRR['no_record'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_create_idempotency(self, mock_request): + '''Test rest create idempotency''' + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_svm_uuid'], + SRR['get_alias'], + SRR['no_record'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert not exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_create_error(self, mock_request): + '''Test rest create error''' + data = self.mock_args() + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_svm_uuid'], + SRR['no_record'], + SRR['generic_error'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['msg'] == "Error on creating wwpn alias: Expected error." 
+ + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_modify(self, mock_request): + '''Test rest modify error''' + data = self.mock_args() + data['wwpn'] = "01:02:03:04:0a:0b:0c:0e" + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_svm_uuid'], + SRR['get_alias'], + SRR['empty_good'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleExitJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_modify_error_delete(self, mock_request): + '''Test rest modify error''' + data = self.mock_args() + data['wwpn'] = "01:02:03:04:0a:0b:0c:0e" + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_svm_uuid'], + SRR['get_alias'], + SRR['generic_error'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['msg'] == "Error on modifying wwpn alias when trying to delete alias: Expected error." + + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_modify_error_create(self, mock_request): + '''Test rest modify error''' + data = self.mock_args() + data['wwpn'] = "01:02:03:04:0a:0b:0c:0e" + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_svm_uuid'], + SRR['get_alias'], + SRR['empty_good'], + SRR['generic_error'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['msg'] == "Error on modifying wwpn alias when trying to re-create alias: Expected error." 
+ + @patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') + def test_rest_delete_error(self, mock_request): + '''Test rest delete error''' + data = self.mock_args() + data['state'] = 'absent' + set_module_args(data) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['get_svm_uuid'], + SRR['get_alias'], + SRR['generic_error'], + SRR['empty_good'], + SRR['end_of_sequence'] + ] + with pytest.raises(AnsibleFailJson) as exc: + self.get_alias_mock_object().apply() + assert exc.value.args[0]['msg'] == "Error on deleting wwpn alias: Expected error." diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_zapit.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_zapit.py new file mode 100644 index 000000000..fa64e1f88 --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_zapit.py @@ -0,0 +1,255 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests ONTAP Ansible module: na_ontap_zapit ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import\ + expect_and_capture_ansible_exception, call_main, create_module, create_and_apply, patch_ansible +from ansible_collections.netapp.ontap.tests.unit.framework.mock_rest_and_zapi_requests import\ + patch_request_and_invoke, register_responses +from ansible_collections.netapp.ontap.tests.unit.framework.zapi_factory import build_zapi_response, zapi_responses + +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_zapit \ + import NetAppONTAPZapi as 
my_module, main as my_main # module under test + +if not netapp_utils.has_netapp_lib(): + pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') + + +def cluster_image_info(): + version = 'Fattire__9.3.0' + return { + 'num-records': 1, + # composite response, attributes-list for cluster-image-get-iter and attributes for cluster-image-get + 'attributes-list': [ + {'cluster-image-info': { + 'node-id': 'node4test', + 'current-version': version}}, + {'cluster-image-info': { + 'node-id': 'node4test', + 'current-version': version}}, + ], + 'attributes': { + 'cluster-image-info': { + 'node-id': 'node4test', + 'current-version': version + }}, + } + + +def build_zapi_error_custom(errno, reason, results='results'): + ''' build an XML response + errno as int + reason as str + ''' + if not netapp_utils.has_netapp_lib(): + return 'build_zapi_error: netapp-lib is missing', 'invalid' + if results != 'results': + return (netapp_utils.zapi.NaElement(results), 'valid') + xml = {} + if errno is not None: + xml['errorno'] = errno + if reason is not None: + xml['reason'] = reason + response = netapp_utils.zapi.NaElement('results') + if xml: + response.translate_struct(xml) + return (response, 'valid') + + +ZRR = zapi_responses({ + 'cluster_image_info': build_zapi_response(cluster_image_info()), + 'error_no_errno': build_zapi_error_custom(None, 'some reason'), + 'error_no_reason': build_zapi_error_custom(18408, None), + 'error_no_results': build_zapi_error_custom(None, None, 'no_results') +}) + + +DEFAULT_ARGS = { + 'hostname': 'hostname', + 'username': 'username', + 'password': 'password', + 'zapi': {'cluster-image-get-iter': None} +} + + +def test_ensure_zapi_called_cluster(): + register_responses([ + + ('ZAPI', 'cluster-image-get-iter', ZRR['cluster_image_info']), + ]) + module_args = { + "use_rest": "never", + } + assert create_and_apply(my_module, DEFAULT_ARGS, module_args)['changed'] + + +def test_ensure_zapi_called_vserver(): + register_responses([ + ('ZAPI', 
'cluster-image-get-iter', ZRR['cluster_image_info']), + ]) + module_args = { + "use_rest": "never", + "vserver": "vserver", + "zapi": {'cluster-image-get-iter': {'attributes': None}} + } + assert call_main(my_main, DEFAULT_ARGS, module_args)['changed'] + + +def test_negative_zapi_called_attributes(): + register_responses([ + ('ZAPI', 'cluster-image-get-iter', ZRR['error']), + ]) + module_args = { + "use_rest": "never", + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + assert exception['msg'] == 'ZAPI failure: check errno and reason.' + assert exception['errno'] == '12345' + assert exception['reason'] == 'synthetic error for UT purpose' + + +def test_negative_zapi_called_element_no_errno(): + register_responses([ + ('ZAPI', 'cluster-image-get-iter', ZRR['error_no_errno']), + ]) + module_args = { + "use_rest": "never", + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + assert exception['msg'] == 'ZAPI failure: check errno and reason.' + assert exception['errno'] == 'ESTATUSFAILED' + assert exception['reason'] == 'some reason' + + +def test_negative_zapi_called_element_no_reason(): + register_responses([ + ('ZAPI', 'cluster-image-get-iter', ZRR['error_no_reason']), + ]) + module_args = { + "use_rest": "never", + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + assert exception['msg'] == 'ZAPI failure: check errno and reason.' + assert exception['errno'] == '18408' + assert exception['reason'] == 'Execution failure with unknown reason.' + + +def test_negative_zapi_unexpected_error(): + register_responses([ + ('ZAPI', 'cluster-image-get-iter', (netapp_utils.zapi.NaApiError(), 'valid')), + ]) + module_args = { + "use_rest": "never", + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + assert exception['msg'] == "Error running zapi cluster-image-get-iter: NetApp API failed. 
Reason - unknown:unknown" + + +def test_negative_two_zapis(): + register_responses([ + ]) + module_args = { + "use_rest": "never", + "zapi": {"1": 1, "2": 2} + } + exception = create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True) + assert 'A single ZAPI can be called at a time, received: ' in exception['msg'] + + +def test_negative_bad_zapi_type(): + register_responses([ + ]) + module_args = { + "use_rest": "never", + } + obj = create_module(my_module, DEFAULT_ARGS, module_args) + obj.zapi = "1" + error = 'A directory entry is expected, eg: system-get-version: , received: 1' + assert expect_and_capture_ansible_exception(obj.run_zapi, 'fail')['msg'] == error + obj.zapi = [3, 1] + error = 'A directory entry is expected, eg: system-get-version: , received: [3, 1]' + assert expect_and_capture_ansible_exception(obj.run_zapi, 'fail')['msg'] == error + + +# python 2.7 does not have bytes but str +BYTES_MARKER_BEGIN = "b'" if sys.version_info >= (3, 0) else '' +BYTES_MARKER_END = "'" if sys.version_info >= (3, 0) else '' +BYTES_TYPE = 'bytes' if sys.version_info >= (3, 0) else 'str' + + +def test_negative_zapi_called_element_no_results(): + register_responses([ + ('ZAPI', 'cluster-image-get-iter', ZRR['error_no_results']), + ]) + module_args = { + "use_rest": "never", + } + error = "Error running zapi, no results field: %s" % BYTES_MARKER_BEGIN + assert error in create_and_apply(my_module, DEFAULT_ARGS, module_args, fail=True)['msg'] + + +def test_negative_bad_zapi_response_to_string(): + module_args = { + "use_rest": "never", + } + obj = create_module(my_module, DEFAULT_ARGS, module_args) + error = "Error running zapi in to_string: '%s' object has no attribute 'to_string'" % BYTES_TYPE + assert expect_and_capture_ansible_exception(obj.jsonify_and_parse_output, 'fail', b'elem_valueelem_value') + error = "Error running zapi, no results field" + assert error in expect_and_capture_ansible_exception(obj.jsonify_and_parse_output, 'fail', xml)['msg'] + + 
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.has_netapp_lib') +def test_fail_netapp_lib_error(mock_has_netapp_lib): + mock_has_netapp_lib.return_value = False + assert 'Error: the python NetApp-Lib module is required. Import error: None' == call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_zapit.HAS_JSON', False) +def test_fail_netapp_lib_error(): + assert 'the python json module is required' == call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] + + +@patch('ansible_collections.netapp.ontap.plugins.modules.na_ontap_zapit.HAS_XMLTODICT', False) +def test_fail_netapp_lib_error(): + assert 'the python xmltodict module is required' == call_main(my_main, DEFAULT_ARGS, fail=True)['msg'] diff --git a/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_ontap_fdspt.py b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_ontap_fdspt.py new file mode 100644 index 000000000..80512768e --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_ontap_fdspt.py @@ -0,0 +1,164 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit test for ONTAP na_ontap_fdspt Ansible module ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import pytest +import sys + +from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch +import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils +# pylint: disable=unused-import +from ansible_collections.netapp.ontap.tests.unit.plugins.module_utils.ansible_mocks import set_module_args,\ + AnsibleFailJson, AnsibleExitJson, patch_ansible + +from ansible_collections.netapp.ontap.plugins.modules.na_ontap_fdspt \ + import NetAppOntapFDSPT as my_module # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = 
pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + + +def default_args(): + args = { + 'name': 'policy1', + 'vserver': 'vserver1', + 'hostname': '10.10.10.10', + 'username': 'username', + 'password': 'password', + 'use_rest': 'always', + 'ntfs_mode': 'ignore', + 'security_type': 'ntfs', + 'path': '/' + } + return args + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'is_rest': (200, dict(version=dict(generation=9, major=9, minor=0, full='dummy')), None), + 'is_rest_9_8': (200, dict(version=dict(generation=9, major=8, minor=0, full='dummy')), None), + 'is_zapi': (400, {}, "Unreachable"), + 'empty_good': (200, {}, None), + 'zero_record': (200, dict(records=[], num_records=0), None), + 'one_record_uuid': (200, dict(records=[dict(uuid='a1b2c3')], num_records=1), None), + 'end_of_sequence': (500, None, "Unexpected call to send_request"), + 'generic_error': (400, None, "Expected error"), + 'policy_task_record': ( + 200, { + 'records': [{ + 'vserver': 'vserver1', + 'policy_name': 'policy1', + 'index_num': 1, + 'path': '/', + 'security_type': 'ntfs', + 'ntfs_mode': 'ignore', + 'access_control': 'file_directory'}], + 'num_records': 1}, + None), +} + + +def test_module_fail_when_required_args_missing(patch_ansible): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + +def test_rest_missing_arguments(patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' test missing arguements ''' + args = dict(default_args()) + del args['hostname'] + set_module_args(args) + with pytest.raises(AnsibleFailJson) as exc: + my_module() + msg = 'missing required arguments: hostname' + assert exc.value.args[0]['msg'] == msg + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_create(mock_request, patch_ansible): # 
pylint: disable=redefined-outer-name,unused-argument + ''' Create security policies''' + args = dict(default_args()) + args['name'] = 'new_policy_task' + print(args) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['zero_record'], + SRR['empty_good'], # create + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_remove(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' remove Security policies ''' + args = dict(default_args()) + args['state'] = 'absent' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['policy_task_record'], + SRR['empty_good'], # delete + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 3 + + +@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_modify(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' remove Security policies ''' + args = dict(default_args()) + args['state'] = 'present' + args['name'] = 'policy1' + args['ntfs_mode'] = 'replace' + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['policy_task_record'], + SRR['empty_good'], # delete + SRR['empty_good'], # add + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is True + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 4 + + 
+@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request') +def test_rest_no_action(mock_request, patch_ansible): # pylint: disable=redefined-outer-name,unused-argument + ''' Idempotent test ''' + args = dict(default_args()) + set_module_args(args) + mock_request.side_effect = [ + SRR['is_rest'], + SRR['policy_task_record'], + SRR['end_of_sequence'] + ] + my_obj = my_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] is False + print(mock_request.mock_calls) + assert len(mock_request.mock_calls) == 2 diff --git a/ansible_collections/netapp/ontap/tests/unit/requirements.txt b/ansible_collections/netapp/ontap/tests/unit/requirements.txt new file mode 100644 index 000000000..290e4346d --- /dev/null +++ b/ansible_collections/netapp/ontap/tests/unit/requirements.txt @@ -0,0 +1,7 @@ +ipaddress ; python_version >= '2.7' +isodate ; python_version >= '2.7' +netapp-lib ; python_version >= '2.7' +requests ; python_version >= '2.7' +six ; python_version >= '2.7' +solidfire-sdk-python ; python_version >= '2.7' +xmltodict ; python_version >= '2.7' diff --git a/ansible_collections/netapp/storagegrid/.github/workflows/coverage.yml b/ansible_collections/netapp/storagegrid/.github/workflows/coverage.yml new file mode 100644 index 000000000..d33950e7a --- /dev/null +++ b/ansible_collections/netapp/storagegrid/.github/workflows/coverage.yml @@ -0,0 +1,45 @@ +name: NetApp.storagegrid Ansible Coverage + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Coverage on StorageGrid + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible stable-2.11 + run: pip install https://github.com/ansible/ansible/archive/stable-2.11.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + 
run: | + pwd + mkdir -p ansible_collections/netapp/storagegrid/ + rsync -av . ansible_collections/netapp/storagegrid/ --exclude ansible_collections/netapp/storagegrid/ + + - name: Run Unit Tests + run: ansible-test units --coverage --color --docker --python 3.8 + working-directory: ansible_collections/netapp/storagegrid/ + + # ansible-test support producing code coverage date + - name: Generate coverage report + run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ansible_collections/netapp/storagegrid/ + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + working-directory: ansible_collections/netapp/storagegrid/ + verbose: true \ No newline at end of file diff --git a/ansible_collections/netapp/storagegrid/.github/workflows/main.yml b/ansible_collections/netapp/storagegrid/.github/workflows/main.yml new file mode 100644 index 000000000..ec05c061d --- /dev/null +++ b/ansible_collections/netapp/storagegrid/.github/workflows/main.yml @@ -0,0 +1,48 @@ +name: NetApp.storagegrid Ansible CI + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Sanity (${{ matrix.ansible }} on storagegrid + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + ansible: + - stable-2.9 + - stable-2.10 + - stable-2.11 + - stable-2.12 + - stable-2.13 + - devel + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + # Ansible 2.14 requires 3.9 as a minimum + python-version: 3.9 + + - name: Install ansible (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/storagegrid/ + rsync -av . 
ansible_collections/netapp/storagegrid/ --exclude ansible_collections/netapp/storagegrid/ + - name: Run sanity tests storagegrid + run: ansible-test sanity --docker -v --color + working-directory: ansible_collections/netapp/storagegrid/ + + - name: Run Unit Tests + run: ansible-test units --docker -v --color + working-directory: ansible_collections/netapp/storagegrid/ diff --git a/ansible_collections/netapp/storagegrid/CHANGELOG.rst b/ansible_collections/netapp/storagegrid/CHANGELOG.rst new file mode 100644 index 000000000..c3d64be86 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/CHANGELOG.rst @@ -0,0 +1,172 @@ +=========================================== +NetApp StorageGRID Collection Release Notes +=========================================== + +.. contents:: Topics + + +v21.11.1 +======== + +Bugfixes +-------- + +- na_sg_org_container - fix versioning not enabled on initial bucket creation. + +v21.11.0 +======== + +Minor Changes +------------- + +- na_sg_org_container - supports versioning configuration for S3 buckets available in StorageGRID 11.6+. + +New Modules +----------- + +- netapp.storagegrid.na_sg_grid_client_certificate - Manage Client Certificates on StorageGRID. + +v21.10.0 +======== + +Minor Changes +------------- + +- na_sg_grid_gateway - supports specifying HA Groups by name or UUID. + +Bugfixes +-------- + +- na_sg_org_group - fixed behaviour where update to ``s3_policy`` is ignored if ``management_policy`` is set. + +New Modules +----------- + +- netapp.storagegrid.na_sg_grid_ha_group - Manage high availability (HA) group configuration on StorageGRID. +- netapp.storagegrid.na_sg_grid_traffic_classes - Manage Traffic Classification Policy configuration on StorageGRID. + +v21.9.0 +======= + +Minor Changes +------------- + +- PR2 - allow usage of Ansible module group defaults - for Ansible 2.12+. +- na_sg_grid_gateway - supports load balancer endpoint binding available in StorageGRID 11.5+. 
+- na_sg_org_container - supports creation of S3 Object Lock buckets available in StorageGRID 11.5+. + +Bugfixes +-------- + +- na_sg_grid_account - minor documentation fix. +- na_sg_grid_gateway - existing endpoints matched by ``name`` and ``port``. + +v21.8.0 +======= + +Minor Changes +------------- + +- PR2 - allow usage of Ansible module group defaults - for Ansible 2.12+. + +v21.7.0 +======= + +Minor Changes +------------- + +- Updated documentation - added RETURN block for each module + +New Modules +----------- + +- netapp.storagegrid.na_sg_grid_gateway - Manage Load balancer (gateway) endpoints on StorageGRID. + +v21.6.0 +======= + +Minor Changes +------------- + +- na_sg_org_container - supports deletion of buckets when ``state`` is set to ``absent``. + +Bugfixes +-------- + +- na_sg_org_container - fix issue with applying compliance settings on buckets. + +New Modules +----------- + +- netapp.storagegrid.na_sg_grid_certificate - Manage the Storage API and Grid Management certificates on StorageGRID. +- netapp.storagegrid.na_sg_grid_identity_federation - NetApp StorageGRID manage Grid identity federation. +- netapp.storagegrid.na_sg_org_identity_federation - NetApp StorageGRID manage Tenant identity federation. + +v20.11.0 +======== + +Minor Changes +------------- + +- na_sg_grid_account - New option ``root_access_account`` for granting initial root access permissions for the tenant to an existing federated group + +New Modules +----------- + +- netapp.storagegrid.na_sg_grid_info - NetApp StorageGRID Grid information gatherer +- netapp.storagegrid.na_sg_org_info - NetApp StorageGRID Org information gatherer + +v20.10.0 +======== + +Minor Changes +------------- + +- na_sg_grid_account - new option ``update_password`` for managing Tenant Account root password changes. +- na_sg_grid_user - new option ``password`` and ``update_password`` for setting or updating Grid Admin User passwords. 
+- na_sg_org_user - new option ``password`` and ``update_password`` for setting or updating Tenant User passwords. + +Breaking Changes / Porting Guide +-------------------------------- + +- This version introduces a breaking change. + All modules have been renamed from ``nac_sg_*`` to ``na_sg_*``. + Playbooks and Roles must be updated to match. + +Bugfixes +-------- + +- na_sg_grid_account - added ``no_log`` flag to password fields. +- na_sg_grid_account - fixed documentation issue. +- na_sg_grid_group - fixed group name parsing. +- na_sg_org_group - fixed group name parsing. + +v20.6.1 +======= + +Minor Changes +------------- + +- Fixed documentation issue in README.md + +Bugfixes +-------- + +- nac_sg_org_container - fixed documentation issue. + +v20.6.0 +======= + +New Modules +----------- + +- netapp.storagegrid.nac_sg_grid_account - NetApp StorageGRID Manage Tenant account. +- netapp.storagegrid.nac_sg_grid_dns - NetApp StorageGRID Manage Grid DNS servers. +- netapp.storagegrid.nac_sg_grid_group - NetApp StorageGRID Manage Grid admin group. +- netapp.storagegrid.nac_sg_grid_ntp - NetApp StorageGRID Manage Grid NTP servers. +- netapp.storagegrid.nac_sg_grid_regions - NetApp StorageGRID Manage Grid Regions. +- netapp.storagegrid.nac_sg_grid_user - NetApp StorageGRID Manage Grid admin user. +- netapp.storagegrid.nac_sg_org_container - NetApp StorageGRID Manage S3 bucket. +- netapp.storagegrid.nac_sg_org_group - NetApp StorageGRID Manage Tenant group. +- netapp.storagegrid.nac_sg_org_user - NetApp StorageGRID Manage Tenant user. +- netapp.storagegrid.nac_sg_org_user_s3_key - NetApp StorageGRID Manage S3 key. diff --git a/ansible_collections/netapp/storagegrid/COPYING b/ansible_collections/netapp/storagegrid/COPYING new file mode 100644 index 000000000..20d40b6bc --- /dev/null +++ b/ansible_collections/netapp/storagegrid/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
\ No newline at end of file diff --git a/ansible_collections/netapp/storagegrid/FILES.json b/ansible_collections/netapp/storagegrid/FILES.json new file mode 100644 index 000000000..86ed3f4c8 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/FILES.json @@ -0,0 +1,572 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7cc4959877dbe6b6c63a8eb1bfe3bfb545fa8fe5b28b1b2c13e4a7c1c0d1c4d4", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e790e69d7116516a69110a233da28e21442e5fee8805b3b6f985854f27f26449", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f7a966f44fca740564887beea1232f8e89bad232cde62d9de9a12428ec442c4", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1a4b77fd27fd5bf2810c0db1d4692093ae5b310992fb183e3817e2e3903891a", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_client_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e0df90a4c30e50481afe5f508798187ecfbe31b1f5bb7e29e3d724d41203373f", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_traffic_classes.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "e27c214bb0ff1e9022a575647d1c05d7e0f7bf5c2b35fa49b576f7beb64ce79c", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_org_user_s3_key.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d71cb0c2c53424b5ce28ead1b06f9c3b3106c709e4d2f0b74879b0eef2d668b6", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b10f9a744d3f78024f3b82f082338aed13f31b5aa71efa86f4ba8ed8cce3088", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_org_identity_federation.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93bf629ba06db59b1c74bfda8422471a64d3804776207dc925b175ed3378ed9c", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_account.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a20270da62abd5c22c977081e1c4a11b9f96e39d56254711edb21f7c5cbe7f5b", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc4be89bd9b2977d59fa51e1ef72b0522d2aad116e7dcb531ba0b214854d878b", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_org_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a088c55bad0b8b4a1c5e19024ed922ff4388ab92b3edcdcd91353b27cdf3d05a", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_ha_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48017730eff1e3a04c47c9187488eb915b18ef9293ca06b9dd49832eb2877856", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_org_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b6e5bff028d0bd999f8b7a6998e59451c7bd4e5c0de63953ddd19ee13fafad5", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e3659af27a8a5b26b5ff95eb407a1d0b393a74f22b8e9a66a762b75565dab69", + "format": 1 + }, + { + "name": 
"plugins/modules/na_sg_grid_identity_federation.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "279f42f668b563bf22aa79be77fd3b90de7dab0a8bccbfe4773900acf820e64c", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fe8a0e6ce5aefad01638e493dda3964019360e0c04cd6aa3b1e432d586b00d9", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_org_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5766c6c0c639b848c6ce3007533059d570f66f9c48f477e0e915b3f5c90a96ae", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "324308668d15dbc88894b4aea9f9838093d3dd38e09307c94038cff4d289a614", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_gateway.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "036c8706d7ff3ca3a5b970e0fa4d10ba4d33f7cfcfa160acf868d5bb9dc51eaf", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_org_container.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70b7ca86ed2853ca6fa99a5dc712770e342d1d5b95fea154a67d7c7db3df3c8c", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_dns.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccc1e086f899279cce569309c183b358bce0620446fbc8602c94f3be1f5b793f", + "format": 1 + }, + { + "name": "plugins/modules/na_sg_grid_regions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7aa2472c25390e20db5437cb23e2824546854c18033b7e92e77eb627b92981a7", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/unit/compat/unittest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cba95d18c5b39c6f49714eacf1ac77452c2e32fa087c03cf01aacd19ae597b0f", + "format": 1 + }, + { + "name": "tests/unit/compat/builtins.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba13a350ade8ef804336f888d5883b8e54f8bddfb9d0fadc10277a8ca6540f4e", + "format": 1 + }, + { + "name": "tests/unit/compat/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/compat/mock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d18988875cf9d824a5089d062935b2750d96e99e47925a2eb62c527d92c9cc13", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68a61b1d58a722f4ffabaa28da01c9837c93a582ea41c1bfb1c1fd54ea2d8fab", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_dns.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a96a6d0218e6e7764786af0bf8dc8fbb24c3b492d57a627a7cf827bb33b2c327", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_regions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "870928ef29b01babed679fb5d207eb997ed724aa97135528f45cfda0314705c3", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_org_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff42467a461b8a69a2a2895242852444c89e0471bc0101ae10f99bbcf54528cb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_info.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "8b713f3a28e2948c42996d06cc69f316ed2576999d2372ed354c474e0a52472e", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_org_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7fa1a8212a01dd81411129c7a31048d3e154c9729a0ac28a8016ace1fbd6e87", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_ntp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7299330b0ddfa005c4976c10b3c564b4c84c5f3cc620d566f9baa0ebe63460b1", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "15ad7caefc2c6fc3c9e473d176e032473f2065dfb249f3524263ec6129b61e6c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_identity_federation.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3943d740fc467010966238cce4d10aa484ea70d6e689704043cee7344e1916b5", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_gateway.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c87e92555fb3aafaa82e197910b8fa61976ce358e220da2e0968469ea0df505c", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_client_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ab3f4d2b408d5943dfb334ed64729ba5b9a2974c78ef1c2cb48622289181e52", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d046329c9ba078773c442a8bc47b00c76b49eac762f05f1b43789689056e295a", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_org_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d167cae9bf3aebf2b3b8b6b2736701a37e0c86ebae8ad89223deca373d4e4dd", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_account.py", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "eebfa82a09b7b3413d77749c1afc10a69cde0d90e940b0d454af807cebc476be", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_org_identity_federation.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cda86d8e2632609873d7ccda496ca69e0884ba82712418e23148025f0f3a5482", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_traffic_classes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ec3ad3f3716e69cd39a3dddbbffc1796d415b4a8a729f5d61289a3f39967b18", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_ha_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0782c6e1a6a9185cf4724da8d7e4363f6af13934efd4152779fcbe9aa05c2b7", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_org_container.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9593869384f8f9f0f0de69e63f72a90de414e858dbfead681966e54b9ebd4b23", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_org_user_s3_key.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d39947658ff5fdbc740275959f4fc82fcbdfd84553b771d78632d461e1638b4f", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_sg_grid_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "614c50a7ef9b7fe296025441e0350b78cb5e381f04d336f1cb49512d60404605", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe82353b93dc1bedb4d4f8695b23d6a33537fd0401c406547d132d4497ff7ca2", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/github-66.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49dfee10361876355f72737bd6f8d86ce6cea258bac589e20ec31125d6d4023a", + "format": 1 + }, + { + "name": "changelogs/fragments/github-10.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82584dd77f4f48618a3ae519fdfe9347774691201e4fd98868d21cc782820ecd", + "format": 1 + }, + { + "name": "changelogs/fragments/20.10.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2567ca0de5c5aa2b12772cf077003ea361b46b046e7fff165fbfb901dc86ff24", + "format": 1 + }, + { + "name": "changelogs/fragments/21.9.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b2062ed359ef484360534a91969fdb39e59f741cd5aa96b18e08de79bef81f1", + "format": 1 + }, + { + "name": "changelogs/fragments/20.7.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e197a5a1f5b3a2e38dfd7456429e59a3a01a8f80e86e445ff1b7d5b5acf7dc3e", + "format": 1 + }, + { + "name": "changelogs/fragments/github-8.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e99536aa20608eaee59a0f0bc586425750a4561b39b78fa50722be660f4333a3", + "format": 1 + }, + { + "name": "changelogs/fragments/21.11.1.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c392f498059c8b2db2e7756553dde51d153bb7003f22c379641746e0bcf26188", + "format": 1 + }, + { + "name": "changelogs/fragments/20.6.1.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54144fc957437d09f4efd29f0dbfd18cfe40e21d7196e1c0d9dca35acc644a70", + "format": 1 + }, + { + "name": "changelogs/fragments/21.6.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f058dcc3961d0dc37d26b6a3ea6aefd477378bb51f8bdbe0595c2bf1c145b73", + "format": 1 + }, + { + "name": "changelogs/fragments/21.11.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b05d16dd6f1c82d0b43033d464d48f2c487cc90195fd1f0a8d79c4b8f97560a5", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4416.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6742492ce83786ffcddc12818ef3771ef915123fbe3b0518a101044435701af3", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c330af70628d6a33029dadb8c4e4aac81eb5e82946651f804cff46bd0736cbe", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d6b62b96b1a730bbb09d926e37a0dc4f1d3cf9f4218e8a2feb4a00c30c66171", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1fb8b745f8becd6ea2348808e1979a31486ab9357ec1197cb3136c2727d712b2", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c53a65c2fd561c87eaabf1072ef5dcab8653042bc15308465f52413585eb6271", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "faf067634d432b31207f4ad48bdc037d2ec518a7df4377cb0533126fe9a50a21", + "format": 1 + }, + { + "name": ".github/workflows/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "07c19a69adcb2c9c482cadf8785c3bc0bf621ff161a592b48db9458e0673b1c5", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "131e7c4ca2ddb153d23af2174d262f4b0ae62079e892ecf36f062e929d43f729", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/storagegrid/MANIFEST.json 
b/ansible_collections/netapp/storagegrid/MANIFEST.json new file mode 100644 index 000000000..53238fc0d --- /dev/null +++ b/ansible_collections/netapp/storagegrid/MANIFEST.json @@ -0,0 +1,32 @@ +{ + "collection_info": { + "namespace": "netapp", + "name": "storagegrid", + "version": "21.11.1", + "authors": [ + "NetApp Ansible Team " + ], + "readme": "README.md", + "tags": [ + "storage", + "netapp", + "storagegrid" + ], + "description": "NetApp StorageGRID Collection", + "license": [], + "license_file": "COPYING", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/netapp.storagegrid", + "documentation": null, + "homepage": "https://netapp.io/configuration-management-and-automation/", + "issues": null + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9021a8a169b205830d9914712b6c039123456973f905ab635b85e37192f132c", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/storagegrid/README.md b/ansible_collections/netapp/storagegrid/README.md new file mode 100644 index 000000000..582a77329 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/README.md @@ -0,0 +1,199 @@ +![example workflow](https://github.com/ansible-collections/netapp.storagegrid/actions/workflows/main.yml/badge.svg) +[![codecov](https://codecov.io/gh/ansible-collections/netapp.storagegrid/branch/main/graph/badge.svg?token=weBYkksxSi)](https://codecov.io/gh/ansible-collections/netapp.storagegrid) +[![Discord](https://img.shields.io/discord/855068651522490400)](https://discord.gg/NetApp) + + +============================================================= + + netapp.storagegrid + + NetApp StorageGRID Collection + + Copyright (c) 2020 NetApp, Inc. All rights reserved. + Specifications subject to change without notice. 
+ +============================================================= + +# Installation + +```bash +ansible-galaxy collection install netapp.storagegrid +``` +To use this collection add the following to the top of your playbook. +``` +collections: + - netapp.storagegrid +``` + +# Usage + +Each of the StorageGRID modules require an `auth_token` parameter to be specified. This can be obtained by executing a `uri` task against the StorageGRID Authorization API endpoint and registering the output as the first item in a Playbook. + +If you are performing a Tenant operation, ensure that the `accountId` parameter is also specified in the URI body and set to the Tenant Account ID. For example, `"accountId": "01234567890123456789"` + +```yaml +- name: Get Grid Authorization token + uri: + url: "https://sgadmin.example.com/api/v3/authorize" + method: POST + body: { + "username": "root", + "password": "storagegrid123", + "cookie": false, + "csrfToken": false + } + body_format: json + validate_certs: false + register: auth +``` + +Subsequent tasks can leverage the registered auth token. + +```yaml +- name: Create a StorageGRID Tenant Account + netapp.storagegrid.na_sg_grid_account: + api_url: "https://sgadmin.example.com" + auth_token: "{{ auth.json.data }}" + validate_certs: false + state: present + name: AnsibleTenant + protocol: s3 + management: true + use_own_identity_source: true + allow_platform_services: true + password: "mytenantrootpassword" + quota_size: 10 +``` + +# Versioning + +[Releasing, Versioning and Deprecation](https://github.com/ansible-collections/netapp/issues/93) + +# Need help + +Join our [Discord](https://discord.gg/NetApp) + +# Code of Conduct + +This collection follows the [Ansible project's Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html). + +# Release Notes + +## 21.11.1 + +### Bug Fixes + - na_sg_org_container - fix versioning not enabled on initial bucket creation. 
+ +## 21.11.0 + +### Minor Changes + - na_sg_org_container - supports versioning configuration for S3 buckets available in StorageGRID 11.6+. + +### New Modules + - na_sg_grid_client_certificate - Manage Client Certificates on StorageGRID. + +## 21.10.0 + +### Minor Changes + - na_sg_grid_gateway - supports specifying HA Groups by name or UUID. + +### Bug Fixes + - na_sg_org_group - fixed behaviour where update to ``s3_policy`` is ignored if ``management_policy`` is set. + +### New Modules + - na_sg_grid_ha_group - Manage high availability (HA) group configuration on StorageGRID. + - na_sg_grid_traffic_classes - Manage Traffic Classification Policy configuration on StorageGRID. + +## 21.9.0 + +### Minor Changes + - na_sg_grid_gateway - supports load balancer endpoint binding available in StorageGRID 11.5+. + - na_sg_org_container - supports creation of S3 Object Lock buckets available in StorageGRID 11.5+. + +### Bug Fixes + - na_sg_grid_gateway - existing endpoints matched by ``name`` and ``port``. + - na_sg_grid_account - minor documentation fix. + +## 21.8.0 + +### Minor Changes + - all modules - enable usage of Ansible module group defaults - for Ansible 2.12+. + +## 21.7.0 + +### New Modules + +- na_sg_grid_gateway: Manage Load balancer (gateway) endpoints + +### Minor Changes +- Updated documentation - added RETURN block for each module + +## 21.6.0 + +### New Modules + +- na_sg_grid_certificate: Manage the Storage API and Grid Management certificates on StorageGRID. +- na_sg_grid_identity_federation: Manage Grid identity federation. +- na_sg_org_identity_federation: Manage Tenant identity federation. + +### Minor Changes +- na_sg_org_container - supports deletion of buckets when `state` is set to `absent`. + +### Bug Fixes +- na_sg_org_container - fix issue with applying compliance settings on buckets. 
+ +## 20.11.0 + +### New Modules + +- na_sg_grid_info: Gather StorageGRID Grid subset information +- na_sg_org_info: Gather StorageGRID Org subset information + +### Minor Changes + +- na_sg_grid_account: new option `root_access_account` for granting initial root access permissions for the tenant to an existing federated group + +## 20.10.0 + +### Breaking Changes + +This version introduces a breaking change. All modules have been renamed from `nac_sg_*` to `na_sg_*`. Playbooks and Roles must be updated to match. + +### Bug Fixes + +- na_sg_grid_account: fixed documentation issue. +- na_sg_grid_account: added `no_log` flag to password fields +- na_sg_grid_group: fixed group name parsing +- na_sg_org_group: fixed group name parsing + +### New Options + +- na_sg_grid_account: new option `update_password` for managing Tenant Account root password changes +- na_sg_org_user: new option `password` and `update_password` for setting or updating Tenant User passwords +- na_sg_grid_user: new option `password` and `update_password` for setting or updating Grid Admin User passwords + +## 20.6.1 + +### Minor Changes +- Fixed documentation issue in README.md + +### Bug Fixes +- nac_sg_org_container: fixed documentation issue. 
+ +## 20.6.0 + +Initial release of NetApp StorageGRID Ansible modules + +### New Modules + +- nac_sg_grid_account: create/modify/delete Tenant account +- nac_sg_grid_dns: set Grid DNS servers +- nac_sg_grid_group: create/modify/delete Grid admin group +- nac_sg_grid_ntp: set Grid NTP servers +- nac_sg_grid_regions: set Grid Regions +- nac_sg_grid_user: create/modify/delete Grid admin user +- nac_sg_org_container: create S3 bucket +- nac_sg_org_group: create/modify/delete Tenant group +- nac_sg_org_user: create/modify/delete Tenant user +- nac_sg_org_user_s3_key: create/delete S3 key + diff --git a/ansible_collections/netapp/storagegrid/changelogs/changelog.yaml b/ansible_collections/netapp/storagegrid/changelogs/changelog.yaml new file mode 100644 index 000000000..288cbdc34 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/changelog.yaml @@ -0,0 +1,171 @@ +ancestor: null +releases: + 20.10.0: + changes: + breaking_changes: + - 'This version introduces a breaking change. + + All modules have been renamed from ``nac_sg_*`` to ``na_sg_*``. + + Playbooks and Roles must be updated to match.' + bugfixes: + - na_sg_grid_account - added ``no_log`` flag to password fields. + - na_sg_grid_account - fixed documentation issue. + - na_sg_grid_group - fixed group name parsing. + - na_sg_org_group - fixed group name parsing. + minor_changes: + - na_sg_grid_account - new option ``update_password`` for managing Tenant Account + root password changes. + - na_sg_grid_user - new option ``password`` and ``update_password`` for setting + or updating Grid Admin User passwords. + - na_sg_org_user - new option ``password`` and ``update_password`` for setting + or updating Tenant User passwords. 
+ fragments: + - 20.10.0.yaml + release_date: '2020-10-15' + 20.11.0: + changes: + minor_changes: + - na_sg_grid_account - New option ``root_access_account`` for granting initial + root access permissions for the tenant to an existing federated group + fragments: + - github-66.yaml + modules: + - description: NetApp StorageGRID Grid information gatherer + name: na_sg_grid_info + namespace: '' + - description: NetApp StorageGRID Org information gatherer + name: na_sg_org_info + namespace: '' + release_date: '2020-11-18' + 20.6.0: + modules: + - description: NetApp StorageGRID Manage Tenant account. + name: nac_sg_grid_account + namespace: '' + - description: NetApp StorageGRID Manage Grid DNS servers. + name: nac_sg_grid_dns + namespace: '' + - description: NetApp StorageGRID Manage Grid admin group. + name: nac_sg_grid_group + namespace: '' + - description: NetApp StorageGRID Manage Grid NTP servers. + name: nac_sg_grid_ntp + namespace: '' + - description: NetApp StorageGRID Manage Grid Regions. + name: nac_sg_grid_regions + namespace: '' + - description: NetApp StorageGRID Manage Grid admin user. + name: nac_sg_grid_user + namespace: '' + - description: NetApp StorageGRID Manage S3 bucket. + name: nac_sg_org_container + namespace: '' + - description: NetApp StorageGRID Manage Tenant group. + name: nac_sg_org_group + namespace: '' + - description: NetApp StorageGRID Manage Tenant user. + name: nac_sg_org_user + namespace: '' + - description: NetApp StorageGRID Manage S3 key. + name: nac_sg_org_user_s3_key + namespace: '' + release_date: '2020-06-09' + 20.6.1: + changes: + bugfixes: + - nac_sg_org_container - fixed documentation issue. + minor_changes: + - Fixed documentation issue in README.md + fragments: + - 20.6.1.yaml + release_date: '2020-06-09' + 21.10.0: + changes: + bugfixes: + - na_sg_org_group - fixed behaviour where update to ``s3_policy`` is ignored + if ``management_policy`` is set. 
+ minor_changes: + - na_sg_grid_gateway - supports specifying HA Groups by name or UUID. + fragments: + - github-10.yaml + - github-8.yaml + modules: + - description: Manage high availability (HA) group configuration on StorageGRID. + name: na_sg_grid_ha_group + namespace: '' + - description: Manage Traffic Classification Policy configuration on StorageGRID. + name: na_sg_grid_traffic_classes + namespace: '' + release_date: '2022-03-17' + 21.11.0: + changes: + minor_changes: + - na_sg_org_container - supports versioning configuration for S3 buckets available + in StorageGRID 11.6+. + fragments: + - 21.11.0.yaml + modules: + - description: Manage Client Certificates on StorageGRID. + name: na_sg_grid_client_certificate + namespace: '' + release_date: '2022-09-06' + 21.11.1: + changes: + bugfixes: + - na_sg_org_container - fix versioning not enabled on initial bucket creation. + fragments: + - 21.11.1.yaml + release_date: '2022-09-23' + 21.6.0: + changes: + bugfixes: + - na_sg_org_container - fix issue with applying compliance settings on buckets. + minor_changes: + - na_sg_org_container - supports deletion of buckets when ``state`` is set to + ``absent``. + fragments: + - 21.6.0.yaml + modules: + - description: Manage the Storage API and Grid Management certificates on StorageGRID. + name: na_sg_grid_certificate + namespace: '' + - description: NetApp StorageGRID manage Grid identity federation. + name: na_sg_grid_identity_federation + namespace: '' + - description: NetApp StorageGRID manage Tenant identity federation. + name: na_sg_org_identity_federation + namespace: '' + release_date: '2021-06-16' + 21.7.0: + changes: + minor_changes: + - Updated documentation - added RETURN block for each module + fragments: + - 20.7.0.yaml + modules: + - description: Manage Load balancer (gateway) endpoints on StorageGRID. 
+ name: na_sg_grid_gateway + namespace: '' + release_date: '2021-10-05' + 21.8.0: + changes: + minor_changes: + - PR2 - allow usage of Ansible module group defaults - for Ansible 2.12+. + fragments: + - DEVOPS-4416.yaml + release_date: '2021-11-11' + 21.9.0: + changes: + bugfixes: + - na_sg_grid_account - minor documentation fix. + - na_sg_grid_gateway - existing endpoints matched by ``name`` and ``port``. + minor_changes: + - PR2 - allow usage of Ansible module group defaults - for Ansible 2.12+. + - na_sg_grid_gateway - supports load balancer endpoint binding available in + StorageGRID 11.5+. + - na_sg_org_container - supports creation of S3 Object Lock buckets available + in StorageGRID 11.5+. + fragments: + - 21.9.0.yaml + release_date: '2021-12-17' diff --git a/ansible_collections/netapp/storagegrid/changelogs/config.yaml b/ansible_collections/netapp/storagegrid/changelogs/config.yaml new file mode 100644 index 000000000..871e634c1 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/config.yaml @@ -0,0 +1,32 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: true +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: NetApp StorageGRID Collection +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/20.10.0.yaml 
b/ansible_collections/netapp/storagegrid/changelogs/fragments/20.10.0.yaml new file mode 100644 index 000000000..350ee15dc --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/20.10.0.yaml @@ -0,0 +1,14 @@ +breaking_changes: + - | + This version introduces a breaking change. + All modules have been renamed from ``nac_sg_*`` to ``na_sg_*``. + Playbooks and Roles must be updated to match. +minor_changes: + - na_sg_grid_account - new option ``update_password`` for managing Tenant Account root password changes. + - na_sg_org_user - new option ``password`` and ``update_password`` for setting or updating Tenant User passwords. + - na_sg_grid_user - new option ``password`` and ``update_password`` for setting or updating Grid Admin User passwords. +bugfixes: + - na_sg_grid_account - fixed documentation issue. + - na_sg_grid_account - added ``no_log`` flag to password fields. + - na_sg_grid_group - fixed group name parsing. + - na_sg_org_group - fixed group name parsing. diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/20.6.1.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/20.6.1.yaml new file mode 100644 index 000000000..3d1bb11a0 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/20.6.1.yaml @@ -0,0 +1,4 @@ +minor_changes: + - Fixed documentation issue in README.md +bugfixes: + - nac_sg_org_container - fixed documentation issue. 
diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/20.7.0.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/20.7.0.yaml new file mode 100644 index 000000000..a91168e87 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/20.7.0.yaml @@ -0,0 +1,2 @@ +minor_changes: + - Updated documentation - added RETURN block for each module diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/21.11.0.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/21.11.0.yaml new file mode 100644 index 000000000..a8fbff2df --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/21.11.0.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_sg_org_container - supports versioning configuration for S3 buckets available in StorageGRID 11.6+. diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/21.11.1.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/21.11.1.yaml new file mode 100644 index 000000000..229a6c51f --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/21.11.1.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_sg_org_container - fix versioning not enabled on initial bucket creation. diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/21.6.0.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/21.6.0.yaml new file mode 100644 index 000000000..6f8a660eb --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/21.6.0.yaml @@ -0,0 +1,4 @@ +minor_changes: + - na_sg_org_container - supports deletion of buckets when ``state`` is set to ``absent``. +bugfixes: + - na_sg_org_container - fix issue with applying compliance settings on buckets. 
diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/21.9.0.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/21.9.0.yaml new file mode 100644 index 000000000..87aecb16c --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/21.9.0.yaml @@ -0,0 +1,6 @@ +minor_changes: + - na_sg_grid_gateway - supports load balancer endpoint binding available in StorageGRID 11.5+. + - na_sg_org_container - supports creation of S3 Object Lock buckets available in StorageGRID 11.5+. +bugfixes: + - na_sg_grid_gateway - existing endpoints matched by ``name`` and ``port``. + - na_sg_grid_account - minor documentation fix. diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/DEVOPS-4416.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/DEVOPS-4416.yaml new file mode 100644 index 000000000..63514c5c5 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/DEVOPS-4416.yaml @@ -0,0 +1,2 @@ +minor_changes: + - PR2 - allow usage of Ansible module group defaults - for Ansible 2.12+. diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/github-10.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/github-10.yaml new file mode 100644 index 000000000..62e8845c6 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/github-10.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_sg_grid_gateway - supports specifying HA Groups by name or UUID. 
\ No newline at end of file diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/github-66.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/github-66.yaml new file mode 100644 index 000000000..a515be8c2 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/github-66.yaml @@ -0,0 +1,2 @@ +minor_changes: + - na_sg_grid_account - New option ``root_access_account`` for granting initial root access permissions for the tenant to an existing federated group diff --git a/ansible_collections/netapp/storagegrid/changelogs/fragments/github-8.yaml b/ansible_collections/netapp/storagegrid/changelogs/fragments/github-8.yaml new file mode 100644 index 000000000..f21ecbe46 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/changelogs/fragments/github-8.yaml @@ -0,0 +1,2 @@ +bugfixes: + - na_sg_org_group - fixed behaviour where update to ``s3_policy`` is ignored if ``management_policy`` is set. diff --git a/ansible_collections/netapp/storagegrid/meta/runtime.yml b/ansible_collections/netapp/storagegrid/meta/runtime.yml new file mode 100644 index 000000000..4625ad259 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/meta/runtime.yml @@ -0,0 +1,23 @@ +--- +requires_ansible: ">=2.9.10" +action_groups: + netapp_storagegrid: + - na_sg_grid_account + - na_sg_grid_certificate + - na_sg_grid_client_certificate + - na_sg_grid_dns + - na_sg_grid_gateway + - na_sg_grid_group + - na_sg_grid_ha_group + - na_sg_grid_identity_federation + - na_sg_grid_info + - na_sg_grid_ntp + - na_sg_grid_regions + - na_sg_grid_traffic_classes + - na_sg_grid_user + - na_sg_org_container + - na_sg_org_group + - na_sg_org_identity_federation + - na_sg_org_info + - na_sg_org_user + - na_sg_org_user_s3_key diff --git a/ansible_collections/netapp/storagegrid/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/storagegrid/plugins/doc_fragments/netapp.py new file mode 100644 index 000000000..8c06f9d99 --- /dev/null +++ 
# Copyright: (c) 2019, NetApp Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type


class ModuleDocFragment(object):
    """Reusable Ansible documentation fragments for NetApp StorageGRID modules."""

    # Generic placeholder fragment; each platform documents its own options.
    DOCUMENTATION = r"""
options:
  - See respective platform section for more details
requirements:
  - See respective platform section for more details
notes:
  - This is documentation for NetApp's StorageGRID modules.
"""

    # Connection options shared by every StorageGRID (na_sg_*) module.
    SG = """
options:
  auth_token:
    required: true
    type: str
    description:
    - The authorization token for the API request
  api_url:
    required: true
    type: str
    description:
    - The url to the StorageGRID Admin Node REST API.
  validate_certs:
    required: false
    default: true
    description:
    - Should https certificates be validated?
    type: bool
notes:
  - The modules prefixed with C(na_sg) are built to manage NetApp StorageGRID.
"""
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import, division, print_function

__metaclass__ = type

import json
import mimetypes
import os
import random

from pprint import pformat
from ansible.module_utils import six
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import open_url
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils._text import to_native

COLLECTION_VERSION = "21.11.1"

try:
    import requests

    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False

import ssl

try:
    from urlparse import urlparse, urlunparse
except ImportError:
    from urllib.parse import urlparse, urlunparse


POW2_BYTE_MAP = dict(
    # Here, 1 kb = 1024
    bytes=1,
    b=1,
    kb=1024,
    mb=1024**2,
    gb=1024**3,
    tb=1024**4,
    pb=1024**5,
    eb=1024**6,
    zb=1024**7,
    yb=1024**8,
)


def na_storagegrid_host_argument_spec():
    """Return the argument spec shared by every StorageGRID module.

    Covers the connection options documented in the SG doc fragment:
    api_url, validate_certs and auth_token (no_log, as it is a credential).
    """
    return dict(
        api_url=dict(required=True, type="str"),
        validate_certs=dict(required=False, type="bool", default=True),
        auth_token=dict(required=True, type="str", no_log=True),
    )


class SGRestAPI(object):
    """Minimal client for the StorageGRID Admin Node REST API.

    Wraps the ``requests`` library and exposes get/post/put/patch/delete
    helpers that all return a ``(json_dict, error_details)`` tuple.
    """

    def __init__(self, module, timeout=60):
        # module: an AnsibleModule whose params carry the connection options.
        self.module = module
        self.auth_token = self.module.params["auth_token"]
        self.api_url = self.module.params["api_url"]
        self.verify = self.module.params["validate_certs"]
        self.timeout = timeout
        self.check_required_library()
        # Cached product version; 'valid' stays False until set_version succeeds.
        self.sg_version = dict(major=-1, minor=-1, full="", valid=False)

    def check_required_library(self):
        """Fail the module early if the requests library is not installed."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib("requests"))

    def send_request(self, method, api, params, json=None):
        """Send an HTTP request and process the response, including error conditions.

        :param method: HTTP verb, e.g. "GET".
        :param api: path relative to api_url.
        :param params: query parameters dict (or None).
        :param json: optional request body, serialized as JSON.
        :return: tuple (json_dict, error_details); error_details is None on success.
        """
        url = "%s/%s" % (self.api_url, api)
        status_code = None
        json_dict = None
        json_error = None
        error_details = None
        headers = {
            "Content-type": "application/json",
            "Authorization": self.auth_token,
            "Cache-Control": "no-cache",
        }

        def get_json(response):
            """Extract the JSON body, and the API error message if present."""
            try:
                # NOTE: named 'body' (not 'json') so it does not shadow the
                # json module or the 'json' request-body parameter above.
                body = response.json()
            except ValueError:
                return None, None
            success_codes = (200, 201, 202, 204)
            if response.status_code not in success_codes:
                error = body.get("message")
            else:
                error = None
            return body, error

        # Pre-bind so the except clauses can safely test whether a response
        # object was ever assigned (fixes a latent NameError in the HTTPError
        # handler when requests.request itself raised).
        response = None
        try:
            response = requests.request(
                method,
                url,
                headers=headers,
                timeout=self.timeout,
                json=json,
                verify=self.verify,
                params=params,
            )
            status_code = response.status_code
            # If the response was successful, no Exception will be raised
            json_dict, json_error = get_json(response)
        except requests.exceptions.HTTPError as err:
            if response is not None:
                dummy, json_error = get_json(response)
            if json_error is None:
                error_details = str(err)
        except requests.exceptions.ConnectionError as err:
            error_details = str(err)
        except Exception as err:
            error_details = str(err)
        # If an error was reported in the json payload, it takes precedence.
        if json_error is not None:
            error_details = json_error

        return json_dict, error_details

    def get(self, api, params=None):
        """GET convenience wrapper around send_request."""
        return self.send_request("GET", api, params)

    def post(self, api, data, params=None):
        """POST convenience wrapper around send_request."""
        return self.send_request("POST", api, params, json=data)

    def patch(self, api, data, params=None):
        """PATCH convenience wrapper around send_request."""
        return self.send_request("PATCH", api, params, json=data)

    def put(self, api, data, params=None):
        """PUT convenience wrapper around send_request."""
        return self.send_request("PUT", api, params, json=data)

    def delete(self, api, data, params=None):
        """DELETE convenience wrapper around send_request."""
        return self.send_request("DELETE", api, params, json=data)

    def get_sg_product_version(self, api_root="grid"):
        """Query the product-version endpoint and cache the result.

        :param api_root: "grid" or "org", selecting the API scope.
        Fails the module if the request itself errors.
        """
        api = "api/v3/%s/config/product-version" % api_root
        message, error = self.send_request("GET", api, params={})
        if error:
            self.module.fail_json(msg=error)
        self.set_version(message)

    def set_version(self, message):
        """Record the StorageGRID version from a product-version API response.

        Leaves sg_version marked invalid (instead of raising) when the payload
        is missing or malformed.  The original code raised an uncaught
        ValueError when 'productVersion' was absent, because int() was applied
        to the "not_found" fallback string.
        """
        try:
            product_version = message.get("data", {}).get("productVersion", "")
            major, minor = (int(part) for part in product_version.split(".")[0:2])
        except (AttributeError, ValueError):
            # AttributeError: 'data' is not a dict; ValueError: version string
            # is empty or not dotted integers.
            self.sg_version["valid"] = False
            return

        self.sg_version["major"], self.sg_version["minor"] = major, minor
        self.sg_version["full"] = product_version
        self.sg_version["valid"] = True

    def get_sg_version(self):
        """Return (major, minor), or (-1, -1) when no valid version is cached."""
        if self.sg_version["valid"]:
            return self.sg_version["major"], self.sg_version["minor"]
        return -1, -1

    def meets_sg_minimum_version(self, minimum_major, minimum_minor):
        """True if the cached version is at least minimum_major.minimum_minor."""
        return self.get_sg_version() >= (minimum_major, minimum_minor)

    def requires_sg_version(self, module_or_option, version):
        """Return the standard 'requires StorageGRID X or later' message."""
        return "%s requires StorageGRID %s or later." % (module_or_option, version)

    def fail_if_not_sg_minimum_version(self, module_or_option, minimum_major, minimum_minor):
        """Fail the module when the grid version is below the given minimum."""
        version = self.get_sg_version()
        if version < (minimum_major, minimum_minor):
            msg = "Error: " + self.requires_sg_version(module_or_option, "%d.%d" % (minimum_major, minimum_minor))
            msg += " Found: %s.%s." % version
            self.module.fail_json(msg=msg)
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

""" Support class for NetApp ansible modules """

from __future__ import absolute_import, division, print_function

from copy import deepcopy

__metaclass__ = type

import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils


def cmp(obj1, obj2):
    """
    Python 3 does not have a cmp function; this emulates it.

    Strings (and string elements of lists) are compared case-insensitively.
    Lists are sorted before comparison, so element order is ignored.

    :param obj1: first object to check (None compares as "less than")
    :param obj2: second object to check
    :return: -1 if obj1 < obj2, 0 if equal, 1 if obj1 > obj2
    """
    # None always sorts first; callers guard against obj2 being None.
    if obj1 is None:
        return -1
    # convert to lower case for string comparison.
    if isinstance(obj1, str) and isinstance(obj2, str):
        obj1 = obj1.lower()
        obj2 = obj2.lower()
    # if list has string element, convert string to lower case.
    # NOTE: the comprehension builds new lists, so the in-place sort below
    # does not mutate the caller's data.
    if isinstance(obj1, list) and isinstance(obj2, list):
        obj1 = [x.lower() if isinstance(x, str) else x for x in obj1]
        obj2 = [x.lower() if isinstance(x, str) else x for x in obj2]
        obj1.sort()
        obj2.sort()
    # standard cmp idiom: True/False subtraction yields -1, 0 or 1
    return (obj1 > obj2) - (obj1 < obj2)


class NetAppModule(object):
    """
    Common class for NetApp modules.

    A set of support functions to derive actions based on the current state
    of the system and a desired state.  Instances accumulate a ``changed``
    flag that modules report back to Ansible.
    """

    def __init__(self):
        # free-form log entries (kept for diagnostics by callers)
        self.log = list()
        # set to True by the helpers below whenever a change is required
        self.changed = False
        # placeholder until set_parameters() is called
        self.parameters = {"name": "not initialized"}

    def set_parameters(self, ansible_params):
        """Copy the module parameters, dropping any that are None.

        Downstream helpers rely on absent keys (rather than None values)
        to detect "parameter not supplied".
        """
        self.parameters = dict()
        for param in ansible_params:
            if ansible_params[param] is not None:
                self.parameters[param] = ansible_params[param]
        return self.parameters

    def get_cd_action(self, current, desired):
        """ takes a desired state and a current state, and return an action:
            create, delete, None
            eg:
            is_present = 'absent'
            some_object = self.get_object(source)
            if some_object is not None:
                is_present = 'present'
            action = cd_action(current=is_present, desired = self.desired.state())

        Side effect: sets self.changed = True when an action is required.
        """
        if "state" in desired:
            desired_state = desired["state"]
        else:
            # absence of an explicit state means the resource should exist
            desired_state = "present"

        if current is None and desired_state == "absent":
            return None
        if current is not None and desired_state == "present":
            return None
        # change in state
        self.changed = True
        if current is not None:
            return "delete"
        return "create"

    def compare_and_update_values(self, current, desired, keys_to_compare):
        """For each key in keys_to_compare, pick the desired value when it
        differs from current, otherwise keep the current value.

        :return: (merged dict, bool indicating whether anything changed)
        """
        updated_values = dict()
        is_changed = False
        for key in keys_to_compare:
            if key in current:
                if key in desired and desired[key] is not None:
                    if current[key] != desired[key]:
                        updated_values[key] = desired[key]
                        is_changed = True
                    else:
                        updated_values[key] = current[key]
                else:
                    # not requested (or explicitly None): keep current value
                    updated_values[key] = current[key]

        return updated_values, is_changed

    @staticmethod
    def check_keys(current, desired):
        ''' TODO: raise an error if keys do not match
            with the exception of:
            new_name, state in desired

        Currently a no-op placeholder, called from get_modified_attributes().
        '''

    def is_rename_action(self, source, target):
        """ takes a source and target object, and returns True
            if a rename is required
            eg:
            source = self.get_object(source_name)
            target = self.get_object(target_name)
            action = is_rename_action(source, target)
            :return: None for error, True for rename action, False otherwise

        Side effect: sets self.changed = True when a rename is required.
        """
        if source is None and target is None:
            # error, do nothing
            # cannot rename an non existent resource
            # alternatively we could create B
            return None
        if source is not None and target is not None:
            # error, do nothing
            # idempotency (or) new_name_is_already_in_use
            # alternatively we could delete B and rename A to B
            return False
        if source is None and target is not None:
            # do nothing, maybe the rename was already done
            return False
        # source is not None and target is None:
        # rename is in order
        self.changed = True
        return True

    @staticmethod
    def compare_lists(current, desired, get_list_diff):
        ''' compares two lists and return a list of elements that are either the desired elements or elements that are
            modified from the current state depending on the get_list_diff flag
            :param: current: current item attribute in ONTAP
            :param: desired: attributes from playbook
            :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
            :return: list of attributes to be modified, or None when the lists already match
            :rtype: list
        '''
        # work on copies so repeated elements are matched one-for-one
        # without mutating the caller's lists
        current_copy = deepcopy(current)
        desired_copy = deepcopy(desired)

        # get what in desired and not in current
        desired_diff_list = list()
        for item in desired:
            if item in current_copy:
                current_copy.remove(item)
            else:
                desired_diff_list.append(item)

        # get what in current but not in desired
        current_diff_list = []
        for item in current:
            if item in desired_copy:
                desired_copy.remove(item)
            else:
                current_diff_list.append(item)

        if desired_diff_list or current_diff_list:
            # there are changes
            if get_list_diff:
                return desired_diff_list
            else:
                return desired
        else:
            return None

    def get_modified_attributes(self, current, desired, get_list_diff=False):
        ''' takes two dicts of attributes and return a dict of attributes that are
            not in the current state
            It is expected that all attributes of interest are listed in current and
            desired.
            :param: current: current attributes on StorageGRID
            :param: desired: attributes from playbook
            :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
            :return: dict of attributes to be modified
            :rtype: dict
            NOTE: depending on the attribute, the caller may need to do a modify or a
            different operation (eg move volume if the modified attribute is an
            aggregate name)

            Side effect: sets self.changed = True when any attribute differs.
        '''
        # if the object does not exist, we can't modify it
        modified = {}
        if current is None:
            return modified

        # error out if keys do not match (currently a no-op, see check_keys)
        self.check_keys(current, desired)

        # collect changed attributes; only keys present in BOTH dicts (and
        # not None in desired) are considered
        for key, value in current.items():
            if key in desired and desired[key] is not None:
                if isinstance(value, list):
                    modified_list = self.compare_lists(value, desired[key], get_list_diff)  # get modified list from current and desired
                    if modified_list is not None:
                        modified[key] = modified_list
                elif isinstance(value, dict):
                    # recurse into nested dicts
                    modified_dict = self.get_modified_attributes(value, desired[key])
                    if modified_dict:
                        modified[key] = modified_dict
                else:
                    try:
                        result = cmp(value, desired[key])
                    except TypeError as exc:
                        # incompatible types (eg int vs str): surface the key
                        # and both values to make the failure diagnosable
                        raise TypeError("%s, key: %s, value: %s, desired: %s" % (repr(exc), key, repr(value), repr(desired[key])))
                    else:
                        if result != 0:
                            modified[key] = desired[key]
        if modified:
            self.changed = True
        return modified
b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_account.py @@ -0,0 +1,458 @@ +#!/usr/bin/python + +# (c) 2020, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Accounts""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + + +DOCUMENTATION = """ +module: na_sg_grid_account +short_description: NetApp StorageGRID manage accounts. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create, Update, Delete Tenant Accounts on NetApp StorageGRID. +options: + state: + description: + - Whether the specified account should exist or not. + - Required for all operations. + type: str + choices: ['present', 'absent'] + default: present + name: + description: + - Name of the tenant. + - Required for create or modify operation. + type: str + account_id: + description: + - Account Id of the tenant. + - May be used for modify or delete operation. + type: str + protocol: + description: + - Object Storage protocol used by the tenancy. + - Required for create operation. + type: str + choices: ['s3', 'swift'] + management: + description: + - Whether the tenant can login to the StorageGRID tenant portal. + type: bool + default: true + use_own_identity_source: + description: + - Whether the tenant account should configure its own identity source. + type: bool + allow_platform_services: + description: + - Allows tenant to use platform services features such as CloudMirror. + type: bool + root_access_group: + description: + - Existing federated group to have initial Root Access permissions for the tenant. 
+ - Must begin with C(federated-group/) + type: str + version_added: 20.11.0 + quota_size: + description: + - Quota to apply to the tenant specified in I(quota_size_unit). + - If you intend to have no limits, assign C(0). + type: int + default: 0 + quota_size_unit: + description: + - The unit used to interpret the size parameter. + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + type: str + default: 'gb' + password: + description: + - Root password for tenant account. + - Requires root privilege. + type: str + update_password: + description: + - Choose when to update the password. + - When set to C(always), the password will always be updated. + - When set to C(on_create) the password will only be set upon a new user creation. + default: on_create + choices: + - on_create + - always + type: str +""" + +EXAMPLES = """ + - name: create a tenant account + netapp.storagegrid.na_sg_grid_account: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + name: storagegrid-tenant-1 + protocol: s3 + management: true + use_own_identity_source: false + allow_platform_services: false + password: "tenant-password" + quota_size: 0 + + - name: update a tenant account + netapp.storagegrid.na_sg_grid_account: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + name: storagegrid-tenant-1 + protocol: s3 + management: true + use_own_identity_source: false + allow_platform_services: true + password: "tenant-password" + quota_size: 10240 + + - name: delete a tenant account + netapp.storagegrid.na_sg_grid_account: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: absent + name: storagegrid-tenant-1 + protocol: s3 +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID tenant account. 
import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import (
    NetAppModule,
)
from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import (
    SGRestAPI,
)


class SgGridAccount(object):
    """
    Create, modify and delete StorageGRID Tenant Accounts.

    The desired request body (self.data) is assembled once in __init__ from
    the module parameters; apply() decides whether to create, update, delete
    or leave the tenant alone.
    """

    def __init__(self):
        """
        Parse arguments, set up state variables,
        check parameters and ensure request module is installed.
        """
        self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec()
        self.argument_spec.update(
            dict(
                state=dict(required=False, type="str", choices=["present", "absent"], default="present"),
                name=dict(required=False, type="str"),
                account_id=dict(required=False, type="str"),
                protocol=dict(required=False, choices=["s3", "swift"]),
                management=dict(required=False, type="bool", default=True),
                use_own_identity_source=dict(required=False, type="bool"),
                allow_platform_services=dict(required=False, type="bool"),
                root_access_group=dict(required=False, type="str"),
                quota_size=dict(required=False, type="int", default=0),
                quota_size_unit=dict(
                    default="gb",
                    choices=[
                        "bytes",
                        "b",
                        "kb",
                        "mb",
                        "gb",
                        "tb",
                        "pb",
                        "eb",
                        "zb",
                        "yb",
                    ],
                    type="str",
                ),
                password=dict(required=False, type="str", no_log=True),
                update_password=dict(
                    default="on_create", choices=["on_create", "always"]
                ),
            )
        )

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                (
                    "state",
                    "present",
                    [
                        "name",
                        "protocol",
                        "use_own_identity_source",
                        "allow_platform_services",
                    ],
                )
            ],
            supports_check_mode=True,
        )

        self.na_helper = NetAppModule()

        # set up state variables (None-valued params are dropped here)
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic SG rest_api class
        self.rest_api = SGRestAPI(self.module)

        # Build the request body for create/update calls.
        # Use .get() for name/protocol: they are only mandatory when
        # state=present (enforced via required_if above), so they may be
        # absent when deleting a tenant by account_id.  Indexing them
        # directly would raise KeyError in that case.
        self.data = {}
        self.data["name"] = self.parameters.get("name")
        protocol = self.parameters.get("protocol")
        self.data["capabilities"] = [protocol] if protocol is not None else []

        if self.parameters.get("password") is not None:
            self.data["password"] = self.parameters["password"]

        # Append "management" to the capability list only if parameter is True
        if self.parameters.get("management"):
            self.data["capabilities"].append("management")

        self.data["policy"] = {}

        if "use_own_identity_source" in self.parameters:
            self.data["policy"]["useAccountIdentitySource"] = self.parameters["use_own_identity_source"]

        if "allow_platform_services" in self.parameters:
            self.data["policy"]["allowPlatformServices"] = self.parameters["allow_platform_services"]

        if self.parameters.get("root_access_group") is not None:
            self.data["grantRootAccessToGroup"] = self.parameters["root_access_group"]

        # Convert quota to bytes once; 0 means "no limit" and maps to None
        # in the API payload.
        if self.parameters["quota_size"] > 0:
            self.parameters["quota_size"] = (
                self.parameters["quota_size"]
                * netapp_utils.POW2_BYTE_MAP[self.parameters["quota_size_unit"]]
            )
            self.data["policy"]["quotaObjectBytes"] = self.parameters["quota_size"]
        elif self.parameters["quota_size"] == 0:
            self.data["policy"]["quotaObjectBytes"] = None

        # Separate body for the change-password endpoint.
        self.pw_change = {}
        if self.parameters.get("password") is not None:
            self.pw_change["password"] = self.parameters["password"]

    def get_tenant_account_id(self):
        """Look up a tenant account by name.

        :return: the account id string, or None if no account matches.
        """
        api = "api/v3/grid/accounts?limit=350"

        list_accounts, error = self.rest_api.get(api)

        if error:
            self.module.fail_json(msg=error)

        # 'name' may be absent when only account_id was supplied
        for account in list_accounts.get("data"):
            if account["name"] == self.parameters.get("name"):
                return account["id"]

        return None

    def get_tenant_account(self, account_id):
        """Fetch a single tenant account by id; fail the module on error."""
        api = "api/v3/grid/accounts/%s" % account_id
        account, error = self.rest_api.get(api)

        if error:
            self.module.fail_json(msg=error)
        return account["data"]

    def create_tenant_account(self):
        """POST a new tenant account; return the created account record."""
        api = "api/v3/grid/accounts"

        response, error = self.rest_api.post(api, self.data)

        if error:
            self.module.fail_json(msg=error)

        return response["data"]

    def delete_tenant_account(self, account_id):
        """DELETE the tenant account with the given id."""
        api = "api/v3/grid/accounts/" + account_id

        self.data = None
        dummy, error = self.rest_api.delete(api, self.data)
        if error:
            self.module.fail_json(msg=error)

    def update_tenant_account(self, account_id):
        """PUT the desired attributes; return the updated account record.

        Password and root-access-group are create-only and must not be sent
        on update.
        """
        api = "api/v3/grid/accounts/" + account_id

        if "password" in self.data:
            del self.data["password"]

        if "grantRootAccessToGroup" in self.data:
            del self.data["grantRootAccessToGroup"]

        response, error = self.rest_api.put(api, self.data)
        if error:
            self.module.fail_json(msg=error)

        return response["data"]

    def set_tenant_root_password(self, account_id):
        """POST a root-password change for the tenant account."""
        api = "api/v3/grid/accounts/%s/change-password" % account_id
        dummy, error = self.rest_api.post(api, self.pw_change)

        if error:
            self.module.fail_json(msg=error["text"])

    def apply(self):
        """
        Perform pre-checks, call functions and exit.
        """
        tenant_account = None

        # Prefer the explicit account_id; fall back to a lookup by name.
        if self.parameters.get("account_id"):
            tenant_account = self.get_tenant_account(self.parameters["account_id"])
        else:
            tenant_account_id = self.get_tenant_account_id()
            if tenant_account_id:
                tenant_account = self.get_tenant_account(tenant_account_id)

        cd_action = self.na_helper.get_cd_action(tenant_account, self.parameters)

        if cd_action is None and self.parameters["state"] == "present":
            # let's see if we need to update parameters
            update = False

            # symmetric difference: any capability present on only one side
            capability_diff = [
                i
                for i in self.data["capabilities"] + tenant_account["capabilities"]
                if i not in self.data["capabilities"]
                or i not in tenant_account["capabilities"]
            ]

            if self.parameters["quota_size"] > 0:
                if tenant_account["policy"]["quotaObjectBytes"] != self.parameters["quota_size"]:
                    update = True
            elif (
                self.parameters["quota_size"] == 0
                and tenant_account["policy"]["quotaObjectBytes"] is not None
            ):
                update = True

            if (
                "use_own_identity_source" in self.parameters
                and tenant_account["policy"]["useAccountIdentitySource"]
                != self.parameters["use_own_identity_source"]
            ):
                update = True
            elif (
                "allow_platform_services" in self.parameters
                and tenant_account["policy"]["allowPlatformServices"]
                != self.parameters["allow_platform_services"]
            ):
                update = True
            elif capability_diff:
                update = True

            if update:
                self.na_helper.changed = True

        result_message = ""
        resp_data = tenant_account
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "delete":
                self.delete_tenant_account(tenant_account["id"])
                result_message = "Tenant Account deleted"
                resp_data = None
            elif cd_action == "create":
                resp_data = self.create_tenant_account()
                result_message = "Tenant Account created"
            else:
                resp_data = self.update_tenant_account(tenant_account["id"])
                result_message = "Tenant Account updated"

        # If a password has been set
        if self.pw_change and not self.module.check_mode:
            # Only update the password if update_password is 'always', and
            # only on the update path (cd_action is None): on create the
            # password was already set by POST /grid/accounts, and after a
            # delete the account no longer exists.
            if self.parameters["update_password"] == "always" and cd_action is None:
                self.set_tenant_root_password(tenant_account["id"])
                self.na_helper.changed = True

                results = [result_message, "Tenant Account root password updated"]
                result_message = "; ".join(filter(None, results))

        self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data)


def main():
    """
    Main function
    """
    na_sg_grid_account = SgGridAccount()
    na_sg_grid_account.apply()


if __name__ == "__main__":
    main()
+ type: str +""" + +EXAMPLES = """ + - name: set storage API certificate + netapp.storagegrid.na_sg_grid_certificate: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + type: storage-api + server_certificate: | + -----BEGIN CERTIFICATE----- + MIIC6DCCAdACCQC7l4WukhKD0zANBgkqhkiG9w0BAQsFADA2MQswCQYDVQQGEwJB + BAMMHnNnYW4wMS5kZXYubWljcm9icmV3Lm5ldGFwcC5hdTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAMvjm9I35lmKcC7ITVL8+QiZ/klvdkbfZCUQrfdy + 71inP+XmPjs0rnkhICA9ItODteRcVlO+t7nDTfm7HgG0mJFkcJm0ffyEYrcx24qu + S7gXYQjRsJmrep1awoaCa20BMGuqK2WKI3IvZ7YiT22qkBqKJD+hIFffX6u3Jy+B + 77pR6YcATtpMHW/AaOx+OX9l80dIRsRZKMDxYQ== + -----END CERTIFICATE----- + private_key: | + -----BEGIN PRIVATE KEY----- + MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDL45vSN+ZZinAu + L25W0+cz1Oi69AKkI7d9nbFics2ay5+7o+4rKqf3en2R4MSxiJvy+iDlOmATib5O + x8TN5pJ9AgMBAAECggEADDLM8tHXXUoUFihzv+BUwff8p8YcbHcXFcSes+xTd5li + po8lNsx/v2pQx4ByBkuaYLZGIEXOWS6gkp44xhIXgQKBgQD4Hq7862u5HLbmhrV3 + vs8nC69b3QKBgQDacCD8d8JpwPbg8t2VjXM3UvdmgAaLUfU7O1DWV+W3jqzmDOoN + zWVgPbPNj0UmzvLDbgxLoxe77wjn2BHsAJVAfJ9VeQKBgGqFAegYO+wHR8lJUoa5 + ZEe8Upy2oBtvND/0dnwO2ym2FGsBJN0Gr4NKdG5vkzLsthKkcwRm0ikwEUOUZQKE + K8J5yEVeo9K2v3wggtq8fYn6 + -----END PRIVATE KEY----- + +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID server certificates. 
import json

import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI


class SgGridCertificate:
    """
    Update StorageGRID Storage API and Grid Management certificates.

    The desired payload (self.data) is built once in __init__; apply()
    compares it against the currently installed certificate and posts an
    update (or an empty payload to remove it) when needed.
    """

    def __init__(self):
        """
        Parse arguments, set up state variables,
        check parameters and ensure request module is installed.
        """
        self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec()
        self.argument_spec.update(
            dict(
                state=dict(required=False, type="str", choices=["present", "absent"], default="present"),
                type=dict(required=True, type="str", choices=["storage-api", "management"]),
                server_certificate=dict(required=False, type="str"),
                ca_bundle=dict(required=False, type="str"),
                private_key=dict(required=False, type="str", no_log=True),
            )
        )

        # module parameter name -> API field name
        parameter_map = {
            "server_certificate": "serverCertificateEncoded",
            "ca_bundle": "caBundleEncoded",
            "private_key": "privateKeyEncoded",
        }

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[("state", "present", ["server_certificate", "private_key"])],
            supports_check_mode=True,
        )

        self.na_helper = NetAppModule()

        # set up state variables (None-valued params are dropped here)
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic SG rest_api class
        self.rest_api = SGRestAPI(self.module)

        # Build the request body; for state=absent it stays empty, which
        # the update endpoint interprets as "remove the custom certificate".
        self.data = {}
        if self.parameters["state"] == "present":
            for param_name, api_field in parameter_map.items():
                if self.parameters.get(param_name) is not None:
                    self.data[api_field] = self.parameters[param_name]

    def get_grid_certificate(self, cert_type):
        """GET the current certificate of the given type ('storage-api-certificate'
        or 'management-certificate'); fail the module on error."""
        api = "api/v3/grid/%s" % cert_type

        response, error = self.rest_api.get(api)

        if error:
            self.module.fail_json(msg=error)

        return response["data"]

    def update_grid_certificate(self, cert_type):
        """POST the desired certificate payload for the given type."""
        api = "api/v3/grid/%s/update" % cert_type

        dummy, error = self.rest_api.post(api, self.data)
        if error:
            self.module.fail_json(msg=error)

    def apply(self):
        """
        Perform pre-checks, call functions and exit.
        """
        cert_type = ""
        cd_action = None

        if self.parameters.get("type") == "storage-api":
            cert_type = "storage-api-certificate"
        elif self.parameters.get("type") == "management":
            cert_type = "management-certificate"

        cert_data = self.get_grid_certificate(cert_type)

        # Treat "no custom certificate installed" as the resource being absent.
        if cert_data["serverCertificateEncoded"] is None and cert_data["caBundleEncoded"] is None:
            cd_action = self.na_helper.get_cd_action(None, self.parameters)
        else:
            cd_action = self.na_helper.get_cd_action(cert_data, self.parameters)

        if cd_action is None and self.parameters["state"] == "present":
            # let's see if we need to update parameters
            update = False

            if self.data.get("serverCertificateEncoded") is not None and self.data.get("privateKeyEncoded") is not None:
                # the private key is never returned by the API, so only the
                # certificate and CA bundle can be compared
                for item in ["serverCertificateEncoded", "caBundleEncoded"]:
                    if self.data.get(item) != cert_data.get(item):
                        update = True

            if update:
                self.na_helper.changed = True

        result_message = ""
        resp_data = cert_data
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "delete":
                # posting the empty payload removes the custom certificate
                self.update_grid_certificate(cert_type)
                resp_data = self.get_grid_certificate(cert_type)
                result_message = "Grid %s removed" % cert_type
            else:
                self.update_grid_certificate(cert_type)
                resp_data = self.get_grid_certificate(cert_type)
                result_message = "Grid %s updated" % cert_type

        self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data)


def main():
    """
    Main function
    """
    na_sg_grid_certificate = SgGridCertificate()
    na_sg_grid_certificate.apply()


if __name__ == "__main__":
    main()
+ type: bool +""" + +EXAMPLES = """ + - name: create client certificate + netapp.storagegrid.na_sg_grid_client_certificate: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + display_name: client-cert1 + public_key: | + -----BEGIN CERTIFICATE----- + MIIC6DCCAdACCQC7l4WukhKD0zANBgkqhkiG9w0BAQsFADA2..swCQYDVQQGEwJB + BAMMHnNnYW4wMS5kZXYubWljcm9icmV3Lm5ldGFwcC5hdTCC..IwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAMvjm9I35lmKcC7ITVL8+QiZ..lvdkbfZCUQrfdy + 71inP+XmPjs0rnkhICA9ItODteRcVlO+t7nDTfm7HgG0mJFk..m0ffyEYrcx24qu + S7gXYQjRsJmrep1awoaCa20BMGuqK2WKI3IvZ7YiT22qkBqK..+hIFffX6u3Jy+B + 77pR6YcATtpMHW/AaOx+OX9l80dIRsRZKMDxYQ== + -----END CERTIFICATE----- + allow_prometheus: true + + - name: rename client certificate + netapp.storagegrid.na_sg_grid_client_certificate: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + certificate_id: 00000000-0000-0000-0000-000000000000 + display_name: client-cert1-rename + public_key: | + -----BEGIN CERTIFICATE----- + MIIC6DCCAdACCQC7l4WukhKD0zANBgkqhkiG9w0BAQsFADA2..swCQYDVQQGEwJB + BAMMHnNnYW4wMS5kZXYubWljcm9icmV3Lm5ldGFwcC5hdTCC..IwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAMvjm9I35lmKcC7ITVL8+QiZ..lvdkbfZCUQrfdy + 71inP+XmPjs0rnkhICA9ItODteRcVlO+t7nDTfm7HgG0mJFk..m0ffyEYrcx24qu + S7gXYQjRsJmrep1awoaCa20BMGuqK2WKI3IvZ7YiT22qkBqK..+hIFffX6u3Jy+B + 77pR6YcATtpMHW/AaOx+OX9l80dIRsRZKMDxYQ== + -----END CERTIFICATE----- + allow_prometheus: true + + - name: delete client certificate + netapp.storagegrid.na_sg_grid_client_certificate: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: absent + display_name: client-cert1-rename +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID server certificates. 
import json

import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI


class SgGridClientCertificate:
    """
    Create, update and delete StorageGRID client certificates.

    The desired payload (self.data) is built once in __init__; apply()
    locates the certificate (by id or display name) and creates, updates
    or deletes it as required.
    """

    def __init__(self):
        """
        Parse arguments, set up state variables,
        check parameters and ensure request module is installed.
        """
        self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec()
        self.argument_spec.update(
            dict(
                state=dict(required=False, type="str", choices=["present", "absent"], default="present"),
                certificate_id=dict(required=False, type="str"),
                display_name=dict(required=False, type="str"),
                public_key=dict(required=False, type="str"),
                allow_prometheus=dict(required=False, type="bool"),
            )
        )

        # module parameter name -> API field name
        parameter_map = {
            "display_name": "displayName",
            "public_key": "publicKey",
            "allow_prometheus": "allowPrometheus",
        }

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[("state", "present", ["display_name", "public_key"])],
            required_one_of=[("display_name", "certificate_id")],
            supports_check_mode=True,
        )

        self.na_helper = NetAppModule()

        # set up state variables (None-valued params are dropped here)
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic SG rest_api class
        self.rest_api = SGRestAPI(self.module)

        # Build the request body; empty for state=absent.
        self.data = {}
        if self.parameters["state"] == "present":
            for param_name, api_field in parameter_map.items():
                if self.parameters.get(param_name) is not None:
                    self.data[api_field] = self.parameters[param_name]

    def get_grid_client_certificate_id(self):
        """Look up a client certificate by display name.

        :return: the certificate id, or None if no certificate matches.
        """
        api = "api/v3/grid/client-certificates"

        response, error = self.rest_api.get(api)

        if error:
            self.module.fail_json(msg=error)

        for cert in response.get("data"):
            if cert["displayName"] == self.parameters["display_name"]:
                return cert["id"]
        return None

    def get_grid_client_certificate(self, cert_id):
        """Fetch a single client certificate by id; fail the module on error."""
        api = "api/v3/grid/client-certificates/%s" % cert_id
        response, error = self.rest_api.get(api)

        if error:
            self.module.fail_json(msg=error)
        return response["data"]

    def create_grid_client_certificate(self):
        """POST a new client certificate; return the created record."""
        api = "api/v3/grid/client-certificates"

        response, error = self.rest_api.post(api, self.data)

        if error:
            self.module.fail_json(msg=error["text"])

        return response["data"]

    def delete_grid_client_certificate(self, cert_id):
        """DELETE the client certificate with the given id."""
        api = "api/v3/grid/client-certificates/" + cert_id

        self.data = None
        dummy, error = self.rest_api.delete(api, self.data)
        if error:
            self.module.fail_json(msg=error)

    def update_grid_client_certificate(self, cert_id):
        """PUT the desired attributes; return the updated record."""
        api = "api/v3/grid/client-certificates/" + cert_id

        response, error = self.rest_api.put(api, self.data)
        if error:
            self.module.fail_json(msg=error["text"])

        return response["data"]

    def apply(self):
        """
        Perform pre-checks, call functions and exit.
        """
        client_certificate = None

        # Prefer the explicit certificate_id; fall back to a name lookup.
        if self.parameters.get("certificate_id"):
            client_certificate = self.get_grid_client_certificate(self.parameters["certificate_id"])
        else:
            client_cert_id = self.get_grid_client_certificate_id()
            if client_cert_id:
                client_certificate = self.get_grid_client_certificate(client_cert_id)

        cd_action = self.na_helper.get_cd_action(client_certificate, self.parameters)

        if cd_action is None and self.parameters["state"] == "present":
            # let's see if we need to update parameters
            modify = self.na_helper.get_modified_attributes(client_certificate, self.data)

        result_message = ""
        resp_data = client_certificate

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "delete":
                self.delete_grid_client_certificate(client_certificate["id"])
                result_message = "Client Certificate deleted"
            elif cd_action == "create":
                resp_data = self.create_grid_client_certificate()
                result_message = "Client Certificate created"
            elif modify:
                resp_data = self.update_grid_client_certificate(client_certificate["id"])
                result_message = "Client Certificate updated"

        self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data)


def main():
    """
    Main function
    """
    na_sg_grid_certificate = SgGridClientCertificate()
    na_sg_grid_certificate.apply()


if __name__ == "__main__":
    main()
+extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Update NetApp StorageGRID DNS addresses. +options: + state: + description: + - Whether the specified DNS address should exist or not. + - Required for all operations. + type: str + choices: ['present'] + default: present + dns_servers: + description: + - List of comma separated DNS Addresses to be updated or deleted. + type: list + elements: str + required: true +""" + +EXAMPLES = """ + - name: update DNS servers on StorageGRID + netapp.storagegrid.na_sg_grid_dns: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + dns_servers: "x.x.x.x,xxx.xxx.xxx.xxx" +""" + +RETURN = """ +resp: + description: Returns information about the configured DNS servers. + returned: success + type: list + elements: str + sample: ["8.8.8.8", "8.8.4.4"] +""" + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgGridDns(object): + """ + Create, modify and delete DNS entries for StorageGRID + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present"], default="present"), + dns_servers=dict(required=True, type="list", elements="str"), + ) + ) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + # required_if=[("state", "present", ["state", "name", "protocol"])], + supports_check_mode=True, + ) + + self.na_helper = NetAppModule() + + # set
up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = self.parameters["dns_servers"] + + def get_grid_dns(self): + # Check if tenant account exists + # Return tenant account info if found, or None + api = "api/v3/grid/dns-servers" + + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def update_grid_dns(self): + api = "api/v3/grid/dns-servers" + + response, error = self.rest_api.put(api, self.data) + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + grid_dns = self.get_grid_dns() + + cd_action = self.na_helper.get_cd_action(grid_dns, self.parameters["dns_servers"]) + + if cd_action is None and self.parameters["state"] == "present": + # let's see if we need to update parameters + update = False + + dns_diff = [i for i in self.data + grid_dns if i not in self.data or i not in grid_dns] + if dns_diff: + update = True + + if update: + self.na_helper.changed = True + result_message = "" + resp_data = grid_dns + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + resp_data = self.update_grid_dns() + result_message = "Grid DNS updated" + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_grid_dns = SgGridDns() + na_sg_grid_dns.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_gateway.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_gateway.py new file mode 100644 index 000000000..9202decff --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_gateway.py @@ -0,0 
+1,532 @@ +#!/usr/bin/python + +# (c) 2021, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Load Balancer Endpoints""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +module: na_sg_grid_gateway +short_description: Manage Load balancer (gateway) endpoints on StorageGRID. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '21.7.0' +author: NetApp Ansible Team (@jkandati) +description: +- Create or Update Load Balancer Endpoints on StorageGRID. +- This module is idempotent if I(private_key) is not specified. +- The module will match an existing config based on I(port) and I(display_name). +- If multiple load balancer endpoints exist utilizing the same port and display name, use I(gateway_id) to select the intended endpoint. +options: + state: + description: + - Whether the specified load balancer endpoint should be configured. + type: str + choices: ['present', 'absent'] + default: present + gateway_id: + description: + - ID of the load balancer endpoint. + type: str + version_added: '21.9.0' + display_name: + description: + - A display name for the configuration. + - This parameter can be modified if I(gateway_id) is also specified. + type: str + port: + description: + - The TCP port to serve traffic on. + - This parameter cannot be modified after the load balancer endpoint has been created. + type: int + required: true + secure: + description: + - Whether the load balancer endpoint serves HTTP or HTTPS traffic. + - This parameter cannot be modified after the load balancer endpoint has been created. + type: bool + default: true + enable_ipv4: + description: + - Indicates whether to listen for connections on IPv4. + type: bool + default: true + enable_ipv6: + description: + - Indicates whether to listen for connections on IPv6. 
+ type: bool + default: true + binding_mode: + description: + - Binding mode to restrict accessibility of the load balancer endpoint. + - A binding mode other than I(global) requires StorageGRID 11.5 or greater. + type: str + choices: ['global', 'ha-groups', 'node-interfaces'] + default: 'global' + version_added: '21.9.0' + ha_groups: + description: + - A set of StorageGRID HA Groups by name or UUID to bind the load balancer endpoint to. + - Option is ignored unless I(binding_mode=ha-groups). + type: list + elements: str + version_added: '21.9.0' + node_interfaces: + description: + - A set of StorageGRID node interfaces to bind the load balancer endpoint to. + type: list + elements: dict + suboptions: + node: + description: + - Name of the StorageGRID node. + type: str + interface: + description: + - The interface to bind to. eth0 corresponds to the Grid Network, eth1 to the Admin Network, and eth2 to the Client Network. + type: str + version_added: '21.9.0' + default_service_type: + description: + - The type of service to proxy through the load balancer. + type: str + choices: ['s3', 'swift'] + default: 's3' + server_certificate: + description: + - X.509 server certificate in PEM-encoding. + - Omit if using default certificates. + type: str + required: false + private_key: + description: + - Certificate private key in PEM-encoding. + - Required if I(server_certificate) is not empty. + type: str + required: false + ca_bundle: + description: + - Intermediate CA certificate bundle in concatenated PEM-encoding. + - Omit when there is no intermediate CA.
+ type: str + required: false + +""" +EXAMPLES = """ + - name: Create and Upload Certificate to a Gateway Endpoint with global binding + netapp.storagegrid.na_sg_grid_gateway: + api_url: "https://" + auth_token: "storagegrid-auth-token" + displayName: "FabricPool Endpoint" + port: 10443 + secure: True + enable_ipv4: True + enable_ipv6: True + default_service_type: "s3" + server_certificate: | + -----BEGIN CERTIFICATE----- + MIIC6DCCAdACCQC7l4WukhKD0zANBgkqhkiG9w0BAQsFADA2..swCQYDVQQGEwJB + BAMMHnNnYW4wMS5kZXYubWljcm9icmV3Lm5ldGFwcC5hdTCC..IwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAMvjm9I35lmKcC7ITVL8+QiZ..lvdkbfZCUQrfdy + 71inP+XmPjs0rnkhICA9ItODteRcVlO+t7nDTfm7HgG0mJFk..m0ffyEYrcx24qu + S7gXYQjRsJmrep1awoaCa20BMGuqK2WKI3IvZ7YiT22qkBqK..+hIFffX6u3Jy+B + 77pR6YcATtpMHW/AaOx+OX9l80dIRsRZKMDxYQ== + -----END CERTIFICATE----- + private_key: | + -----BEGIN PRIVATE KEY----- + MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIB..DL45vSN+ZZinAu + L25W0+cz1Oi69AKkI7d9nbFics2ay5+7o+4rKqf3en2R4MSx..vy+iDlOmATib5O + x8TN5pJ9AgMBAAECggEADDLM8tHXXUoUFihzv+BUwff8p8Yc..cXFcSes+xTd5li + po8lNsx/v2pQx4ByBkuaYLZGIEXOWS6gkp44xhIXgQKBgQD4..7862u5HLbmhrV3 + vs8nC69b3QKBgQDacCD8d8JpwPbg8t2VjXM3UvdmgAaLUfU7..DWV+W3jqzmDOoN + zWVgPbPNj0UmzvLDbgxLoxe77wjn2BHsAJVAfJ9VeQKBgGqF..gYO+wHR8lJUoa5 + ZEe8Upy2oBtvND/0dnwO2ym2FGsBJN0Gr4NKdG5vkzLsthKk..Rm0ikwEUOUZQKE + K8J5yEVeo9K2v3wggtq8fYn6 + -----END PRIVATE KEY----- + validate_certs: false + + - name: Create a HTTP Gateway Endpoint with HA Group Binding + netapp.storagegrid.na_sg_grid_gateway: + api_url: "https://" + auth_token: "storagegrid-auth-token" + displayName: "App Endpoint 1" + port: 10501 + secure: false + enable_ipv4: True + enable_ipv6: True + default_service_type: "s3" + binding_mode: ha-groups + ha_groups: site1_ha_group + validate_certs: false + + - name: Create a HTTP Gateway Endpoint with Node Interface Binding + netapp.storagegrid.na_sg_grid_gateway: + api_url: "https://" + auth_token: "storagegrid-auth-token" + displayName: "App 
Endpoint 2" + port: 10502 + secure: false + enable_ipv4: True + enable_ipv6: True + default_service_type: "s3" + binding_mode: node-interfaces + node_interfaces: + - node: SITE1_ADM1 + interface: eth2 + - node: SITE2_ADM1 + interface: eth2 + validate_certs: false + + - name: Delete Gateway Endpoint + netapp.storagegrid.na_sg_grid_gateway: + api_url: "https://" + auth_token: "storagegrid-auth-token" + display_name: "App Endpoint 2" + port: 10502 + default_service_type: "s3" + validate_certs: false +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID Load Balancer Endpoint. + returned: success + type: dict + sample: { + "id": "ffffffff-ffff-ffff-ffff-ffffffffffff", + "displayName": "ansibletest-secure", + "enableIPv4": True, + "enableIPv6": True, + "port": 10443, + "secure": True, + "accountId": "0", + "defaultServiceType": "s3", + "certSource": "plaintext", + "plaintextCertData": { + "serverCertificateEncoded": "-----BEGIN CERTIFICATE-----MIIC6DCCAdACCQC7l4WukhKD0zANBgkqhkiG9w0BAQsFADA2MQswCQYDVQQGE...-----END CERTIFICATE-----", + "caBundleEncoded": "-----BEGIN CERTIFICATE-----MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELM...-----END CERTIFICATE-----", + "metadata": {...} + } + } +""" + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgGridGateway: + """ + Create, modify and delete Gateway entries for StorageGRID + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + # Arguments for Creating Gateway Port + state=dict(required=False,
type="str", choices=["present", "absent"], default="present"), + gateway_id=dict(required=False, type="str"), + display_name=dict(required=False, type="str"), + port=dict(required=True, type="int"), + secure=dict(required=False, type="bool", default=True), + enable_ipv4=dict(required=False, type="bool", default=True), + enable_ipv6=dict(required=False, type="bool", default=True), + binding_mode=dict( + required=False, type="str", choices=["global", "ha-groups", "node-interfaces"], default="global" + ), + ha_groups=dict(required=False, type="list", elements="str"), + node_interfaces=dict( + required=False, + type="list", + elements="dict", + options=dict( + node=dict(required=False, type="str"), + interface=dict(required=False, type="str"), + ), + ), + # Arguments for setting Gateway Virtual Server + default_service_type=dict(required=False, type="str", choices=["s3", "swift"], default="s3"), + server_certificate=dict(required=False, type="str"), + ca_bundle=dict(required=False, type="str"), + private_key=dict(required=False, type="str", no_log=True), + ) + ) + + parameter_map_gateway = { + "gateway_id": "id", + "display_name": "displayName", + "port": "port", + "secure": "secure", + "enable_ipv4": "enableIPv4", + "enable_ipv6": "enableIPv6", + } + parameter_map_server = { + "server_certificate": "serverCertificateEncoded", + "ca_bundle": "caBundleEncoded", + "private_key": "privateKeyEncoded", + } + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[("state", "present", ["display_name"])], + supports_check_mode=True, + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Get API version + self.rest_api.get_sg_product_version() + + # Checking for the parameters passed and create new parameters list + + # Parameters for creating a new gateway port configuration + 
self.data_gateway = {} + self.data_gateway["accountId"] = "0" + + for k in parameter_map_gateway.keys(): + if self.parameters.get(k) is not None: + self.data_gateway[parameter_map_gateway[k]] = self.parameters[k] + + # Parameters for setting a gateway virtual server configuration for a gateway port + self.data_server = {} + self.data_server["defaultServiceType"] = self.parameters["default_service_type"] + + if self.parameters["secure"]: + self.data_server["plaintextCertData"] = {} + self.data_server["certSource"] = "plaintext" + + for k in parameter_map_server.keys(): + if self.parameters.get(k) is not None: + self.data_server["plaintextCertData"][parameter_map_server[k]] = self.parameters[k] + + if self.parameters["binding_mode"] != "global": + self.rest_api.fail_if_not_sg_minimum_version("non-global binding mode", 11, 5) + + if self.parameters["binding_mode"] == "ha-groups": + self.data_gateway["pinTargets"] = {} + self.data_gateway["pinTargets"]["haGroups"] = self.build_ha_group_list() + self.data_gateway["pinTargets"]["nodeInterfaces"] = [] + + elif self.parameters["binding_mode"] == "node-interfaces": + self.data_gateway["pinTargets"] = {} + self.data_gateway["pinTargets"]["nodeInterfaces"] = self.build_node_interface_list() + self.data_gateway["pinTargets"]["haGroups"] = [] + + else: + self.data_gateway["pinTargets"] = {} + self.data_gateway["pinTargets"]["haGroups"] = [] + self.data_gateway["pinTargets"]["nodeInterfaces"] = [] + + def build_ha_group_list(self): + ha_group_ids = [] + + api = "api/v3/private/ha-groups" + ha_groups, error = self.rest_api.get(api) + if error: + self.module.fail_json(msg=error) + + for param in self.parameters["ha_groups"]: + ha_group = next( + (item for item in ha_groups["data"] if (item["name"] == param or item["id"] == param)), None + ) + if ha_group is not None: + ha_group_ids.append(ha_group["id"]) + else: + self.module.fail_json(msg="HA Group '%s' is invalid" % param) + + return ha_group_ids + + def 
build_node_interface_list(self): + node_interfaces = [] + + api = "api/v3/grid/node-health" + nodes, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + for node_interface in self.parameters["node_interfaces"]: + node_dict = {} + node = next((item for item in nodes["data"] if item["name"] == node_interface["node"]), None) + if node is not None: + node_dict["nodeId"] = node["id"] + node_dict["interface"] = node_interface["interface"] + node_interfaces.append(node_dict) + else: + self.module.fail_json(msg="Node '%s' is invalid" % node_interface["node"]) + + return node_interfaces + + def get_grid_gateway_config(self, gateway_id): + api = "api/v3/private/gateway-configs/%s" % gateway_id + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + gateway = response["data"] + gateway_config = self.get_grid_gateway_server_config(gateway["id"]) + + return gateway, gateway_config + + def get_grid_gateway_server_config(self, gateway_id): + api = "api/v3/private/gateway-configs/%s/server-config" % gateway_id + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def get_grid_gateway_ports(self, target_port): + + configured_ports = [] + gateway = {} + gateway_config = {} + + api = "api/v3/private/gateway-configs" + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + grid_gateway_ports = response["data"] + + # Get only a list of used ports + configured_ports = [data["port"] for data in grid_gateway_ports] + + for index, port in enumerate(configured_ports): + # if port already exists then get gateway ID and get the gateway port server configs + if target_port == port and grid_gateway_ports[index]["displayName"] == self.parameters["display_name"]: + gateway = grid_gateway_ports[index] + gateway_config = self.get_grid_gateway_server_config(gateway["id"]) + break + + return gateway, gateway_config + + def 
create_grid_gateway(self): + api = "api/v3/private/gateway-configs" + response, error = self.rest_api.post(api, self.data_gateway) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def delete_grid_gateway(self, gateway_id): + api = "api/v3/private/gateway-configs/" + gateway_id + self.data = None + response, error = self.rest_api.delete(api, self.data) + + if error: + self.module.fail_json(msg=error) + + def update_grid_gateway(self, gateway_id): + api = "api/v3/private/gateway-configs/%s" % gateway_id + response, error = self.rest_api.put(api, self.data_gateway) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def update_grid_gateway_server(self, gateway_id): + api = "api/v3/private/gateway-configs/%s/server-config" % gateway_id + response, error = self.rest_api.put(api, self.data_server) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def apply(self): + gateway = None + gateway_config = None + + update_gateway = False + update_gateway_server = False + + if self.parameters.get("gateway_id"): + gateway, gateway_config = self.get_grid_gateway_config(self.parameters["gateway_id"]) + + else: + # Get list of all gateway port configurations + gateway, gateway_config = self.get_grid_gateway_ports(self.data_gateway["port"]) + + cd_action = self.na_helper.get_cd_action(gateway.get("id"), self.parameters) + + if cd_action is None and self.parameters["state"] == "present": + update = False + + if self.data_server.get("plaintextCertData"): + if self.data_server["plaintextCertData"].get("privateKeyEncoded") is not None: + update = True + self.module.warn("This module is not idempotent when private_key is present.") + + if gateway_config.get("plaintextCertData"): + # If certificate private key supplied, update + if gateway_config["plaintextCertData"].get("metadata"): + # remove metadata because we can't compare that + del gateway_config["plaintextCertData"]["metadata"] + + # compare 
current and desired state + # gateway config cannot be modified until StorageGRID 11.5 + if self.rest_api.meets_sg_minimum_version(11, 5): + update_gateway = self.na_helper.get_modified_attributes(gateway, self.data_gateway) + update_gateway_server = self.na_helper.get_modified_attributes(gateway_config, self.data_server) + + if update: + self.na_helper.changed = True + + result_message = "" + resp_data = {} + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == "delete": + self.delete_grid_gateway(gateway["id"]) + result_message = "Load Balancer Gateway Port Deleted" + + elif cd_action == "create": + resp_data = self.create_grid_gateway() + gateway["id"] = resp_data["id"] + resp_data_server = self.update_grid_gateway_server(gateway["id"]) + resp_data.update(resp_data_server) + result_message = "Load Balancer Gateway Port Created" + + else: + resp_data = gateway + if update_gateway: + resp_data = self.update_grid_gateway(gateway["id"]) + resp_data.update(gateway_config) + + if update_gateway_server: + resp_data_server = self.update_grid_gateway_server(gateway["id"]) + resp_data.update(resp_data_server) + result_message = "Load Balancer Gateway Port Updated" + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_grid_gateway = SgGridGateway() + na_sg_grid_gateway.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_group.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_group.py new file mode 100644 index 000000000..60592c609 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_group.py @@ -0,0 +1,341 @@ +#!/usr/bin/python + +# (c) 2020, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Grid Groups""" + +from __future__ import 
absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + + +DOCUMENTATION = """ +module: na_sg_grid_group +short_description: NetApp StorageGRID manage groups. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create, Update, Delete Administration Groups within NetApp StorageGRID. +options: + state: + description: + - Whether the specified group should exist or not. + type: str + choices: ['present', 'absent'] + default: present + display_name: + description: + - Name of the group. + - Required for create operation + type: str + unique_name: + description: + - Unique Name for the group. Must begin with C(group/) or C(federated-group/) + - Required for create, modify or delete operation. + type: str + required: true + management_policy: + description: + - Management access controls granted to the group within the tenancy. + type: dict + suboptions: + alarm_acknowledgement: + description: + - Group members can have permission to acknowledge alarms. + required: false + type: bool + other_grid_configuration: + description: + - Need to investigate. + required: false + type: bool + grid_topology_page_configuration: + description: + - Users in this group will have permissions to change grid topology. + required: false + type: bool + tenant_accounts: + description: + - Users in this group will have permissions to manage tenant accounts. + required: false + type: bool + change_tenant_root_password: + description: + - Users in this group will have permissions to change tenant password. + required: false + type: bool + maintenance: + description: + - Users in this group will have permissions to run maintenance tasks on StorageGRID. 
+ required: false + type: bool + metrics_query: + description: + - Users in this group will have permissions to query metrics on StorageGRID. + required: false + type: bool + activate_features: + description: + - Users in this group will have permissions to reactivate features. + required: false + type: bool + ilm: + description: + - Users in this group will have permissions to manage ILM rules on StorageGRID. + required: false + type: bool + object_metadata: + description: + - Users in this group will have permissions to manage object metadata. + required: false + type: bool + root_access: + description: + - Users in this group will have root access. + required: false + type: bool +""" + +EXAMPLES = """ + - name: create a StorageGRID group + netapp.storagegrid.na_sg_grid_group: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + display_name: ansiblegroup100 + unique_name: group/ansiblegroup100 + management_policy: + tenant_accounts: true + maintenance: true + root_access: false +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID group attributes. 
+ returned: success + type: dict + sample: { + "displayName": "Example Group", + "policies": { + "management": { + "alarmAcknowledgment": true, + "manageAlerts": true, + "otherGridConfiguration": true, + "gridTopologyPageConfiguration": true, + "tenantAccounts": true, + "changeTenantRootPassword": true, + "maintenance": true, + "metricsQuery": true, + "activateFeatures": false, + "ilm": true, + "objectMetadata": true, + "storageAdmin": true, + "rootAccess": true + } + }, + "uniqueName": "group/examplegroup", + "accountId": "12345678901234567890", + "id": "00000000-0000-0000-0000-000000000000", + "federated": false, + "groupURN": "urn:sgws:identity::12345678901234567890:group/examplegroup" + } +""" + +import json +import re + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgGridGroup(object): + """ + Create, modify and delete StorageGRID Grid-administration Group + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present", "absent"], default="present"), + display_name=dict(required=False, type="str"), + unique_name=dict(required=True, type="str"), + management_policy=dict( + required=False, + type="dict", + options=dict( + alarm_acknowledgement=dict(required=False, type="bool"), + other_grid_configuration=dict(required=False, type="bool"), + grid_topology_page_configuration=dict(required=False, type="bool"), + tenant_accounts=dict(required=False, type="bool"), + change_tenant_root_password=dict(required=False, type="bool"), + 
maintenance=dict(required=False, type="bool"), + metrics_query=dict(required=False, type="bool"), + activate_features=dict(required=False, type="bool"), + ilm=dict(required=False, type="bool"), + object_metadata=dict(required=False, type="bool"), + root_access=dict(required=False, type="bool"), + ), + ), + ) + ) + parameter_map = { + "alarm_acknowledgement": "alarmAcknowledgement", + "other_grid_configuration": "otherGridConfiguration", + "grid_topology_page_configuration": "gridTopologyPageConfiguration", + "tenant_accounts": "tenantAccounts", + "change_tenant_root_password": "changeTenantRootPassword", + "maintenance": "maintenance", + "metrics_query": "metricsQuery", + "activate_features": "activateFeatures", + "ilm": "ilm", + "object_metadata": "objectMetadata", + "root_access": "rootAccess", + } + self.module = AnsibleModule(argument_spec=self.argument_spec, supports_check_mode=True,) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = {} + self.data["displayName"] = self.parameters.get("display_name") + self.data["uniqueName"] = self.parameters["unique_name"] + # Only add the parameter if value is True, as JSON response does not include non-true objects + self.data["policies"] = {} + + if self.parameters.get("management_policy"): + self.data["policies"] = { + "management": dict( + (parameter_map[k], v) for (k, v) in self.parameters["management_policy"].items() if v + ) + } + if not self.data["policies"].get("management"): + self.data["policies"]["management"] = None + + self.re_local_group = re.compile("^group/") + self.re_fed_group = re.compile("^federated-group/") + + if ( + self.re_local_group.match(self.parameters["unique_name"]) is None + and self.re_fed_group.match(self.parameters["unique_name"]) is None + ): + 
self.module.fail_json(msg="unique_name must begin with 'group/' or 'federated-group/'") + + def get_grid_group(self, unique_name): + # Use the unique name to check if the group exists + api = "api/v3/grid/groups/%s" % unique_name + response, error = self.rest_api.get(api) + + if error: + if response["code"] != 404: + self.module.fail_json(msg=error) + else: + return response["data"] + return None + + def create_grid_group(self): + api = "api/v3/grid/groups" + + response, error = self.rest_api.post(api, self.data) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def delete_grid_group(self, group_id): + api = "api/v3/grid/groups/" + group_id + + self.data = None + response, error = self.rest_api.delete(api, self.data) + if error: + self.module.fail_json(msg=error) + + def update_grid_group(self, group_id): + api = "api/v3/grid/groups/" + group_id + + response, error = self.rest_api.put(api, self.data) + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + grid_group = self.get_grid_group(self.parameters["unique_name"]) + + cd_action = self.na_helper.get_cd_action(grid_group, self.parameters) + + if cd_action is None and self.parameters["state"] == "present": + # let's see if we need to update parameters + update = False + + if self.parameters.get("management_policy"): + if ( + grid_group.get("policies") is None + or grid_group.get("policies", {}).get("management") != self.data["policies"]["management"] + ): + update = True + + if update: + self.na_helper.changed = True + result_message = "" + resp_data = grid_group + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == "delete": + self.delete_grid_group(grid_group["id"]) + result_message = "Grid Group deleted" + + elif cd_action == "create": + resp_data = self.create_grid_group() + result_message = "Grid Group created" + + else: + # for a federated 
group, the displayName parameter needs to be specified + # and must match the existing displayName + if self.re_fed_group.match(self.parameters["unique_name"]): + self.data["displayName"] = grid_group["displayName"] + + resp_data = self.update_grid_group(grid_group["id"]) + result_message = "Grid Group updated" + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_grid_group = SgGridGroup() + na_sg_grid_group.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_ha_group.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_ha_group.py new file mode 100644 index 000000000..c99719c6d --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_ha_group.py @@ -0,0 +1,334 @@ +#!/usr/bin/python + +# (c) 2022, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage HA Groups""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +module: na_sg_grid_ha_group +short_description: Manage high availability (HA) group configuration on StorageGRID. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '21.10.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create, Update, Delete HA Groups on NetApp StorageGRID. +options: + state: + description: + - Whether the specified HA Group should exist. + type: str + choices: ['present', 'absent'] + default: present + name: + description: + - Name of the HA Group. + type: str + ha_group_id: + description: + - HA Group ID. + - May be used for modify or delete operation. + type: str + description: + description: + - Description of the HA Group. + type: str + gateway_cidr: + description: + - CIDR for the gateway IP and VIP subnet. 
+ type: str + virtual_ips: + description: + - A list of virtual IP addresses. + type: list + elements: str + interfaces: + description: + - A set of StorageGRID node interface pairs. + - The primary interface is specified first, followed by the other interface pairs in failover order. + type: list + elements: dict + suboptions: + node: + description: + - Name of the StorageGRID node. + type: str + interface: + description: + - The interface to bind to. eth0 corresponds to the Grid Network, eth1 to the Admin Network, and eth2 to the Client Network. + type: str +""" + +EXAMPLES = """ + - name: create HA Group + netapp.storagegrid.na_sg_grid_ha_group: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + name: Site1-HA-Group + description: "Site 1 HA Group" + gateway_cidr: 192.168.50.1/24 + virtual_ips: 192.168.50.5 + interfaces: + - node: SITE1-ADM1 + interface: eth2 + - node: SITE1-G1 + interface: eth2 + + - name: add VIP to HA Group + netapp.storagegrid.na_sg_grid_ha_group: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + name: Site1-HA-Group + description: "Site 1 HA Group" + gateway_cidr: 192.168.50.1/24 + virtual_ips: 192.168.50.5,192.168.50.6 + interfaces: + - node: SITE1-ADM1 + interface: eth2 + - node: SITE1-G1 + interface: eth2 + + - name: rename HA Group + netapp.storagegrid.na_sg_grid_ha_group: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + ha_group_id: 00000000-0000-0000-0000-000000000000 + name: Site1-HA-Group-New-Name + description: "Site 1 HA Group" + gateway_cidr: 192.168.50.1/24 + virtual_ips: 192.168.50.5 + interfaces: + - node: SITE1-ADM1 + interface: eth2 + - node: SITE1-G1 + interface: eth2 + + - name: delete HA Group + netapp.storagegrid.na_sg_grid_ha_group: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: absent + name: Site1-HA-Group +""" + 
+RETURN = """ +resp: + description: Returns information about the StorageGRID HA Group. + returned: success + type: dict + sample: { + "description": "Site 1 HA Group", + "gatewayCidr": "192.168.50.1/24", + "id": "bb386f30-805d-4fec-a2c5-85790b460db0", + "interfaces": [ + { + "interface": "eth2", + "nodeId": "0b1866ed-d6e7-41b4-815f-bf867348b76b" + }, + { + "interface": "eth2", + "nodeId": "7bb5bf05-a04c-4344-8abd-08c5c4048666" + } + ], + "name": "Site1-HA-Group", + "virtualIps": [ + "192.168.50.5", + "192.168.50.6" + ] + } +""" + +import json + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgGridHaGroup: + """ + Create, modify and delete HA Group configurations for StorageGRID + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present", "absent"], default="present"), + name=dict(required=False, type="str"), + ha_group_id=dict(required=False, type="str"), + description=dict(required=False, type="str"), + gateway_cidr=dict(required=False, type="str"), + virtual_ips=dict(required=False, type="list", elements="str"), + interfaces=dict( + required=False, + type="list", + elements="dict", + options=dict( + node=dict(required=False, type="str"), + interface=dict(required=False, type="str"), + ), + ), + ) + ) + + parameter_map = { + "name": "name", + "description": "description", + "gateway_cidr": "gatewayCidr", + "virtual_ips": "virtualIps", + } + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + 
required_if=[("state", "present", ["name", "gateway_cidr", "virtual_ips", "interfaces"])], + required_one_of=[("name", "ha_group_id")], + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = {} + + if self.parameters["state"] == "present": + for k in parameter_map.keys(): + if self.parameters.get(k) is not None: + self.data[parameter_map[k]] = self.parameters[k] + + if self.parameters.get("interfaces") is not None: + self.data["interfaces"] = self.build_node_interface_list() + + def build_node_interface_list(self): + node_interfaces = [] + + api = "api/v3/grid/node-health" + nodes, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + for node_interface in self.parameters["interfaces"]: + node_dict = {} + node = next((item for item in nodes["data"] if item["name"] == node_interface["node"]), None) + if node is not None: + node_dict["nodeId"] = node["id"] + node_dict["interface"] = node_interface["interface"] + node_interfaces.append(node_dict) + else: + self.module.fail_json(msg="Node '%s' is invalid" % node_interface["node"]) + + return node_interfaces + + def get_ha_group_id(self): + # Check if HA Group exists + # Return HA Group info if found, or None + api = "api/v3/private/ha-groups" + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + return next((item["id"] for item in response.get("data") if item["name"] == self.parameters["name"]), None) + + def get_ha_group(self, ha_group_id): + api = "api/v3/private/ha-groups/%s" % ha_group_id + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def create_ha_group(self): + api = "api/v3/private/ha-groups" + response, error = 
self.rest_api.post(api, self.data) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def delete_ha_group(self, ha_group_id): + api = "api/v3/private/ha-groups/%s" % ha_group_id + dummy, error = self.rest_api.delete(api, self.data) + + if error: + self.module.fail_json(msg=error) + + def update_ha_group(self, ha_group_id): + api = "api/v3/private/ha-groups/%s" % ha_group_id + response, error = self.rest_api.put(api, self.data) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + + ha_group = None + + if self.parameters.get("ha_group_id"): + ha_group = self.get_ha_group(self.parameters["ha_group_id"]) + else: + ha_group_id = self.get_ha_group_id() + if ha_group_id: + ha_group = self.get_ha_group(ha_group_id) + + cd_action = self.na_helper.get_cd_action(ha_group, self.parameters) + + if cd_action is None and self.parameters["state"] == "present": + # let's see if we need to update parameters + modify = self.na_helper.get_modified_attributes(ha_group, self.data) + + result_message = "" + resp_data = {} + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == "delete": + self.delete_ha_group(ha_group["id"]) + result_message = "HA Group deleted" + elif cd_action == "create": + resp_data = self.create_ha_group() + result_message = "HA Group created" + elif modify: + resp_data = self.update_ha_group(ha_group["id"]) + result_message = "HA Group updated" + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_grid_ha_group = SgGridHaGroup() + na_sg_grid_ha_group.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_identity_federation.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_identity_federation.py new file mode 100644 index 
000000000..729cf4545 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_identity_federation.py @@ -0,0 +1,335 @@ +#!/usr/bin/python + +# (c) 2021, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Grid Identity Federation""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = """ +module: na_sg_grid_identity_federation +short_description: NetApp StorageGRID manage Grid identity federation. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '21.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Configure Grid Identity Federation within NetApp StorageGRID. +- If module is run with I(check_mode), a connectivity test will be performed using the supplied values without changing the configuration. +- This module is idempotent if I(password) is not specified. +options: + state: + description: + - Whether identity federation should be enabled or not. + type: str + choices: ['present', 'absent'] + default: present + username: + description: + - The username to bind to the LDAP server. + type: str + password: + description: + - The password associated with the username. + type: str + hostname: + description: + - The hostname or IP address of the LDAP server. + type: str + port: + description: + - The port used to connect to the LDAP server. Typically 389 for LDAP, or 636 for LDAPS. + type: int + base_group_dn: + description: + - The Distinguished Name of the LDAP subtree to search for groups. + type: str + base_user_dn: + description: + - The Distinguished Name of the LDAP subtree to search for users. + type: str + ldap_service_type: + description: + - The type of LDAP server. + choices: ['Active Directory', 'OpenLDAP', 'Other'] + type: str + type: + description: + - The type of identity source. + - Default is C(ldap). 
+ type: str + default: ldap + ldap_user_id_attribute: + description: + - The LDAP attribute which contains the unique user name of a user. + - Should be configured if I(ldap_service_type=Other). + type: str + ldap_user_uuid_attribute: + description: + - The LDAP attribute which contains the permanent unique identity of a user. + - Should be configured if I(ldap_service_type=Other). + type: str + ldap_group_id_attribute: + description: + - The LDAP attribute which contains the group for a user. + - Should be configured if I(ldap_service_type=Other). + type: str + ldap_group_uuid_attribute: + description: + - The LDAP attribute which contains the group's permanent unique identity. + - Should be configured if I(ldap_service_type=Other). + type: str + tls: + description: + - Whether Transport Layer Security is used to connect to the LDAP server. + choices: ['STARTTLS', 'LDAPS', 'Disabled'] + type: str + default: STARTTLS + ca_cert: + description: + - Custom certificate used to connect to the LDAP server. + - If a custom certificate is not supplied, the operating system CA certificate will be used. 
+ type: str +""" + +EXAMPLES = """ + - name: test identity federation configuration + netapp.storagegrid.na_sg_grid_identity_federation: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + ldap_service_type: "Active Directory" + hostname: "ad.example.com" + port: 389 + username: "binduser" + password: "bindpass" + base_group_dn: "DC=example,DC=com" + base_user_dn: "DC=example,DC=com" + tls: "Disabled" + check_mode: yes + + - name: configure identity federation with AD and TLS + netapp.storagegrid.na_sg_grid_identity_federation: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + ldap_service_type: "Active Directory" + hostname: "ad.example.com" + port: 636 + username: "binduser" + password: "bindpass" + base_group_dn: "DC=example,DC=com" + base_user_dn: "DC=example,DC=com" + tls: "LDAPS" + ca_cert: | + -----BEGIN CERTIFICATE----- + MIIC+jCCAeICCQDmn9Gow08LTzANBgkqhkiG9w0BAQsFADA/..swCQYDVQQGEwJV + bXBsZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB..JFzNIXQEGnsgjV + JGU4giuvOLOZ8Q3gyuUbkSUQDjmjpMR8PliwJ6iW2Ity89Dv..dl1TaIYI/ansyZ + Uxk4YXeN6kUkrDtNxCg1McALzXVAfxMTtj2SFlLxne4Z6rX2..UyftQrfM13F1vY + gK8dBPz+l+X/Uozo/xNm7gxe68p9le9/pcULst1CQn5/sPqq..kgWcSvlKUItu82 + lq3B2169rovdIaNdcvaQjMPhrDGo5rvLfMN35U3Hgbz41PL5..x2BcUE6/0ab5T4 + qKBxKa3t9twj+zpUqOzyL0PFfCE+SK5fEXAS1ow4eAcLN+eB..gR/PuvGAyIPCtE + 1+X4GrECAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAFpO+04Ra..FMJPH6dBmzfb7l + k04BWTvSlur6HiQdXY+oFQMJZzyI7MQ8v9HBIzS0ZAzYWLp4..VZhHmRxnrWyxVs + u783V5YfQH2L4QnBDoiDefgxyfDs2PcoF5C+X9CGXmPqzst2..y/6tdOVJzdiA== + -----END CERTIFICATE----- +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID management identity source configuration. 
+ returned: success + type: dict + sample: { + "id": "00000000-0000-0000-0000-000000000000", + "disable": false, + "hostname": "10.1.2.3", + "port": 389, + "username": "MYDOMAIN\\\\Administrator", + "password": "********", + "baseGroupDn": "DC=example,DC=com", + "baseUserDn": "DC=example,DC=com", + "ldapServiceType": "Active Directory", + "type": "ldap", + "disableTLS": false, + "enableLDAPS": false, + "caCert": "-----BEGIN CERTIFICATE----- abcdefghijkl123456780ABCDEFGHIJKL 123456/7890ABCDEFabcdefghijklABCD -----END CERTIFICATE-----\n" + } +""" + +import json +import re + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgGridIdentityFederation: + """ + Configure and modify StorageGRID Grid Identity Federation + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present", "absent"], default="present"), + username=dict(required=False, type="str"), + password=dict(required=False, type="str", no_log=True), + hostname=dict(required=False, type="str"), + port=dict(required=False, type="int"), + base_group_dn=dict(required=False, type="str"), + base_user_dn=dict(required=False, type="str"), + ldap_service_type=dict(required=False, type="str", choices=["OpenLDAP", "Active Directory", "Other"]), + type=dict(required=False, type="str", default="ldap"), + ldap_user_id_attribute=dict(required=False, type="str"), + ldap_user_uuid_attribute=dict(required=False, type="str"), + ldap_group_id_attribute=dict(required=False, type="str"), + 
ldap_group_uuid_attribute=dict(required=False, type="str"), + tls=dict(required=False, type="str", choices=["STARTTLS", "LDAPS", "Disabled"], default="STARTTLS"), + ca_cert=dict(required=False, type="str"), + ), + ) + + parameter_map = { + "username": "username", + "password": "password", + "hostname": "hostname", + "port": "port", + "base_group_dn": "baseGroupDn", + "base_user_dn": "baseUserDn", + "ldap_service_type": "ldapServiceType", + "ldap_user_id_attribute": "ldapUserIdAttribute", + "ldap_user_uuid_attribute": "ldapUserUUIDAttribute", + "ldap_group_id_attribute": "ldapGroupIdAttribute", + "ldap_group_uuid_attribute": "ldapGroupUUIDAttribute", + "ca_cert": "caCert", + } + self.module = AnsibleModule(argument_spec=self.argument_spec, supports_check_mode=True,) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = {} + + if self.parameters["state"] == "present": + self.data["disable"] = False + + for k in parameter_map.keys(): + if self.parameters.get(k) is not None: + self.data[parameter_map[k]] = self.parameters[k] + + if self.parameters.get("tls") == "STARTTLS": + self.data["disableTLS"] = False + self.data["enableLDAPS"] = False + elif self.parameters.get("tls") == "LDAPS": + self.data["disableTLS"] = False + self.data["enableLDAPS"] = True + else: + self.data["disableTLS"] = True + self.data["enableLDAPS"] = False + + def get_grid_identity_source(self): + api = "api/v3/grid/identity-source" + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + else: + return response["data"] + return None + + def update_identity_federation(self, test=False): + api = "api/v3/grid/identity-source" + + params = {} + + if test: + params["test"] = True + + response, error = self.rest_api.put(api, 
self.data, params=params) + if error: + self.module.fail_json(msg=error, payload=self.data) + + if response is not None: + return response["data"] + else: + return None + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + grid_identity_source = self.get_grid_identity_source() + + cd_action = self.na_helper.get_cd_action(grid_identity_source, self.parameters) + + if cd_action is None and self.parameters["state"] == "present": + # let's see if we need to update parameters + update = False + + for k in (i for i in self.data.keys() if i != "password"): + if self.data[k] != grid_identity_source.get(k): + update = True + break + + # if a password has been specified we need to update it + if self.data.get("password") and self.parameters["state"] == "present": + update = True + self.module.warn("Password attribute has been specified. Task is not idempotent.") + + if update: + self.na_helper.changed = True + + if cd_action == "delete": + # if identity federation is already in a disable state + if grid_identity_source.get("disable"): + self.na_helper.changed = False + + result_message = "" + resp_data = grid_identity_source + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == "delete": + self.data = dict(disable=True) + resp_data = self.update_identity_federation() + result_message = "Grid identity federation disabled" + else: + resp_data = self.update_identity_federation() + result_message = "Grid identity federation updated" + + if self.module.check_mode: + self.update_identity_federation(test=True) + # if no error, connection test successful + self.module.exit_json(changed=self.na_helper.changed, msg="Connection test successful") + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_grid_identity_federation = SgGridIdentityFederation() + na_sg_grid_identity_federation.apply() + + +if __name__ == "__main__": + main() diff --git 
a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_info.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_info.py new file mode 100644 index 000000000..b14f88a22 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_info.py @@ -0,0 +1,405 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" NetApp StorageGRID Grid Info using REST APIs """ + + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +module: na_sg_grid_info +author: NetApp Ansible Team (@jasonl4) +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +short_description: NetApp StorageGRID Grid information gatherer. +description: + - This module allows you to gather various information about StorageGRID Grid configuration. +version_added: 20.11.0 + +options: + gather_subset: + type: list + elements: str + description: + - When supplied, this argument will restrict the information collected to a given subset. + - Either the info name or the REST API can be given. 
+ - Possible values for this argument include + - C(grid_accounts_info) or C(grid/accounts) + - C(grid_alarms_info) or C(grid/alarms) + - C(grid_audit_info) or C(grid/audit) + - C(grid_compliance_global_info) or C(grid/compliance-global) + - C(grid_config_info) or C(grid/config) + - C(grid_config_management_info) or C(grid/config/management) + - C(grid_config_product_version_info) or C(grid/config/product-version) + - C(grid_deactivated_features_info) or C(grid/deactivated-features) + - C(grid_dns_servers_info) or C(grid/dns-servers) + - C(grid_domain_names_info) or C(grid/domain-names) + - C(grid_ec_profiles_info) or C(grid/ec-profiles) + - C(grid_expansion_info) or C(grid/expansion) + - C(grid_expansion_nodes_info) or C(grid/expansion/nodes) + - C(grid_expansion_sites_info) or C(grid/expansion/sites) + - C(grid_grid_networks_info) or C(grid/grid-networks) + - C(grid_groups_info) or C(grid/groups) + - C(grid_health_info) or C(grid/health) + - C(grid_health_topology_info) or C(grid/health/topology) + - C(grid_identity_source_info) or C(grid/identity-source) + - C(grid_ilm_criteria_info) or C(grid/ilm-criteria) + - C(grid_ilm_policies_info) or C(grid/ilm-policies) + - C(grid_ilm_rules_info) or C(grid/ilm-rules) + - C(grid_license_info) or C(grid/license) + - C(grid_management_certificate_info) or C(grid/management-certificate) + - C(grid_ntp_servers_info) or C(grid/ntp-servers) + - C(grid_recovery_available_nodes_info) or C(grid/recovery/available-nodes) + - C(grid_recovery_info) or C(grid/recovery) + - C(grid_regions_info) or C(grid/regions) + - C(grid_schemes_info) or C(grid/schemes) + - C(grid_snmp_info) or C(grid/snmp) + - C(grid_storage_api_certificate_info) or C(grid/storage-api-certificate) + - C(grid_untrusted_client_network_info) or C(grid/untrusted-client-network) + - C(grid_users_info) or C(grid/users) + - C(grid_users_root_info) or C(grid/users/root) + - C(versions_info) or C(versions) + - Can specify a list of values to include a larger subset. 
+ default: all + parameters: + description: + - Allows for any rest option to be passed in. + type: dict +""" + +EXAMPLES = """ +- name: Gather StorageGRID Grid info + netapp.storagegrid.na_sg_grid_info: + api_url: "https://1.2.3.4/" + auth_token: "storagegrid-auth-token" + validate_certs: false + register: sg_grid_info + +- name: Gather StorageGRID Grid info for grid/accounts and grid/config subsets + netapp.storagegrid.na_sg_grid_info: + api_url: "https://1.2.3.4/" + auth_token: "storagegrid-auth-token" + validate_certs: false + gather_subset: + - grid_accounts_info + - grid/config + register: sg_grid_info + +- name: Gather StorageGRID Grid info for all subsets + netapp.storagegrid.na_sg_grid_info: + api_url: "https://1.2.3.4/" + auth_token: "storagegrid-auth-token" + validate_certs: false + gather_subset: + - all + register: sg_grid_info + +- name: Gather StorageGRID Grid info for grid/accounts and grid/users subsets, limit to 5 results for each subset + netapp.storagegrid.na_sg_grid_info: + api_url: "https://1.2.3.4/" + auth_token: "storagegrid-auth-token" + validate_certs: false + gather_subset: + - grid/accounts + - grid/users + parameters: + limit: 5 + register: sg_grid_info +""" + +RETURN = """ +sg_info: + description: Returns various information about the StorageGRID Grid configuration. 
+ returned: always + type: dict + sample: { + "grid/accounts": {...}, + "grid/alarms": {...}, + "grid/audit": {...}, + "grid/compliance-global": {...}, + "grid/config": {...}, + "grid/config/management": {...}, + "grid/config/product-version": {...}, + "grid/deactivated-features": {...}, + "grid/dns-servers": {...}, + "grid/domain-names": {...}, + "grid/ec-profiles": {...}, + "grid/expansion": {...}, + "grid/expansion/nodes": {...}, + "grid/expansion/sites": {...}, + "grid/networks": {...}, + "grid/groups": {...}, + "grid/health": {...}, + "grid/health/topology": {...}, + "grid/identity-source": {...}, + "grid/ilm-criteria": {...}, + "grid/ilm-policies": {...}, + "grid/ilm-rules": {...}, + "grid/license": {...}, + "grid/management-certificate": {...}, + "grid/ntp-servers": {...}, + "grid/recovery/available-nodes": {...}, + "grid/recovery": {...}, + "grid/regions": {...}, + "grid/schemes": {...}, + "grid/snmp": {...}, + "grid/storage-api-certificate": {...}, + "grid/untrusted-client-network": {...}, + "grid/users": {...}, + "grid/users/root": {...}, + "grid/versions": {...} + } +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class NetAppSgGatherInfo(object): + """ Class with gather info methods """ + + def __init__(self): + """ + Parse arguments, setup variables, check parameters and ensure + request module is installed. 
+ """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update(dict( + gather_subset=dict(default=['all'], type='list', elements='str', required=False), + parameters=dict(type='dict', required=False) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = SGRestAPI(self.module) + + def get_subset_info(self, gather_subset_info): + """ + Gather StorageGRID information for the given subset using REST APIs + Input for REST APIs call : (api, data) + return gathered_sg_info + """ + + api = gather_subset_info['api_call'] + data = {} + # allow for passing in any additional rest api parameters + if self.parameters.get('parameters'): + for each in self.parameters['parameters']: + data[each] = self.parameters['parameters'][each] + + gathered_sg_info, error = self.rest_api.get(api, data) + + if error: + self.module.fail_json(msg=error) + else: + return gathered_sg_info + + return None + + def convert_subsets(self): + """ Convert an info to the REST API """ + info_to_rest_mapping = { + 'grid_accounts_info': 'grid/accounts', + 'grid_alarms_info': 'grid/alarms', + 'grid_audit_info': 'grid/audit', + 'grid_compliance_global_info': 'grid/compliance-global', + 'grid_config_info': 'grid/config', + 'grid_config_management_info': 'grid/config/management', + 'grid_config_product_version_info': 'grid/config/product-version', + 'grid_deactivated_features_info': 'grid/deactivated-features', + 'grid_dns_servers_info': 'grid/dns-servers', + 'grid_domain_names_info': 'grid/domain-names', + 'grid_ec_profiles_info': 'grid/ec-profiles', + 'grid_expansion_info': 'grid/expansion', + 'grid_expansion_nodes_info': 'grid/expansion/nodes', + 'grid_expansion_sites_info': 'grid/expansion/sites', + 'grid_grid_networks_info': 'grid/grid-networks', + 'grid_groups_info': 
'grid/groups', + 'grid_health_info': 'grid/health', + 'grid_health_topology_info': 'grid/health/topology', + 'grid_identity_source_info': 'grid/identity-source', + 'grid_ilm_criteria_info': 'grid/ilm-criteria', + 'grid_ilm_policies_info': 'grid/ilm-policies', + 'grid_ilm_rules_info': 'grid/ilm-rules', + 'grid_license_info': 'grid/license', + 'grid_management_certificate_info': 'grid/management-certificate', + 'grid_ntp_servers_info': 'grid/ntp-servers', + 'grid_recovery_available_nodes_info': 'grid/recovery/available-nodes', + 'grid_recovery_info': 'grid/recovery', + 'grid_regions_info': 'grid/regions', + 'grid_schemes_info': 'grid/schemes', + 'grid_snmp_info': 'grid/snmp', + 'grid_storage_api_certificate_info': 'grid/storage-api-certificate', + 'grid_untrusted_client_network_info': 'grid/untrusted-client-network', + 'grid_users_info': 'grid/users', + 'grid_users_root_info': 'grid/users/root', + 'versions_info': 'versions', + } + # Add rest API names as there info version, also make sure we don't add a duplicate + subsets = [] + for subset in self.parameters['gather_subset']: + if subset in info_to_rest_mapping: + if info_to_rest_mapping[subset] not in subsets: + subsets.append(info_to_rest_mapping[subset]) + else: + if subset not in subsets: + subsets.append(subset) + return subsets + + def apply(self): + """ Perform pre-checks, call functions and exit """ + + result_message = dict() + + # Defining gather_subset and appropriate api_call + get_sg_subset_info = { + 'grid/accounts': { + 'api_call': 'api/v3/grid/accounts', + }, + 'grid/alarms': { + 'api_call': 'api/v3/grid/alarms', + }, + 'grid/audit': { + 'api_call': 'api/v3/grid/audit', + }, + 'grid/compliance-global': { + 'api_call': 'api/v3/grid/compliance-global', + }, + 'grid/config': { + 'api_call': 'api/v3/grid/config', + }, + 'grid/config/management': { + 'api_call': 'api/v3/grid/config/management', + }, + 'grid/config/product-version': { + 'api_call': 'api/v3/grid/config/product-version', + }, + 
'grid/deactivated-features': { + 'api_call': 'api/v3/grid/deactivated-features', + }, + 'grid/dns-servers': { + 'api_call': 'api/v3/grid/dns-servers', + }, + 'grid/domain-names': { + 'api_call': 'api/v3/grid/domain-names', + }, + 'grid/ec-profiles': { + 'api_call': 'api/v3/grid/ec-profiles', + }, + 'grid/expansion': { + 'api_call': 'api/v3/grid/expansion', + }, + 'grid/expansion/nodes': { + 'api_call': 'api/v3/grid/expansion/nodes', + }, + 'grid/expansion/sites': { + 'api_call': 'api/v3/grid/expansion/sites', + }, + 'grid/grid-networks': { + 'api_call': 'api/v3/grid/grid-networks', + }, + 'grid/groups': { + 'api_call': 'api/v3/grid/groups', + }, + 'grid/health': { + 'api_call': 'api/v3/grid/health', + }, + 'grid/health/topology': { + 'api_call': 'api/v3/grid/health/topology', + }, + 'grid/identity-source': { + 'api_call': 'api/v3/grid/identity-source', + }, + 'grid/ilm-criteria': { + 'api_call': 'api/v3/grid/ilm-criteria', + }, + 'grid/ilm-policies': { + 'api_call': 'api/v3/grid/ilm-policies', + }, + 'grid/ilm-rules': { + 'api_call': 'api/v3/grid/ilm-rules', + }, + 'grid/license': { + 'api_call': 'api/v3/grid/license', + }, + 'grid/management-certificate': { + 'api_call': 'api/v3/grid/management-certificate', + }, + 'grid/ntp-servers': { + 'api_call': 'api/v3/grid/ntp-servers', + }, + 'grid/recovery/available-nodes': { + 'api_call': 'api/v3/grid/recovery/available-nodes', + }, + 'grid/recovery': { + 'api_call': 'api/v3/grid/recovery', + }, + 'grid/regions': { + 'api_call': 'api/v3/grid/regions', + }, + 'grid/schemes': { + 'api_call': 'api/v3/grid/schemes', + }, + 'grid/snmp': { + 'api_call': 'api/v3/grid/snmp', + }, + 'grid/storage-api-certificate': { + 'api_call': 'api/v3/grid/storage-api-certificate', + }, + 'grid/untrusted-client-network': { + 'api_call': 'api/v3/grid/untrusted-client-network', + }, + 'grid/users': { + 'api_call': 'api/v3/grid/users', + }, + 'grid/users/root': { + 'api_call': 'api/v3/grid/users/root', + }, + 'versions': { + 'api_call': 
'api/v3/versions', + }, + } + + if 'all' in self.parameters['gather_subset']: + # If all in subset list, get the information of all subsets + self.parameters['gather_subset'] = sorted(get_sg_subset_info.keys()) + + converted_subsets = self.convert_subsets() + + for subset in converted_subsets: + try: + # Verify whether the supported subset passed + specified_subset = get_sg_subset_info[subset] + except KeyError: + self.module.fail_json(msg="Specified subset %s not found, supported subsets are %s" % + (subset, list(get_sg_subset_info.keys()))) + + result_message[subset] = self.get_subset_info(specified_subset) + + self.module.exit_json(changed='False', sg_info=result_message) + + +def main(): + """ Main function """ + obj = NetAppSgGatherInfo() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_ntp.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_ntp.py new file mode 100644 index 000000000..0c22ba2c1 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_ntp.py @@ -0,0 +1,173 @@ +#!/usr/bin/python + +# (c) 2020, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Grid NTP Servers""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + + +DOCUMENTATION = """ +module: na_sg_grid_ntp +short_description: NetApp StorageGRID manage external NTP servers for the grid. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@jkandati) +description: +- Update NTP server on NetApp StorageGRID. +options: + state: + description: + - Whether the specified user should exist or not. 
+ type: str + choices: ['present'] + default: present + ntp_servers: + description: + - List of comma separated NTP server address. + type: list + elements: str + required: true + passphrase: + description: + - passphrase for GRID. + type: str + required: true +""" + +EXAMPLES = """ + - name: update NTP servers + netapp.storagegrid.na_sg_grid_ntp: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + passphrase: "{{ grid_pass }}" + ntp_servers: "x.x.x.x,xx.x.xx.x" +""" + +RETURN = """ +resp: + description: Returns information about the configured NTP servers. + returned: success + type: list + elements: str + sample: ["10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4"] +""" + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgGridNtp(object): + """ + Create, modify and delete NTP entries for StorageGRID + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present"], default="present"), + ntp_servers=dict(required=True, type="list", elements="str"), + passphrase=dict(required=True, type="str", no_log=True), + ) + ) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + # required_if=[("state", "present", ["state", "name", "protocol"])], + supports_check_mode=True, + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = 
SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = self.parameters["ntp_servers"] + self.passphrase = self.parameters["passphrase"] + self.ntp_input = {"passphrase": self.passphrase, "servers": self.data} + + def get_grid_ntp(self): + # Check if tenant account exists + # Return tenant account info if found, or None + api = "api/v3/grid/ntp-servers" + + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def update_grid_ntp(self): + api = "api/v3/grid/ntp-servers/update" + + response, error = self.rest_api.post(api, self.ntp_input) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + grid_ntp = self.get_grid_ntp() + + cd_action = self.na_helper.get_cd_action(grid_ntp, self.parameters["ntp_servers"]) + + if cd_action is None and self.parameters["state"] == "present": + # let's see if we need to update parameters + update = False + + ntp_diff = [i for i in self.data + grid_ntp if i not in self.data or i not in grid_ntp] + if ntp_diff: + update = True + + if update: + self.na_helper.changed = True + + result_message = "" + resp_data = grid_ntp + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + resp_data = self.update_grid_ntp() + result_message = "Grid NTP updated" + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_grid_ntp = SgGridNtp() + na_sg_grid_ntp.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_regions.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_regions.py new file mode 100644 index 000000000..58179cf03 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_regions.py @@ -0,0 +1,163 
@@ +#!/usr/bin/python + +# (c) 2020, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Grid Regions""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + + +DOCUMENTATION = """ +module: na_sg_grid_regions +short_description: NetApp StorageGRID manage Regions for the grid. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create, Update, Delete Users within a NetApp StorageGRID tenant. +options: + state: + description: + - Whether the specified user should exist or not. + type: str + choices: ['present'] + default: present + regions: + description: + - List of regions + required: true + type: list + elements: str +""" + +EXAMPLES = """ + - name: update Regions + netapp.storagegrid.na_sg_grid_regions: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + regions: "us-east-1" +""" + +RETURN = """ +resp: + description: Returns information about the configured regions. 
+ returned: success + type: list + elements: str + sample: ["us-east-1", "us-central-1"] +""" + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgGridRegions(object): + """ + Create, modify and delete Regions for StorageGRID + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present"], default="present"), + regions=dict(required=True, type="list", elements="str"), + ) + ) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + # required_if=[("state", "present", ["state", "name", "protocol"])], + supports_check_mode=True, + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = self.parameters["regions"] + + def get_grid_regions(self): + # Check if tenant account exists + # Return tenant account info if found, or None + api = "api/v3/grid/regions" + + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def update_grid_regions(self): + api = "api/v3/grid/regions" + + response, error = self.rest_api.put(api, self.data) + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + grid_regions = 
self.get_grid_regions() + + cd_action = self.na_helper.get_cd_action(grid_regions, self.parameters["regions"]) + + if cd_action is None and self.parameters["state"] == "present": + # let's see if we need to update parameters + update = False + + regions_diff = [i for i in self.data + grid_regions if i not in self.data or i not in grid_regions] + if regions_diff: + update = True + + if update: + self.na_helper.changed = True + + result_message = "" + resp_data = grid_regions + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + resp_data = self.update_grid_regions() + result_message = "Grid Regions updated" + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_grid_regions = SgGridRegions() + na_sg_grid_regions.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_traffic_classes.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_traffic_classes.py new file mode 100644 index 000000000..9901a3e00 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_traffic_classes.py @@ -0,0 +1,375 @@ +#!/usr/bin/python + +# (c) 2022, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Traffic Classification Policies""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +module: na_sg_grid_traffic_classes +short_description: Manage Traffic Classification Policy configuration on StorageGRID. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '21.10.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create, Update, Delete Traffic Classification Policies on NetApp StorageGRID. 
+options: + state: + description: + - Whether the specified Traffic Classification Policy should exist. + type: str + choices: ['present', 'absent'] + default: present + name: + description: + - Name of the Traffic Classification Policy. + type: str + policy_id: + description: + - Traffic Classification Policy ID. + - May be used for modify or delete operation. + type: str + description: + description: + - Description of the Traffic Classification Policy. + type: str + matchers: + description: + - A set of parameters to match. + - The traffic class will match requests where any of these matchers match. + type: list + elements: dict + suboptions: + type: + description: + - The attribute of the request to match. + - C(bucket) - The S3 bucket (or Swift container) being accessed. + - C(bucket-regex) - A regular expression to evaluate against the S3 bucket (or Swift container) being accessed. + - C(cidr) - Matches if the client request source IP is in the specified IPv4 CIDR (RFC4632). + - C(tenant) - Matches if the S3 bucket (or Swift container) is owned by the tenant account with this ID. + choices: ['bucket', 'bucket-regex', 'cidr', 'endpoint', 'tenant'] + type: str + required: true + inverse: + description: + - If I(true), entities that match the value are excluded. + type: bool + default: false + members: + description: + - A list of members to match on. + type: list + elements: str + required: true + limits: + description: + - Optional limits to impose on client requests matched by this traffic class. + - Only one of each limit type can be specified. + type: list + elements: dict + suboptions: + type: + description: + - The type of limit to apply. + - C(aggregateBandwidthIn) - The maximum combined upload bandwidth in bytes/second of all concurrent requests that match this policy. + - C(aggregateBandwidthOut) - The maximum combined download bandwidth in bytes/second of all concurrent requests that match this policy. 
+ - C(concurrentReadRequests) - The maximum number of download requests that can be in progress at the same time. + - C(concurrentWriteRequests) - The maximum number of upload requests that can be in progress at the same time. + - C(readRequestRate) - The maximum number of download requests that can be started each second. + - C(writeRequestRate) - The maximum number of download requests that can be started each second. + - C(perRequestBandwidthIn) - The maximum upload bandwidth in bytes/second allowed for each request that matches this policy. + - C(perRequestBandwidthOut) - The maximum download bandwidth in bytes/second allowed for each request that matches this policy. + choices: [ + 'aggregateBandwidthIn', + 'aggregateBandwidthOut', + 'concurrentReadRequests', + 'concurrentWriteRequests', + 'readRequestRate', + 'writeRequestRate', + 'perRequestBandwidthIn', + 'perRequestBandwidthOut' + ] + type: str + required: true + value: + description: + - The limit to apply. + - Limit values are type specific. 
+ type: int + required: true +""" + +EXAMPLES = """ + - name: create Traffic Classification Policy with bandwidth limit on buckets + netapp.storagegrid.na_sg_grid_traffic_classes: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + name: Traffic-Policy1 + matchers: + - type: bucket + members: bucket1,anotherbucket + limits: + - type: aggregateBandwidthOut + value: 100000000 + + - name: create Traffic Classification Policy with bandwidth limits except for specific tenant account + netapp.storagegrid.na_sg_grid_traffic_classes: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + name: Fabricpool-Policy + description: "Limit all to 500MB/s except FabricPool tenant" + matchers: + - type: tenant + inverse: True + members: 12345678901234567890 + limits: + - type: aggregateBandwidthIn + value: 50000000 + - type: aggregateBandwidthOut + value: 50000000 + + - name: rename Traffic Classification Policy + netapp.storagegrid.na_sg_grid_traffic_classes: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + policy_id: 00000000-0000-0000-0000-000000000000 + name: Traffic-Policy1-New-Name + matchers: + - type: bucket + members: bucket1,anotherbucket + limits: + - type: aggregateBandwidthOut + value: 100000000 + + - name: delete Traffic Classification Policy + netapp.storagegrid.na_sg_grid_traffic_classes: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: absent + name: Traffic-Policy1 +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID Traffic Classification Policy. 
+ returned: success + type: dict + sample: { + "id": "6b2946e6-7fed-40d0-9262-8e922580aba7", + "name": "Traffic-Policy1", + "description": "Traffic Classification Policy 1", + "matchers": [ + { + "type": "cidr", + "inverse": False, + "members": [ + "192.168.50.0/24" + ] + }, + { + "type": "bucket", + "inverse": False, + "members": [ + "mybucket1", + "mybucket2" + ] + }, + ], + "limits": [ + { + "type": "aggregateBandwidthOut", + "value": 100000000 + } + ], + } +""" + +import json + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgGridTrafficClasses: + """ + Create, modify and delete Traffic Classification Policies for StorageGRID + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present", "absent"], default="present"), + name=dict(required=False, type="str"), + policy_id=dict(required=False, type="str"), + description=dict(required=False, type="str"), + matchers=dict( + required=False, + type="list", + elements="dict", + options=dict( + type=dict( + required=True, + type="str", + choices=["bucket", "bucket-regex", "cidr", "endpoint", "tenant"], + ), + inverse=dict(required=False, type="bool", default="false"), + members=dict(required=True, type="list", elements="str"), + ), + ), + limits=dict( + required=False, + type="list", + elements="dict", + options=dict( + type=dict( + required=True, + type="str", + choices=[ + "aggregateBandwidthIn", + "aggregateBandwidthOut", + "concurrentReadRequests", + "concurrentWriteRequests", + 
"readRequestRate", + "writeRequestRate", + "perRequestBandwidthIn", + "perRequestBandwidthOut", + ], + ), + value=dict(required=True, type="int"), + ), + ), + ) + ) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[("state", "present", ["name"])], + required_one_of=[("name", "policy_id")], + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = {} + + if self.parameters["state"] == "present": + for k in ["name", "description", "matchers", "limits"]: + if self.parameters.get(k) is not None: + self.data[k] = self.parameters[k] + + def get_traffic_class_policy_id(self): + # Check if Traffic Classification Policy exists + # Return policy ID if found, or None + api = "api/v3/grid/traffic-classes/policies" + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + return next((item["id"] for item in response.get("data") if item["name"] == self.parameters["name"]), None) + + def get_traffic_class_policy(self, policy_id): + api = "api/v3/grid/traffic-classes/policies/%s" % policy_id + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def create_traffic_class_policy(self): + api = "api/v3/grid/traffic-classes/policies" + # self.module.fail_json(msg=self.data) + response, error = self.rest_api.post(api, self.data) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def delete_traffic_class_policy(self, policy_id): + api = "api/v3/grid/traffic-classes/policies/%s" % policy_id + dummy, error = self.rest_api.delete(api, self.data) + + if error: + self.module.fail_json(msg=error) + + def update_traffic_class_policy(self, policy_id): + api = 
"api/v3/grid/traffic-classes/policies/%s" % policy_id + response, error = self.rest_api.put(api, self.data) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + + traffic_class_policy = None + + if self.parameters.get("policy_id"): + traffic_class_policy = self.get_traffic_class_policy(self.parameters["policy_id"]) + else: + policy_id = self.get_traffic_class_policy_id() + if policy_id: + traffic_class_policy = self.get_traffic_class_policy(policy_id) + + cd_action = self.na_helper.get_cd_action(traffic_class_policy, self.parameters) + + if cd_action is None and self.parameters["state"] == "present": + # let's see if we need to update parameters + modify = self.na_helper.get_modified_attributes(traffic_class_policy, self.data) + + result_message = "" + resp_data = {} + + if self.na_helper.changed and not self.module.check_mode: + if cd_action == "delete": + self.delete_traffic_class_policy(traffic_class_policy["id"]) + result_message = "Traffic Classification Policy deleted" + elif cd_action == "create": + resp_data = self.create_traffic_class_policy() + result_message = "Traffic Classification Policy created" + elif modify: + resp_data = self.update_traffic_class_policy(traffic_class_policy["id"]) + result_message = "Traffic Classification Policy updated" + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_grid_traffic_classes = SgGridTrafficClasses() + na_sg_grid_traffic_classes.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_user.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_user.py new file mode 100644 index 000000000..521d4f566 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_grid_user.py @@ -0,0 +1,316 @@ +#!/usr/bin/python + +# 
(c) 2020, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Grid-administration Users""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + + +DOCUMENTATION = """ +module: na_sg_grid_user +short_description: NetApp StorageGRID manage users. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create, Update, Delete Administrative Users within NetApp StorageGRID. +options: + state: + description: + - Whether the specified user should exist or not. + type: str + choices: ['present', 'absent'] + default: present + full_name: + description: + - Full Name of the user. + - Required for create operation + type: str + unique_name: + description: + - Unique Name for the user. Must begin with C(user/) or C(federated-user/) + - Required for create, modify or delete operation. + type: str + required: true + member_of: + description: + - List of C(unique_groups) that the user is a member of. + type: list + elements: str + password: + description: + - Set a password for a local user. Does not apply to federated users. + - Requires root privilege. + required: false + type: str + update_password: + description: + - Choose when to update the password. + - When set to C(always), the password will always be updated. + - When set to C(on_create), the password will only be set upon a new user creation. + default: on_create + choices: + - on_create + - always + type: str + disable: + description: + - Disable the user from signing in. Does not apply to federated users. 
+ type: bool +""" + +EXAMPLES = """ + - name: create a user + netapp.storagegrid.na_sg_grid_user: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + full_name: ansibleuser100 + unique_name: user/ansibleuser100 + member_of: "group/ansiblegroup100" + disable: false + +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID Grid user. + returned: always + type: dict + sample: { + "fullName": "Example User", + "memberOf": ["00000000-0000-0000-0000-000000000000"], + "disable": false, + "uniqueName": "user/Example", + "accountId": "0", + "id": "00000000-0000-0000-0000-000000000000", + "federated": false, + "userURN": "urn:sgws:identity::0:user/Example" + } +""" + +import json +import re + + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgGridUser(object): + """ + Create, modify and delete user within a StorageGRID Tenant Account + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present", "absent"], default="present"), + full_name=dict(required=False, type="str"), + unique_name=dict(required=True, type="str"), + member_of=dict(required=False, type="list", elements="str"), + disable=dict(required=False, type="bool"), + password=dict(required=False, type="str", no_log=True), + update_password=dict(default="on_create", choices=["on_create", "always"]), + ) + ) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[("state", 
"present", ["full_name", "unique_name"])], + supports_check_mode=True, + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = {} + self.data["memberOf"] = [] + if self.parameters.get("full_name"): + self.data["fullName"] = self.parameters["full_name"] + if self.parameters.get("unique_name"): + self.data["uniqueName"] = self.parameters["unique_name"] + + if self.parameters.get("disable") is not None: + self.data["disable"] = self.parameters["disable"] + + re_local_user = re.compile("^user/") + re_fed_user = re.compile("^federated-user/") + + if ( + re_local_user.match(self.parameters["unique_name"]) is None + and re_fed_user.match(self.parameters["unique_name"]) is None + ): + self.module.fail_json(msg="unique_name must begin with 'user/' or 'federated-user/'") + + self.pw_change = {} + if self.parameters.get("password") is not None: + if re_fed_user.match(self.parameters["unique_name"]): + self.module.fail_json(msg="password cannot be set for a federated user") + self.pw_change["password"] = self.parameters["password"] + + def get_grid_groups(self): + # Get list of admin groups + # Retrun mapping of uniqueName to ids if found, or None + api = "api/v3/grid/groups?limit=350" + + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + if response["data"]: + name_to_id_map = dict(zip([i["uniqueName"] for i in response["data"]], [j["id"] for j in response["data"]])) + return name_to_id_map + + return None + + def get_grid_user(self, unique_name): + # Use the unique name to check if the user exists + api = "api/v3/grid/users/%s" % unique_name + response, error = self.rest_api.get(api) + + if error: + if response["code"] != 404: + self.module.fail_json(msg=error["text"]) + else: + return 
response["data"] + return None + + def create_grid_user(self): + api = "api/v3/grid/users" + + response, error = self.rest_api.post(api, self.data) + + if error: + self.module.fail_json(msg=error["text"]) + + return response["data"] + + def delete_grid_user(self, user_id): + api = "api/v3/grid/users/" + user_id + + self.data = None + response, error = self.rest_api.delete(api, self.data) + if error: + self.module.fail_json(msg=error) + + def update_grid_user(self, user_id): + api = "api/v3/grid/users/" + user_id + + response, error = self.rest_api.put(api, self.data) + if error: + self.module.fail_json(msg=error["text"]) + + return response["data"] + + def set_grid_user_password(self, unique_name): + api = "api/v3/grid/users/%s/change-password" % unique_name + response, error = self.rest_api.post(api, self.pw_change) + + if error: + self.module.fail_json(msg=error["text"]) + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + grid_user = self.get_grid_user(self.parameters["unique_name"]) + + if self.parameters.get("member_of"): + grid_groups = self.get_grid_groups() + try: + self.data["memberOf"] = [grid_groups[x] for x in self.parameters["member_of"]] + except KeyError as e: + self.module.fail_json(msg="Invalid unique_group supplied: '%s' not found" % e.args[0]) + + cd_action = self.na_helper.get_cd_action(grid_user, self.parameters) + + if cd_action is None and self.parameters["state"] == "present": + # let's see if we need to update parameters + update = False + + if grid_user["memberOf"] is None: + member_of_diff = [] + else: + member_of_diff = [ + i + for i in self.data["memberOf"] + grid_user["memberOf"] + if i not in self.data["memberOf"] or i not in grid_user["memberOf"] + ] + if member_of_diff: + update = True + + if self.parameters.get("disable") is not None and self.parameters["disable"] != grid_user.get("disable"): + update = True + + if update: + self.na_helper.changed = True + result_message = "" + resp_data = grid_user + if 
self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == "delete": + self.delete_grid_user(grid_user["id"]) + result_message = "Grid User deleted" + + elif cd_action == "create": + resp_data = self.create_grid_user() + result_message = "Grid User created" + + else: + resp_data = self.update_grid_user(grid_user["id"]) + result_message = "Grid User updated" + + # If a password has been set + if self.pw_change: + if self.module.check_mode: + pass + else: + # Only update the password if update_password is always, or a create activity has occurred + if cd_action == "create" or self.parameters["update_password"] == "always": + self.set_grid_user_password(self.parameters["unique_name"]) + self.na_helper.changed = True + + results = [result_message, "Grid User password updated"] + result_message = "; ".join(filter(None, results)) + + self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_grid_user = SgGridUser() + na_sg_grid_user.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_container.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_container.py new file mode 100644 index 000000000..da9663184 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_container.py @@ -0,0 +1,352 @@ +#!/usr/bin/python + +# (c) 2020, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Buckets""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + + +DOCUMENTATION = """ +module: na_sg_org_container +short_description: Manage buckets on StorageGRID. 
+extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create S3 buckets on NetApp StorageGRID. +options: + state: + description: + - Whether the specified bucket should exist or not. + type: str + choices: ['present', 'absent'] + default: present + name: + description: + - Name of the bucket. + required: true + type: str + region: + description: + - Set a region for the bucket. + type: str + compliance: + description: + - Configure compliance settings for an S3 bucket. + - Cannot be specified along with I(s3_object_lock_enabled). + type: dict + suboptions: + auto_delete: + description: + - If enabled, objects will be deleted automatically when its retention period expires, unless the bucket is under a legal hold. + type: bool + legal_hold: + description: + - If enabled, objects in this bucket cannot be deleted, even if their retention period has expired. + type: bool + retention_period_minutes: + description: + - specify the length of the retention period for objects added to this bucket, in minutes. + type: int + s3_object_lock_enabled: + description: + - Enable S3 Object Lock on the bucket. + - S3 Object Lock requires StorageGRID 11.5 or greater. + type: bool + version_added: '21.9.0' + bucket_versioning_enabled: + description: + - Enable versioning on the bucket. + - This API requires StorageGRID 11.6 or greater. 
+ type: bool + version_added: '21.11.0' +""" + +EXAMPLES = """ + - name: create a s3 bucket + netapp.storagegrid.na_sg_org_container: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + name: ansiblebucket1 + + - name: delete a s3 bucket + netapp.storagegrid.na_sg_org_container: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: absent + name: ansiblebucket1 + + - name: create a s3 bucket with Object Lock + netapp.storagegrid.na_sg_org_container: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + name: objectlock-bucket1 + s3_object_lock_enabled: true + + - name: create a s3 bucket with versioning enabled + netapp.storagegrid.na_sg_org_container: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + name: ansiblebucket1 + bucket_versioning_enabled: true +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID bucket. 
class SgOrgContainer(object):
    """
    Create, modify and delete S3 buckets (containers) within a StorageGRID tenant.

    All calls go through the tenant ("org") scope of the StorageGRID REST API,
    so the supplied auth_token must belong to a tenant user.
    """

    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        # Common StorageGRID connection options (api_url, auth_token,
        # validate_certs) plus the module-specific bucket options.
        self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec()
        self.argument_spec.update(
            dict(
                state=dict(required=False, type="str", choices=["present", "absent"], default="present"),
                name=dict(required=True, type="str"),
                region=dict(required=False, type="str"),
                compliance=dict(
                    required=False,
                    type="dict",
                    options=dict(
                        auto_delete=dict(required=False, type="bool"),
                        legal_hold=dict(required=False, type="bool"),
                        retention_period_minutes=dict(required=False, type="int"),
                    ),
                ),
                s3_object_lock_enabled=dict(required=False, type="bool"),
                bucket_versioning_enabled=dict(required=False, type="bool"),
            )
        )
        # Maps the module's snake_case compliance suboptions to the
        # camelCase attribute names used by the REST API.
        parameter_map = {
            "auto_delete": "autoDelete",
            "legal_hold": "legalHold",
            "retention_period_minutes": "retentionPeriodMinutes",
        }
        # compliance and s3_object_lock_enabled are mutually exclusive
        # (documented in DOCUMENTATION above).
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            mutually_exclusive=[("compliance", "s3_object_lock_enabled")],
            supports_check_mode=True,
        )

        self.na_helper = NetAppModule()

        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic SG rest_api class
        self.rest_api = SGRestAPI(self.module)
        # Get API version; used by the fail_if_not_sg_minimum_version()
        # checks below.
        self.rest_api.get_sg_product_version(api_root="org")

        # Checking for the parameters passed and create new parameters list

        # Payload for the versioning endpoint; defaults to "suspended" and is
        # flipped below when bucket_versioning_enabled is True.
        self.data_versioning = {}
        self.data_versioning["versioningSuspended"] = True

        # Payload for bucket create (POST org/containers).
        self.data = {}
        self.data["name"] = self.parameters["name"]
        self.data["region"] = self.parameters.get("region")
        if self.parameters.get("compliance"):
            # Translate suboption names and drop unset (None) values.
            self.data["compliance"] = dict(
                (parameter_map[k], v) for (k, v) in self.parameters["compliance"].items() if v is not None
            )

        if self.parameters.get("s3_object_lock_enabled") is not None:
            # S3 Object Lock requires StorageGRID 11.5 or greater.
            self.rest_api.fail_if_not_sg_minimum_version("S3 Object Lock", 11, 5)
            self.data["s3ObjectLock"] = dict(enabled=self.parameters["s3_object_lock_enabled"])

        if self.parameters.get("bucket_versioning_enabled") is not None:
            # Bucket versioning configuration requires StorageGRID 11.6 or greater.
            self.rest_api.fail_if_not_sg_minimum_version("Bucket versioning configuration", 11, 6)
            self.data_versioning["versioningEnabled"] = self.parameters["bucket_versioning_enabled"]
            if self.data_versioning["versioningEnabled"]:
                self.data_versioning["versioningSuspended"] = False

    def get_org_container(self):
        # Check if bucket/container exists
        # Return info if found, or None

        params = {"include": "compliance,region"}
        response, error = self.rest_api.get("api/v3/org/containers", params=params)

        if error:
            self.module.fail_json(msg=error)

        # The containers endpoint returns a list, so scan for our name.
        for container in response["data"]:
            if container["name"] == self.parameters["name"]:
                return container

        return None

    def create_org_container(self):
        """Create the bucket (POST org/containers) and return its record."""
        api = "api/v3/org/containers"

        response, error = self.rest_api.post(api, self.data)

        if error:
            self.module.fail_json(msg=error)

        return response["data"]

    def get_org_container_versioning(self):
        """Return the current versioning configuration for the bucket."""
        api = "api/v3/org/containers/%s/versioning" % self.parameters["name"]
        response, error = self.rest_api.get(api)

        if error:
            self.module.fail_json(msg=error)

        return response["data"]

    def update_org_container_versioning(self):
        """PUT the desired versioning state (enabled/suspended) for the bucket."""
        api = "api/v3/org/containers/%s/versioning" % self.parameters["name"]

        response, error = self.rest_api.put(api, self.data_versioning)
        if error:
            self.module.fail_json(msg=error)

        return response["data"]

    def fail_if_global_object_lock_disabled(self):
        """Fail the task unless the global S3 Object Lock setting is enabled."""
        api = "api/v3/org/compliance-global"

        response, error = self.rest_api.get(api)
        if error:
            self.module.fail_json(msg=error)

        if not response["data"]["complianceEnabled"]:
            self.module.fail_json(msg="Error: Global S3 Object Lock setting is not enabled.")

    def update_org_container_compliance(self):
        """PUT the desired compliance settings for the bucket."""
        api = "api/v3/org/containers/%s/compliance" % self.parameters["name"]

        response, error = self.rest_api.put(api, self.data["compliance"])
        if error:
            self.module.fail_json(msg=error)

        return response["data"]

    def delete_org_container(self):
        """Delete the bucket (DELETE org/containers/<name>)."""
        api = "api/v3/org/containers/%s" % self.parameters["name"]

        response, error = self.rest_api.delete(api, None)
        if error:
            # NOTE(review): every other method passes the error object to
            # fail_json directly; this assumes error is a dict with a "text"
            # key -- confirm against the SGRestAPI.delete() error shape.
            self.module.fail_json(msg=error["text"])

    def apply(self):
        """
        Perform pre-checks, call functions and exit
        """
        versioning_config = None
        update_versioning = False

        org_container = self.get_org_container()

        # Only fetch the versioning config when the bucket exists and the
        # user asked to manage versioning.
        if org_container and self.parameters.get("bucket_versioning_enabled") is not None:
            versioning_config = self.get_org_container_versioning()

        cd_action = self.na_helper.get_cd_action(org_container, self.parameters)

        if cd_action is None and self.parameters["state"] == "present":
            # let's see if we need to update parameters
            update_compliance = False

            if self.parameters.get("compliance") and org_container.get("compliance") != self.data["compliance"]:
                update_compliance = True
                self.na_helper.changed = True

            if (
                versioning_config
                and versioning_config["versioningEnabled"] != self.data_versioning["versioningEnabled"]
            ):
                update_versioning = True
                self.na_helper.changed = True

        result_message = ""
        resp_data = org_container
        if self.na_helper.changed:
            if self.module.check_mode:
                # check mode: report the pending change without touching the API
                pass
            else:
                if cd_action == "delete":
                    self.delete_org_container()
                    resp_data = None
                    result_message = "Org Container deleted"

                elif cd_action == "create":
                    if self.parameters.get("s3_object_lock_enabled"):  # if it is set and true
                        self.fail_if_global_object_lock_disabled()

                    resp_data = self.create_org_container()

                    # versioning is applied with a follow-up call after create
                    if self.parameters.get("bucket_versioning_enabled") is not None:
                        self.update_org_container_versioning()
                    result_message = "Org Container created"

                else:
                    if update_compliance:
                        resp_data = self.update_org_container_compliance()
                    if update_versioning:
                        self.update_org_container_versioning()
                    result_message = "Org Container updated"

        self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data)
+extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create, Update, Delete Groups within NetApp StorageGRID tenant. +options: + state: + description: + - Whether the specified group should exist or not. + type: str + choices: ['present', 'absent'] + default: present + unique_name: + description: + - Unique Name for the group. Must begin with C(group/) or C(federated-group/). + - Required for create, modify or delete operation. + type: str + required: true + display_name: + description: + - Name of the group. + - Required for create operation. + type: str + management_policy: + description: + - Management access controls granted to the group within the tenancy. + type: dict + suboptions: + manage_all_containers: + description: + - Allows users to manage the settings for all S3 buckets in the tenant account, regardless of S3 bucket or group policies. + type: bool + manage_endpoints: + description: + - Allows users to use the Tenant Manager or the Tenant Management API to create or edit endpoints. + - Endpoints are used as the destination for StorageGRID platform services. + type: bool + manage_own_s3_credentials: + description: + - Allows users to create and remove their own S3 access keys. + - Users who do not have this permission do not see the S3 > My Credentials menu option. + type: bool + root_access: + description: + - Provides full access to the Tenant Manager and the Tenant Management API. + type: bool + s3_policy: + description: + - StorageGRID S3 Group Policy. 
class SgOrgGroup(object):
    """
    Create, modify and delete groups within a StorageGRID tenant account.
    """

    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec()
        self.argument_spec.update(
            dict(
                state=dict(required=False, type="str", choices=["present", "absent"], default="present"),
                display_name=dict(required=False, type="str"),
                unique_name=dict(required=True, type="str"),
                management_policy=dict(
                    required=False,
                    type="dict",
                    options=dict(
                        manage_all_containers=dict(required=False, type="bool"),
                        manage_endpoints=dict(required=False, type="bool"),
                        manage_own_s3_credentials=dict(required=False, type="bool"),
                        root_access=dict(required=False, type="bool"),
                    ),
                ),
                s3_policy=dict(required=False, type="json"),
            )
        )
        # Maps the module's snake_case management_policy suboptions to the
        # camelCase attribute names used by the REST API.
        parameter_map = {
            "manage_all_containers": "manageAllContainers",
            "manage_endpoints": "manageEndpoints",
            "manage_own_s3_credentials": "manageOwnS3Credentials",
            "root_access": "rootAccess",
        }
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # required_if=[("state", "present", ["display_name"])],
            supports_check_mode=True,
        )

        self.na_helper = NetAppModule()

        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic SG rest_api class
        self.rest_api = SGRestAPI(self.module)
        # Checking for the parameters passed and create new parameters list
        self.data = {}
        self.data["displayName"] = self.parameters.get("display_name")
        self.data["uniqueName"] = self.parameters["unique_name"]
        # Only add the parameter if value is True, as JSON response does not include non-true objects
        # (the idempotency comparison in apply() relies on this shape).
        self.data["policies"] = {}

        if self.parameters.get("management_policy"):
            self.data["policies"] = {
                "management": dict(
                    (parameter_map[k], v) for (k, v) in self.parameters["management_policy"].items() if v
                )
            }
            if not self.data["policies"].get("management"):
                self.data["policies"]["management"] = None

        if self.parameters.get("s3_policy"):
            try:
                self.data["policies"]["s3"] = json.loads(self.parameters["s3_policy"])
            except ValueError:
                self.module.fail_json(msg="Failed to decode s3_policy. Invalid JSON.")

        # unique_name must name either a local or a federated group.
        self.re_local_group = re.compile("^group/")
        self.re_fed_group = re.compile("^federated-group/")

        if (
            self.re_local_group.match(self.parameters["unique_name"]) is None
            and self.re_fed_group.match(self.parameters["unique_name"]) is None
        ):
            self.module.fail_json(msg="unique_name must begin with 'group/' or 'federated-group/'")

    def get_org_group(self, unique_name):
        # Use the unique name to check if the group exists
        # A 404 simply means the group does not exist yet.
        api = "api/v3/org/groups/%s" % unique_name
        response, error = self.rest_api.get(api)

        if error:
            if response["code"] != 404:
                self.module.fail_json(msg=error)
        else:
            return response["data"]
        return None

    def create_org_group(self):
        """Create the group (POST org/groups) and return its record."""
        api = "api/v3/org/groups"

        response, error = self.rest_api.post(api, self.data)

        if error:
            self.module.fail_json(msg=error)

        return response["data"]

    def delete_org_group(self, group_id):
        """Delete the group by its API id (not the unique name)."""
        api = "api/v3/org/groups/" + group_id

        self.data = None
        response, error = self.rest_api.delete(api, self.data)
        if error:
            self.module.fail_json(msg=error)

    def update_org_group(self, group_id):
        """PUT the desired group attributes and return the updated record."""
        api = "api/v3/org/groups/" + group_id

        response, error = self.rest_api.put(api, self.data)
        if error:
            self.module.fail_json(msg=error)

        return response["data"]

    def apply(self):
        """
        Perform pre-checks, call functions and exit
        """
        org_group = self.get_org_group(self.parameters["unique_name"])

        cd_action = self.na_helper.get_cd_action(org_group, self.parameters)

        if cd_action is None and self.parameters["state"] == "present":
            # let's see if we need to update parameters
            update = False

            if self.parameters.get("management_policy"):
                if org_group.get("policies") is None or org_group.get("policies", {}).get("management") != self.data["policies"]["management"]:
                    update = True
            if self.parameters.get("s3_policy"):
                if org_group.get("policies") is None or org_group.get("policies", {}).get("s3") != self.data["policies"]["s3"]:
                    update = True

            if update:
                self.na_helper.changed = True
        result_message = ""
        resp_data = org_group
        if self.na_helper.changed:
            if self.module.check_mode:
                # check mode: report the pending change without touching the API
                pass
            else:
                if cd_action == "delete":
                    self.delete_org_group(org_group["id"])
                    result_message = "Org Group deleted"

                elif cd_action == "create":
                    resp_data = self.create_org_group()
                    result_message = "Org Group created"

                else:
                    # for a federated group, the displayName parameter needs to be specified
                    # and must match the existing displayName
                    if self.re_fed_group.match(self.parameters["unique_name"]):
                        self.data["displayName"] = org_group["displayName"]

                    resp_data = self.update_org_group(org_group["id"])
                    result_message = "Org Group updated"

        self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data)
+- If module is run with C(check_mode), a connectivity test will be performed using the supplied values without changing the configuration. +- This module is idempotent if I(password) is not specified. +options: + state: + description: + - Whether identity federation should be enabled or not. + type: str + choices: ['present', 'absent'] + default: present + username: + description: + - The username to bind to the LDAP server. + type: str + password: + description: + - The password associated with the username. + type: str + hostname: + description: + - The hostname or IP address of the LDAP server. + type: str + port: + description: + - The port used to connect to the LDAP server. Typically 389 for LDAP, or 636 for LDAPS. + type: int + base_group_dn: + description: + - The Distinguished Name of the LDAP subtree to search for groups. + type: str + base_user_dn: + description: + - The Distinguished Name of the LDAP subtree to search for users. + type: str + ldap_service_type: + description: + - The type of LDAP server. + choices: ['Active Directory', 'OpenLDAP', 'Other'] + type: str + type: + description: + - The type of identity source. + - Default is 'ldap'. + type: str + default: ldap + ldap_user_id_attribute: + description: + - The LDAP attribute which contains the unique user name of a user. + - Should be configured if I(ldap_service_type=Other). + type: str + ldap_user_uuid_attribute: + description: + - The LDAP attribute which contains the permanent unique identity of a user. + - Should be configured if I(ldap_service_type=Other). + type: str + ldap_group_id_attribute: + description: + - The LDAP attribute which contains the group for a user. + - Should be configured if I(ldap_service_type=Other). + type: str + ldap_group_uuid_attribute: + description: + - The LDAP attribute which contains the group's permanent unique identity. + - Should be configured if I(ldap_service_type=Other). 
+    type: str
+  tls:
+    description:
+    - Whether Transport Layer Security is used to connect to the LDAP server.
+    choices: ['STARTTLS', 'LDAPS', 'Disabled']
+    type: str
+    default: STARTTLS
+  ca_cert:
+    description:
+    - Custom certificate used to connect to the LDAP server.
+    - If a custom certificate is not supplied, the operating system CA certificate will be used.
+    type: str
+"""
+
+EXAMPLES = """
+  - name: test identity federation configuration
+    netapp.storagegrid.na_sg_org_identity_federation:
+      api_url: "https://"
+      auth_token: "storagegrid-auth-token"
+      validate_certs: false
+      state: present
+      ldap_service_type: "Active Directory"
+      hostname: "ad.example.com"
+      port: 389
+      username: "binduser"
+      password: "bindpass"
+      base_group_dn: "DC=example,DC=com"
+      base_user_dn: "DC=example,DC=com"
+      tls: "Disabled"
+    check_mode: yes
+
+  - name: configure identity federation with AD and TLS
+    netapp.storagegrid.na_sg_org_identity_federation:
+      api_url: "https://"
+      auth_token: "storagegrid-auth-token"
+      validate_certs: false
+      state: present
+      ldap_service_type: "Active Directory"
+      hostname: "ad.example.com"
+      port: 636
+      username: "binduser"
+      password: "bindpass"
+      base_group_dn: "DC=example,DC=com"
+      base_user_dn: "DC=example,DC=com"
+      tls: "LDAPS"
+      ca_cert: |
+        -----BEGIN CERTIFICATE-----
+        MIIC+jCCAeICCQDmn9Gow08LTzANBgkqhkiG9w0BAQsFADA/..swCQYDVQQGEwJV
+        bXBsZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB..JFzNIXQEGnsgjV
+        JGU4giuvOLOZ8Q3gyuUbkSUQDjmjpMR8PliwJ6iW2Ity89Dv..dl1TaIYI/ansyZ
+        Uxk4YXeN6kUkrDtNxCg1McALzXVAfxMTtj2SFlLxne4Z6rX2..UyftQrfM13F1vY
+        gK8dBPz+l+X/Uozo/xNm7gxe68p9le9/pcULst1CQn5/sPqq..kgWcSvlKUItu82
+        lq3B2169rovdIaNdcvaQjMPhrDGo5rvLfMN35U3Hgbz41PL5..x2BcUE6/0ab5T4
+        qKBxKa3t9twj+zpUqOzyL0PFfCE+SK5fEXAS1ow4eAcLN+eB..gR/PuvGAyIPCtE
+        1+X4GrECAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAFpO+04Ra..FMJPH6dBmzfb7l
+        k04BWTvSlur6HiQdXY+oFQMJZzyI7MQ8v9HBIzS0ZAzYWLp4..VZhHmRxnrWyxVs
+        u783V5YfQH2L4QnBDoiDefgxyfDs2PcoF5C+X9CGXmPqzst2..y/6tdOVJzdiA==
+        -----END
class SgOrgIdentityFederation:
    """
    Configure and modify StorageGRID tenant identity federation (LDAP/AD).

    In check mode, the supplied settings are sent to the API with test=True,
    which validates connectivity without saving the configuration.
    """

    def __init__(self):
        """
        Parse arguments, set up state variables,
        check parameters and ensure request module is installed.
        """
        self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec()
        self.argument_spec.update(
            dict(
                state=dict(required=False, type="str", choices=["present", "absent"], default="present"),
                username=dict(required=False, type="str"),
                password=dict(required=False, type="str", no_log=True),
                hostname=dict(required=False, type="str"),
                port=dict(required=False, type="int"),
                base_group_dn=dict(required=False, type="str"),
                base_user_dn=dict(required=False, type="str"),
                ldap_service_type=dict(required=False, type="str", choices=["OpenLDAP", "Active Directory", "Other"]),
                type=dict(required=False, type="str", default="ldap"),
                ldap_user_id_attribute=dict(required=False, type="str"),
                ldap_user_uuid_attribute=dict(required=False, type="str"),
                ldap_group_id_attribute=dict(required=False, type="str"),
                ldap_group_uuid_attribute=dict(required=False, type="str"),
                tls=dict(required=False, type="str", choices=["STARTTLS", "LDAPS", "Disabled"], default="STARTTLS"),
                ca_cert=dict(required=False, type="str"),
            ),
        )

        # Maps snake_case module options to the camelCase attribute names
        # used by the identity-source API.  'tls' and 'type' are handled
        # separately below.
        parameter_map = {
            "username": "username",
            "password": "password",
            "hostname": "hostname",
            "port": "port",
            "base_group_dn": "baseGroupDn",
            "base_user_dn": "baseUserDn",
            "ldap_service_type": "ldapServiceType",
            "ldap_user_id_attribute": "ldapUserIdAttribute",
            "ldap_user_uuid_attribute": "ldapUserUUIDAttribute",
            "ldap_group_id_attribute": "ldapGroupIdAttribute",
            "ldap_group_uuid_attribute": "ldapGroupUUIDAttribute",
            "ca_cert": "caCert",
        }
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True,
        )

        self.na_helper = NetAppModule()

        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic SG rest_api class
        self.rest_api = SGRestAPI(self.module)

        # Build the PUT payload for api/v3/org/identity-source.
        self.data = {}

        if self.parameters["state"] == "present":
            self.data["disable"] = False

        # Copy every supplied option into the payload under its API name.
        for option, attribute in parameter_map.items():
            if self.parameters.get(option) is not None:
                self.data[attribute] = self.parameters[option]

        # Translate the single 'tls' option into the API's two flags.
        if self.parameters.get("tls") == "STARTTLS":
            self.data["disableTLS"] = False
            self.data["enableLDAPS"] = False
        elif self.parameters.get("tls") == "LDAPS":
            self.data["disableTLS"] = False
            self.data["enableLDAPS"] = True
        else:
            self.data["disableTLS"] = True
            self.data["enableLDAPS"] = False

    def get_org_identity_source(self):
        """Fetch the tenant's current identity source configuration."""
        api = "api/v3/org/identity-source"
        response, error = self.rest_api.get(api)

        if error:
            self.module.fail_json(msg=error)
        # fail_json() exits the module, so reaching here means the call
        # succeeded.  (A trailing unreachable 'return None' was removed.)
        return response["data"]

    def update_identity_federation(self, test=False):
        """
        PUT the desired identity source configuration.

        :param test: when True, the API is asked to only test the supplied
                     settings (connectivity check) rather than save them.
        :return: the decoded response data, or None when the API returned
                 no body.
        """
        api = "api/v3/org/identity-source"

        params = {}

        if test:
            params["test"] = True

        response, error = self.rest_api.put(api, self.data, params=params)
        if error:
            # include the payload to make LDAP configuration errors debuggable
            self.module.fail_json(msg=error, payload=self.data)

        if response is not None:
            return response["data"]
        return None

    def apply(self):
        """
        Perform pre-checks, call functions and exit
        """
        org_identity_source = self.get_org_identity_source()

        cd_action = self.na_helper.get_cd_action(org_identity_source, self.parameters)

        if cd_action is None and self.parameters["state"] == "present":
            # let's see if we need to update parameters
            update = False

            # Compare every desired attribute except the password; the API
            # returns it masked (see RETURN sample), so it cannot be compared.
            for k in (i for i in self.data.keys() if i != "password"):
                if self.data[k] != org_identity_source.get(k):
                    update = True
                    break

            # if a password has been specified we need to update it
            if self.data.get("password") and self.parameters["state"] == "present":
                update = True
                self.module.warn("Password attribute has been specified. Task is not idempotent.")

            if update:
                self.na_helper.changed = True

        if cd_action == "delete":
            # if identity federation is already in a disable state
            if org_identity_source.get("disable"):
                self.na_helper.changed = False

        result_message = ""
        resp_data = org_identity_source

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "delete":
                self.data = dict(disable=True)
                resp_data = self.update_identity_federation()
                result_message = "Tenant identity federation disabled"
            else:
                resp_data = self.update_identity_federation()
                result_message = "Tenant identity federation updated"

        if self.module.check_mode:
            self.update_identity_federation(test=True)
            # if no error, connection test successful
            self.module.exit_json(changed=self.na_helper.changed, msg="Connection test successful")

        self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data)
+short_description: NetApp StorageGRID Org information gatherer. +description: + - This module allows you to gather various information about StorageGRID Org configuration. +version_added: 20.11.0 + +options: + gather_subset: + type: list + elements: str + description: + - When supplied, this argument will restrict the information collected to a given subset. + - Either the info name or the Rest API can be given. + - Possible values for this argument include + - C(org_compliance_global_info) or C(org/compliance-global) + - C(org_config_info) or C(org/config) + - C(org_config_product_version_info) or C(org/config/product-version) + - C(org_containers_info) or C(org/containers) + - C(org_deactivated_features_info) or C(org/deactivated-features) + - C(org_endpoints_info) or C(org/endpoints) + - C(org_groups_info) or C(org/groups) + - C(org_identity_source_info) or C(org/identity-source) + - C(org_regions_info) or C(org/regions) + - C(org_users_current_user_s3_access_keys_info) or C(org/users/current-user/s3-access-keys) + - C(org_usage_info) or C(org/usage) + - C(org_users_info) or C(org/users) + - C(org_users_root_info) or C(org/users/root) + - C(versions_info) or C(versions) + - Can specify a list of values to include a larger subset. + default: "all" + parameters: + description: + - Allows for any rest option to be passed in. 
+ type: dict +""" + +EXAMPLES = """ +- name: Gather StorageGRID Org info + netapp.storagegrid.na_sg_org_info: + api_url: "https://1.2.3.4/" + auth_token: "storagegrid-auth-token" + validate_certs: false + register: sg_org_info + +- name: Gather StorageGRID Org info for org/containers and org/config subsets + netapp.storagegrid.na_sg_org_info: + api_url: "https://1.2.3.4/" + auth_token: "storagegrid-auth-token" + validate_certs: false + gather_subset: + - org_containers_info + - org/config + register: sg_org_info + +- name: Gather StorageGRID Org info for all subsets + netapp.storagegrid.na_sg_org_info: + api_url: "https://1.2.3.4/" + auth_token: "storagegrid-auth-token" + validate_certs: false + gather_subset: + - all + register: sg_org_info + +- name: Gather StorageGRID Org info for org/containers and org/users subsets, limit to 5 results for each subset + netapp.storagegrid.na_sg_org_info: + api_url: "https://1.2.3.4/" + auth_token: "storagegrid-auth-token" + validate_certs: false + gather_subset: + - org/containers + - org/users + parameters: + limit: 5 + register: sg_org_info +""" + +RETURN = """ +sg_info: + description: Returns various information about the StorageGRID Grid configuration. 
+ returned: always + type: dict + sample: { + "org/compliance-global": {...}, + "org/config": {...}, + "org/config/product-version": {...}, + "org/containers": {...}, + "org/deactivated-features": {...}, + "org/endpoints": {...}, + "org/groups": {...}, + "org/identity-source": {...}, + "org/regions": {...}, + "org/users/current-user/s3-access-keys": {...}, + "org/usage": {...}, + "org/users": {...}, + "org/users/root": {...}, + "org/versions": {...} + } +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class NetAppSgGatherInfo(object): + """ Class with gather info methods """ + + def __init__(self): + """ + Parse arguments, setup variables, check parameters and ensure + request module is installed. 
+ """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update(dict( + gather_subset=dict(default=['all'], type='list', elements='str', required=False), + parameters=dict(type='dict', required=False) + )) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + # set up variables + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + self.rest_api = SGRestAPI(self.module) + + def get_subset_info(self, gather_subset_info): + """ + Gather StorageGRID information for the given subset using REST APIs + Input for REST APIs call : (api, data) + return gathered_sg_info + """ + + api = gather_subset_info['api_call'] + data = {} + # allow for passing in any additional rest api parameters + if self.parameters.get('parameters'): + for each in self.parameters['parameters']: + data[each] = self.parameters['parameters'][each] + + gathered_sg_info, error = self.rest_api.get(api, data) + + if error: + self.module.fail_json(msg=error) + else: + return gathered_sg_info + + return None + + def convert_subsets(self): + """ Convert an info to the REST API """ + info_to_rest_mapping = { + 'org_compliance_global_info': 'org/compliance-global', + 'org_config_info': 'org/config', + 'org_config_product_version_info': 'org/config/product-version', + 'org_containers_info': 'org/containers', + 'org_deactivated_features_info': 'org/deactivated-features', + 'org_endpoints_info': 'org/endpoints', + 'org_groups_info': 'org/groups', + 'org_identity_source_info': 'org/identity-source', + 'org_regions_info': 'org/regions', + 'org_users_current_user_s3_access_keys_info': 'org/users/current-user/s3-access-keys', + 'org_usage_info': 'org/usage', + 'org_users_info': 'org/users', + 'org_users_root_info': 'org/users/root', + 'versions_info': 'versions' + } + # Add rest API names as there info version, also make sure we don't add a duplicate + subsets = [] + for subset 
in self.parameters['gather_subset']: + if subset in info_to_rest_mapping: + if info_to_rest_mapping[subset] not in subsets: + subsets.append(info_to_rest_mapping[subset]) + else: + if subset not in subsets: + subsets.append(subset) + return subsets + + def apply(self): + """ Perform pre-checks, call functions and exit """ + + result_message = dict() + + # Defining gather_subset and appropriate api_call + get_sg_subset_info = { + 'org/compliance-global': { + 'api_call': 'api/v3/org/compliance-global', + }, + 'org/config': { + 'api_call': 'api/v3/org/config', + }, + 'org/config/product-version': { + 'api_call': 'api/v3/org/config/product-version', + }, + 'org/containers': { + 'api_call': 'api/v3/org/containers', + }, + 'org/deactivated-features': { + 'api_call': 'api/v3/org/deactivated-features', + }, + 'org/endpoints': { + 'api_call': 'api/v3/org/endpoints', + }, + 'org/groups': { + 'api_call': 'api/v3/org/groups', + }, + 'org/identity-source': { + 'api_call': 'api/v3/org/identity-source', + }, + 'org/regions': { + 'api_call': 'api/v3/org/regions', + }, + 'org/users/current-user/s3-access-keys': { + 'api_call': 'api/v3/org/users/current-user/s3-access-keys', + }, + 'org/usage': { + 'api_call': 'api/v3/org/usage', + }, + 'org/users': { + 'api_call': 'api/v3/org/users', + }, + 'org/users/root': { + 'api_call': 'api/v3/org/users/root', + }, + 'versions': { + 'api_call': 'api/v3/versions', + }, + } + + if 'all' in self.parameters['gather_subset']: + # If all in subset list, get the information of all subsets + self.parameters['gather_subset'] = sorted(get_sg_subset_info.keys()) + + converted_subsets = self.convert_subsets() + + for subset in converted_subsets: + try: + # Verify whether the supported subset passed + specified_subset = get_sg_subset_info[subset] + except KeyError: + self.module.fail_json(msg="Specified subset %s not found, supported subsets are %s" % + (subset, list(get_sg_subset_info.keys()))) + + result_message[subset] = 
self.get_subset_info(specified_subset) + + self.module.exit_json(changed='False', sg_info=result_message) + + +def main(): + """ Main function """ + obj = NetAppSgGatherInfo() + obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_user.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_user.py new file mode 100644 index 000000000..455ffa345 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_user.py @@ -0,0 +1,335 @@ +#!/usr/bin/python + +# (c) 2020, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage Tenant Users""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + + +DOCUMENTATION = """ +module: na_sg_org_user +short_description: NetApp StorageGRID manage users within a tenancy. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create, Update, Delete Users within a NetApp StorageGRID tenant. +options: + state: + description: + - Whether the specified user should exist or not. + type: str + choices: ['present', 'absent'] + default: present + full_name: + description: + - Full Name of the user. + - Required for create operation + type: str + unique_name: + description: + - Unique Name for the user. Must begin with C(user/) or C(federated-user/). + - Required for create, modify or delete operation. + type: str + required: true + member_of: + description: + - List of unique_groups that the user is a member of. + type: list + elements: str + password: + description: + - Set a password for a local user. Does not apply to federated users. + - Requires root privilege. 
+ required: false + type: str + update_password: + description: + - Choose when to update the password. + - When set to C(always), the password will always be updated. + - When set to C(on_create), the password will only be set upon a new user creation. + default: on_create + choices: + - on_create + - always + type: str + disable: + description: + - Disable the user from signing in. Does not apply to federated users. + type: bool +""" + +EXAMPLES = """ + - name: create a tenant user + netapp.storagegrid.na_sg_org_user: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + full_name: ansibleuser1 + unique_name: user/ansibleuser1 + member_of: "group/ansiblegroup1" + disable: false + +""" + +RETURN = """ +resp: + description: Returns information about the StorageGRID tenant user. + returned: always + type: dict + sample: { + "fullName": "Example User", + "memberOf": ["00000000-0000-0000-0000-000000000000"], + "disable": false, + "uniqueName": "user/Example", + "accountId": "0", + "id": "00000000-0000-0000-0000-000000000000", + "federated": false, + "userURN": "urn:sgws:identity::0:user/Example" + } +""" + +import json +import re + + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import ( + NetAppModule, +) +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgOrgUser(object): + """ + Create, modify and delete user within a StorageGRID Tenant Account + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present", "absent"], default="present"), + 
full_name=dict(required=False, type="str"), + unique_name=dict(required=True, type="str"), + member_of=dict(required=False, type="list", elements="str"), + disable=dict(required=False, type="bool"), + password=dict(required=False, type="str", no_log=True), + update_password=dict( + default="on_create", choices=["on_create", "always"] + ), + ) + ) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[("state", "present", ["full_name", "unique_name"])], + supports_check_mode=True, + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = {} + self.data["memberOf"] = [] + if self.parameters.get("full_name"): + self.data["fullName"] = self.parameters["full_name"] + if self.parameters.get("unique_name"): + self.data["uniqueName"] = self.parameters["unique_name"] + + if self.parameters.get("disable") is not None: + self.data["disable"] = self.parameters["disable"] + + re_local_user = re.compile("^user/") + re_fed_user = re.compile("^federated-user/") + + if ( + re_local_user.match(self.parameters["unique_name"]) is None + and re_fed_user.match(self.parameters["unique_name"]) is None + ): + self.module.fail_json( + msg="unique_name must begin with 'user/' or 'federated-user/'" + ) + + self.pw_change = {} + if self.parameters.get("password") is not None: + if re_fed_user.match(self.parameters["unique_name"]): + self.module.fail_json(msg="password cannot be set for a federated user") + self.pw_change["password"] = self.parameters["password"] + + def get_org_groups(self): + # Get list of groups + # Retrun mapping of uniqueName to ids if found, or None + api = "api/v3/org/groups?limit=350" + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + + if response["data"]: + 
name_to_id_map = dict( + zip( + [i["uniqueName"] for i in response["data"]], + [j["id"] for j in response["data"]], + ) + ) + return name_to_id_map + + return None + + def get_org_user(self, unique_name): + # Use the unique name to check if the user exists + api = "api/v3/org/users/%s" % unique_name + response, error = self.rest_api.get(api) + + if error: + if response["code"] != 404: + self.module.fail_json(msg=error) + else: + return response["data"] + return None + + def create_org_user(self): + api = "api/v3/org/users" + + response, error = self.rest_api.post(api, self.data) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def delete_org_user(self, user_id): + api = "api/v3/org/users/" + user_id + + self.data = None + response, error = self.rest_api.delete(api, self.data) + if error: + self.module.fail_json(msg=error) + + def update_org_user(self, user_id): + api = "api/v3/org/users/" + user_id + + response, error = self.rest_api.put(api, self.data) + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def set_org_user_password(self, unique_name): + api = "api/v3/org/users/%s/change-password" % unique_name + response, error = self.rest_api.post(api, self.pw_change) + + if error: + self.module.fail_json(msg=error["text"]) + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + org_user = self.get_org_user(self.parameters["unique_name"]) + + if self.parameters.get("member_of"): + org_groups = self.get_org_groups() + try: + self.data["memberOf"] = [ + org_groups[x] for x in self.parameters["member_of"] + ] + except KeyError as e: + self.module.fail_json( + msg="Invalid unique_group supplied: '%s' not found" % e.args[0] + ) + + cd_action = self.na_helper.get_cd_action(org_user, self.parameters) + + if cd_action is None and self.parameters["state"] == "present": + # let's see if we need to update parameters + update = False + + if org_user["memberOf"] is None: + member_of_diff = [] + 
else: + member_of_diff = [ + i + for i in self.data["memberOf"] + org_user["memberOf"] + if i not in self.data["memberOf"] or i not in org_user["memberOf"] + ] + if member_of_diff: + update = True + + if self.parameters.get("disable") is not None and self.parameters[ + "disable" + ] != org_user.get("disable"): + update = True + + if update: + self.na_helper.changed = True + + result_message = "" + resp_data = org_user + if self.na_helper.changed: + if self.module.check_mode: + pass + else: + if cd_action == "delete": + self.delete_org_user(org_user["id"]) + result_message = "Org User deleted" + + elif cd_action == "create": + resp_data = self.create_org_user() + result_message = "Org User created" + + else: + resp_data = self.update_org_user(org_user["id"]) + result_message = "Org User updated" + + # If a password has been set + if self.pw_change: + if self.module.check_mode: + pass + else: + # Only update the password if update_password is always, or a create activity has occurred + if cd_action == "create" or self.parameters["update_password"] == "always": + self.set_org_user_password(self.parameters["unique_name"]) + self.na_helper.changed = True + + results = [result_message, "Org User password updated"] + result_message = "; ".join(filter(None, results)) + + self.module.exit_json( + changed=self.na_helper.changed, msg=result_message, resp=resp_data + ) + + +def main(): + """ + Main function + """ + na_sg_org_user = SgOrgUser() + na_sg_org_user.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_user_s3_key.py b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_user_s3_key.py new file mode 100644 index 000000000..0de396eb7 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/plugins/modules/na_sg_org_user_s3_key.py @@ -0,0 +1,210 @@ +#!/usr/bin/python + +# (c) 2020, NetApp Inc +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +"""NetApp StorageGRID - Manage User S3 keys""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + + +DOCUMENTATION = """ +module: na_sg_org_user_s3_key +short_description: Creates NetApp StorageGRID User S3 keys. +extends_documentation_fragment: + - netapp.storagegrid.netapp.sg +version_added: '20.6.0' +author: NetApp Ansible Team (@joshedmonds) +description: +- Create, Delete Users S3 keys on NetApp StorageGRID. +options: + state: + description: + - Whether the specified account should exist or not. + type: str + choices: ['present', 'absent'] + default: present + unique_user_name: + description: + - Unique user name owning the S3 Key. + required: true + type: str + expires: + description: + - Date-Time string for the key to expire. + type: str + access_key: + description: + - Access Key or S3 credential pair identifier. + - Required for delete operation. + type: str +""" + +EXAMPLES = """ + - name: create a s3 key + netapp.storagegrid.na_sg_org_user_s3_key: + api_url: "https://" + auth_token: "storagegrid-auth-token" + validate_certs: false + state: present + unique_user_name: user/ansibleuser1 +""" + +RETURN = """ +resp: + description: Returns information about an S3 access key for the user. 
+ returned: always + type: dict + sample: { + "id": "abcABC_01234-0123456789abcABCabc0123456789==", + "accountId": 12345678901234567000, + "displayName": "****************AB12", + "userURN": "urn:sgws:identity::12345678901234567000:root", + "userUUID": "00000000-0000-0000-0000-000000000000", + "expires": "2020-09-04T00:00:00.000Z" + } +""" + +import json + +import ansible_collections.netapp.storagegrid.plugins.module_utils.netapp as netapp_utils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.storagegrid.plugins.module_utils.netapp import SGRestAPI + + +class SgOrgUserS3Key(object): + """ + Create, modify and delete StorageGRID Tenant Account + """ + + def __init__(self): + """ + Parse arguments, setup state variables, + check parameters and ensure request module is installed + """ + self.argument_spec = netapp_utils.na_storagegrid_host_argument_spec() + self.argument_spec.update( + dict( + state=dict(required=False, type="str", choices=["present", "absent"], default="present"), + unique_user_name=dict(required=True, type="str"), + expires=dict(required=False, type="str"), + access_key=dict(required=False, type="str", no_log=False), + ) + ) + + self.module = AnsibleModule( + argument_spec=self.argument_spec, + required_if=[("state", "absent", ["access_key"])], + supports_check_mode=False, + ) + + self.na_helper = NetAppModule() + + # set up state variables + self.parameters = self.na_helper.set_parameters(self.module.params) + # Calling generic SG rest_api class + self.rest_api = SGRestAPI(self.module) + # Checking for the parameters passed and create new parameters list + self.data = {} + self.data["expires"] = self.parameters.get("expires") + + def get_org_user_id(self, unique_name): + # Use the unique name to check if the user exists + api = "api/v3/org/users/%s" % unique_name + response, error = self.rest_api.get(api) + + if 
error: + if response["code"] != 404: + self.module.fail_json(msg=error) + else: + return response["data"]["id"] + return None + + def get_org_user_s3_key(self, user_id, access_key): + # Use the unique name to check if the user exists + api = "api/v3/org/users/current-user/s3-access-keys/%s" % access_key + + if user_id: + api = "api/v3/org/users/%s/s3-access-keys/%s" % (user_id, access_key,) + + response, error = self.rest_api.get(api) + + if error: + self.module.fail_json(msg=error) + else: + return response["data"] + return None + + def create_org_user_s3_key(self, user_id): + api = "api/v3/org/users/current-user/s3-access-keys" + + if user_id: + api = "api/v3/org/users/%s/s3-access-keys" % user_id + + response, error = self.rest_api.post(api, self.data) + + if error: + self.module.fail_json(msg=error) + + return response["data"] + + def delete_org_user_s3_key(self, user_id, access_key): + api = "api/v3/org/users/current-user/s3-access-keys" + + if user_id: + api = "api/v3/org/users/%s/s3-access-keys/%s" % (user_id, access_key,) + + self.data = None + response, error = self.rest_api.delete(api, self.data) + if error: + self.module.fail_json(msg=error) + + def apply(self): + """ + Perform pre-checks, call functions and exit + """ + result_message = "" + resp_data = {} + user_id = None + + if self.parameters.get("unique_user_name"): + user_id = self.get_org_user_id(self.parameters["unique_user_name"]) + + if self.parameters["state"] == "present": + org_user_s3_key = None + if self.parameters.get("access_key"): + org_user_s3_key = self.get_org_user_s3_key(user_id, self.parameters["access_key"]) + resp_data = org_user_s3_key + + if not org_user_s3_key: # create + resp_data = self.create_org_user_s3_key(user_id) + self.na_helper.changed = True + + if self.parameters["state"] == "absent": + self.delete_org_user_s3_key(user_id, self.parameters["access_key"]) + self.na_helper.changed = True + result_message = "Org User S3 key deleted" + + 
self.module.exit_json(changed=self.na_helper.changed, msg=result_message, resp=resp_data) + + +def main(): + """ + Main function + """ + na_sg_org_user_s3_key = SgOrgUserS3Key() + na_sg_org_user_s3_key.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp/storagegrid/requirements.txt b/ansible_collections/netapp/storagegrid/requirements.txt new file mode 100644 index 000000000..f2293605c --- /dev/null +++ b/ansible_collections/netapp/storagegrid/requirements.txt @@ -0,0 +1 @@ +requests diff --git a/ansible_collections/netapp/storagegrid/tests/unit/compat/__init__.py b/ansible_collections/netapp/storagegrid/tests/unit/compat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/netapp/storagegrid/tests/unit/compat/builtins.py b/ansible_collections/netapp/storagegrid/tests/unit/compat/builtins.py new file mode 100644 index 000000000..bfc8adfbe --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/compat/builtins.py @@ -0,0 +1,34 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = "builtins" +else: + BUILTINS = "__builtin__" diff --git a/ansible_collections/netapp/storagegrid/tests/unit/compat/mock.py b/ansible_collections/netapp/storagegrid/tests/unit/compat/mock.py new file mode 100644 index 000000000..ce13d07cb --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/compat/mock.py @@ -0,0 +1,125 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +""" +Compat module for Python3.x's unittest.mock module +""" +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. 
It +# is the same as the python3 stdlib mock library + +try: + # Allow wildcard import because we really do want to import all of mock's + # symbols into this compat shim + # pylint: disable=wildcard-import,unused-wildcard-import + from unittest.mock import * +except ImportError: + # Python 2 + # pylint: disable=wildcard-import,unused-wildcard-import + try: + from mock import * + except ImportError: + print("You need the mock library installed on python2.x to run tests") + + +# Prior to 3.4.4, mock_open cannot handle binary read_data +if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b"\n" if isinstance(read_data, bytes) else "\n" + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. + data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=""): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + `read_data` is a string for the `read` methoddline`, and `readlines` of the + file handle to return. This is an empty string by default. 
+ """ + + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + + file_spec = list( + set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))) + ) + + if mock is None: + mock = MagicMock(name="open", spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock diff --git a/ansible_collections/netapp/storagegrid/tests/unit/compat/unittest.py b/ansible_collections/netapp/storagegrid/tests/unit/compat/unittest.py new file mode 100644 index 000000000..73a20cf8c --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/compat/unittest.py @@ -0,0 +1,44 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +import pytest + +# Allow wildcard import because we really do want to import all of +# unittests's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') + + class TestCase: + """ skip everything """ + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as unittest2 may not be available') +else: + from unittest import * diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_account.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_account.py new file mode 100644 index 000000000..e96697381 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_account.py @@ -0,0 +1,380 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Tenant Ansible module: na_sg_grid_account""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, 
+) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_account import ( + SgGridAccount as grid_account_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": ({"code": 204}, None), + "pw_change_good": ({"code": 204}, None), + "grid_accounts": ( + { + "data": [ + { + "name": "TestTenantAccount", + "capabilities": ["management", "s3"], + "policy": { + "useAccountIdentitySource": True, + "allowPlatformServices": False, + "quotaObjectBytes": None, + }, + "id": "12345678901234567890", + } + ] + }, + None, + ), + "grid_account_record": ( + { + "data": { + "name": "TestTenantAccount", + "capabilities": ["management", "s3"], + "policy": { + "useAccountIdentitySource": True, + "allowPlatformServices": False, + "quotaObjectBytes": None, + }, + "id": "12345678901234567890", + } + }, + None, + ), + "grid_account_record_with_quota": ( + { + "data": { + "name": "TestTenantAccount", + "capabilities": ["management", "s3"], + "policy": { + "useAccountIdentitySource": True, + "allowPlatformServices": False, + "quotaObjectBytes": 10737418240, + }, + "id": "12345678901234567890", + } + }, + None, + ), + "grid_account_record_update_quota": ( + { + "data": { + "name": "TestTenantAccount", + "capabilities": ["management", "s3"], + "policy": { + "useAccountIdentitySource": True, + "allowPlatformServices": False, + "quotaObjectBytes": 21474836480, + }, + "id": "12345678901234567890", + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): 
+ """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "name": "TestTenantAccount", + "protocol": "s3", + "management": True, + "use_own_identity_source": True, + "allow_platform_services": False, + "password": "abc123", + "quota_size": 0, + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "name": "TestTenantAccount", + "protocol": "s3", + "management": True, + "use_own_identity_source": True, + "allow_platform_services": False, + "password": "abc123", + "quota_size": 0, + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_grid_account(self): + return dict( + { + "state": "present", + "name": "TestTenantAccount", + "protocol": "s3", + "management": True, + "use_own_identity_source": True, + "allow_platform_services": False, + "password": "abc123", + "quota_size": 0, + 
"api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_grid_account(self): + return dict( + { + "state": "absent", + "name": "TestTenantAccount", + "protocol": "s3", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_account_module() + print( + "Info: test_module_fail_when_required_args_missing: %s" + % exc.value.args[0]["msg"] + ) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_account_module() + exit_json(changed=True, msg="Induced arguments check") + print( + "Info: test_module_fail_when_required_args_present: %s" + % exc.value.args[0]["msg"] + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_grid_account_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_account()) + my_obj = grid_account_module() + mock_request.side_effect = [ + SRR["empty_good"], # get + SRR["grid_accounts"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_tenant_account_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_create_na_sg_grid_account_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_account()) + my_obj = 
grid_account_module() + mock_request.side_effect = [ + SRR["grid_accounts"], # get id + SRR["grid_account_record"], # get account + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_create_na_sg_tenant_account_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_update_na_sg_grid_account_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_account() + args["quota_size"] = 10 + set_module_args(args) + my_obj = grid_account_module() + mock_request.side_effect = [ + SRR["grid_accounts"], # get + SRR["grid_account_record"], # get + SRR["grid_account_record_with_quota"], # put + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_update_na_sg_tenant_account_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_update_na_sg_grid_account_quota_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_account() + args["quota_size"] = 20480 + args["quota_size_unit"] = "mb" + set_module_args(args) + my_obj = grid_account_module() + mock_request.side_effect = [ + SRR["grid_accounts"], # get + SRR["grid_account_record_with_quota"], # get + SRR["grid_account_record_update_quota"], # put + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_tenant_account_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + # update Tenant Account and set pass + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_update_na_sg_grid_account_and_set_password_pass(self, 
mock_request): + args = self.set_args_create_na_sg_grid_account() + args["quota_size"] = 20480 + args["quota_size_unit"] = "mb" + args["update_password"] = "always" + + set_module_args(args) + my_obj = grid_account_module() + mock_request.side_effect = [ + SRR["grid_accounts"], # get + SRR["grid_account_record_with_quota"], # get + SRR["grid_account_record_update_quota"], # put + SRR["pw_change_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_update_na_sg_grid_account_and_set_password_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + # set pass only + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_set_na_sg_grid_account_root_password_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_account() + args["update_password"] = "always" + + set_module_args(args) + my_obj = grid_account_module() + mock_request.side_effect = [ + SRR["grid_accounts"], # get id + SRR["grid_account_record"], # get account + SRR["pw_change_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_set_na_sg_grid_account_root_password_pass: %s" % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_delete_na_sg_grid_account_pass(self, mock_request): + set_module_args(self.set_args_delete_na_sg_grid_account()) + my_obj = grid_account_module() + mock_request.side_effect = [ + SRR["grid_accounts"], # get + SRR["grid_account_record"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_tenant_account_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] diff 
--git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_certificate.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_certificate.py new file mode 100644 index 000000000..74974abff --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_certificate.py @@ -0,0 +1,342 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Grid Certificate Ansible module: na_sg_grid_certificate""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_certificate import ( + SgGridCertificate as grid_certificate_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ({"status": "error", "code": 404, "data": {}}, {"key": "error.404"},), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": (None, None), + "update_good": (None, None), + "cert_unset": ({"data": {"serverCertificateEncoded": None, "caBundleEncoded": None}}, None), + "storage_api_cert": ( + { + "data": { + "serverCertificateEncoded": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + 
"UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "NDI5MDQ1NTM1WjAmMQswCQYDVQQGEwJVUzEXMBUGA1UEAwwOczMuZXhhbXBsZS5j\n" + "b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD0LMcJUdWmTtxi7U7B\n" + "yldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36QC22n\n" + "+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIaQ8l8\n" + "STa7nLS7BIc6rD15BJaNWZpDVHIzhljlnhfnqwio/ZfP++lAjk4/j8pPGPEEI5Fe\n" + "WxhOtQjr7xTHeJxKHp2VKiLEvFxniL3qk4uJ3k5fJ7IqALUEPWH92brFp2IkObUA\n" + "EGsZYB4KFV7asBVhGuspYNzUQ6NqWbEUmtTjKEXcb1TA8RK+Pc2TotOrQ2E7Z+rU\n" + "gl2fAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAD5PW1WI7GCfxLQjaitnXpD1MR2O\n" + "6b5csymPYwRejMsSswd8egjs+vO2pbF9TptLjqGliE9XUoI+mWpuMzzd75F0jcjq\n" + "1DhlINgAmjUJEAg0RAqce0Kn8xQF+SofMtkOH+nZm3Q9nbTJKr1H5m2TnCq3v5TH\n" + "Qo0ASf0LLGgrwUtT0IghdSttYLS89dJprZ6c5wK7qeBzxfdHxxjiaSnvByL2Ryn5\n" + "cec9lptYKoRY42hWvkQv9Wkr3DDoyNA3xPdZJr0Hpf8/mSPnt9r/AR8E32xi0SXp\n" + "hOMTDgMicbK82ycxz0yW88gm6yhrChlJrWaEsVGod3FU+lbMAnagYZ/Vwp8=\n" + "-----END CERTIFICATE-----\n" + ), + "caBundleEncoded": None, + } + }, + None, + ), + "storage_api_cert_update": ( + { + "data": { + "serverCertificateEncoded": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICzjCCAbYCCQDZVi1OT89SAjANBgkqhkiG9w0BAQsFADApMQswCQYDVQQGEwJV\n" + "UzEaMBgGA1UEAwwRczMubmV3ZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NzIxWhcN\n" + "MjIwNDI5MDQ1NzIxWjApMQswCQYDVQQGEwJVUzEaMBgGA1UEAwwRczMubmV3ZXhh\n" + "bXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCmg37q2sjZ\n" + "k+HsXtai3PSMtGUiqij04JtG9ahMqIejuxy5sDCWnigh//NjdK+wPYc2VfYd6KFA\n" + "Uk9rP84M7sqdqGzIzmyEu7INyCnlbxcXlST6UZDsZnVU7Gk2GvUzk2OoO5N+G0oI\n" + "Lfc/3eKTx9j9BguOaWUy+ni+Te8j6EwK6HolGRBjLYqf1SYFBzaoVpy7pmzaFZ4R\n" + "10jFSxHbotIZ+kR8pPE5jGkP8OjOfrpbhEgmffpeq2MSCMRuhRtRiVp4ULwkMTRN\n" + "tFj89mu1gl9T3lYM/LO1SmBv3il0mNmrTL+99UJ4s2eL0zr/uHAVYJcVqFgWP7X8\n" + "WnOk+d86b0TXAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFmGV3IOuNYeM3LQxls+\n" + "/CNHznvIqvoiJOWq0S7LFy1eO7PVzCl3l/fDKjGMt2lGXeU89YKdFVPqsainNEFT\n" + 
"cNEWlezVut+/CWQpBXujyBqPLkYbzyGsakMImDb+MrSkBO5MCjlt38vppm5a97fB\n" + "9o/wM31e+N6gJLiHWs0XB9TK6bY9CvcutcGUOH/oxH1TEBgrJ3SoS7/HmZJSaCQA\n" + "hjZappzuEpGVXT8YDlb67PzUoE2rDWjdSFRXCk/0U6VR0xNgnN1WtfHaypU71DrB\n" + "zxbDaOIZoDp5G4OgjkFxoCoSWLant+LsqEwclIbCFgEvJPE8855UThelTHmIfivP\n" + "veI=\n-----END CERTIFICATE-----\n" + ), + "caBundleEncoded": None, + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "server_certificate": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + "UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "-----END CERTIFICATE-----\n" + ), + "private_key": ( + "-----BEGIN PRIVATE KEY-----\n" + 
"MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD0LMcJUdWmTtxi\n" + "7U7ByldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36Q\n" + "C22n+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIa\n" + "-----END PRIVATE KEY-----\n" + ), + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "type": "storage-api", + "server_certificate": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + "UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "-----END CERTIFICATE-----\n" + ), + "private_key": ( + "-----BEGIN PRIVATE KEY-----\n" + "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD0LMcJUdWmTtxi\n" + "7U7ByldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36Q\n" + "C22n+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIa\n" + "-----END PRIVATE KEY-----\n" + ), + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_set_na_sg_grid_storage_api_certificate(self): + return dict( + { + "state": "present", + "type": "storage-api", + "server_certificate": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + "UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "NDI5MDQ1NTM1WjAmMQswCQYDVQQGEwJVUzEXMBUGA1UEAwwOczMuZXhhbXBsZS5j\n" + "b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD0LMcJUdWmTtxi7U7B\n" + "yldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36QC22n\n" + "+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIaQ8l8\n" + "STa7nLS7BIc6rD15BJaNWZpDVHIzhljlnhfnqwio/ZfP++lAjk4/j8pPGPEEI5Fe\n" + "WxhOtQjr7xTHeJxKHp2VKiLEvFxniL3qk4uJ3k5fJ7IqALUEPWH92brFp2IkObUA\n" + "EGsZYB4KFV7asBVhGuspYNzUQ6NqWbEUmtTjKEXcb1TA8RK+Pc2TotOrQ2E7Z+rU\n" + 
"gl2fAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAD5PW1WI7GCfxLQjaitnXpD1MR2O\n" + "6b5csymPYwRejMsSswd8egjs+vO2pbF9TptLjqGliE9XUoI+mWpuMzzd75F0jcjq\n" + "1DhlINgAmjUJEAg0RAqce0Kn8xQF+SofMtkOH+nZm3Q9nbTJKr1H5m2TnCq3v5TH\n" + "Qo0ASf0LLGgrwUtT0IghdSttYLS89dJprZ6c5wK7qeBzxfdHxxjiaSnvByL2Ryn5\n" + "cec9lptYKoRY42hWvkQv9Wkr3DDoyNA3xPdZJr0Hpf8/mSPnt9r/AR8E32xi0SXp\n" + "hOMTDgMicbK82ycxz0yW88gm6yhrChlJrWaEsVGod3FU+lbMAnagYZ/Vwp8=\n" + "-----END CERTIFICATE-----\n" + ), + "private_key": ( + "-----BEGIN PRIVATE KEY-----\n" + "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD0LMcJUdWmTtxi\n" + "7U7ByldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36Q\n" + "C22n+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIa\n" + "Q8l8STa7nLS7BIc6rD15BJaNWZpDVHIzhljlnhfnqwio/ZfP++lAjk4/j8pPGPEE\n" + "I5FeWxhOtQjr7xTHeJxKHp2VKiLEvFxniL3qk4uJ3k5fJ7IqALUEPWH92brFp2Ik\n" + "ObUAEGsZYB4KFV7asBVhGuspYNzUQ6NqWbEUmtTjKEXcb1TA8RK+Pc2TotOrQ2E7\n" + "Z+rUgl2fAgMBAAECggEAAwSSqTDTvSx4WNiqAocnsPMqfckIUUOnLjLef5yzKRuQ\n" + "6l/9NpXDP3b5S6fLDBJrrw46tNIW/BgWjl01y7+rCxqE13L9SvLgtHjbua52ITOf\n" + "l0u/fDmcKHOfOqpsPhlaloYYeqsuAwLGl4CC+wBEpuj26uDRcw4x7E78NV8IIxDf\n" + "8kUNPQXI9ox6P3isXrFkMncDfKLWOYJ5fF5zCoVZai/SS8z3FhGjAXlMkay48RX4\n" + "4vuP7TNLZ2O2pAk2aVs54tQyBn9MOxIzOg3/ZFLiKZR4pY6H5sm+bT263TdvN+A4\n" + "C8kwML5HnsCjVkTzJ/3dYc9SeUOuqvJI332GCQ9YcQKBgQD8Ev2qhS61kZ3WGO6G\n" + "DRkZ6tDyt5vCuzWQ8uAAXcAerFDWN6XtDPfXq2UVcWnoCQOUpnjslCb/NJgCetLh\n" + "mOPeJGRWyMly+YuYb4/rnbwSbUs28PO4D9B/f5YQBnBjGDLL/i2+wnXg3WZTVogf\n" + "WfdKziOHGSxmWd6JinI+4UkpiwKBgQD3+krkFORTsUAlTgeIy8+QzXSuclwNygcX\n" + "HAe0F96hSYHBC7+1n7nzC1lwcbkU3jLIt3A90Uwew4nr5GCu4sSVwDeWrqP2I9WH\n" + "4w0zeaFPC1QKfKGBtsIf/89pDz/7iGlcKWlEg+56VVIJn7qC2lO8qbeUCoglsSwC\n" + "vr2Qld5WvQKBgQCHM2xpHHv8GPlOTxsIPVg8RW0C8iYSITVO5GXu7FnSWdwVuc0+\n" + "QtlgDObvxF/oe4U3Ir7zLVdpRH1Pvy8Cn22AxYYn4hPiniQYg6Xu2zB3tbVE56Hh\n" + "FGJhMD59o+Z90AnWziMdENIG5NkwU9Y48pknvz7hBEiDMSqiHObAATerlwKBgQCP\n" + 
"5LhCY3Ees3MCcqXilkmqv93eQFP0WHAG0+gQc+1m7+2QJI4pCTdwtfw/SG5akpkr\n" + "aW6DIIkoLNVCgbIsqT/jmbdoA4z3DlIg2PrXDNQytuMcdreNOoyo3trvHr9E6SIi\n" + "LZF9BYWDjTDejsY+mgwPJPh2uinInWdpbF85oA11jQKBgQCc6U2fSwpPQowOaat/\n" + "pY5bDCKxhfwrKk3Ecye5HfhbBZ0pu6Oneiq6cNhQC0X69iFn6ogTFx5qqyMQrWH0\n" + "L+kQRkyYFLnebCzUA8364lieRzc3cN+xQEn+jX8z7eDZ8JsvVnKdc6lTjPTwN1Fj\n" + "FZtaH2L1IEiA8ZZapMb/MNNozg==\n" + "-----END PRIVATE KEY-----\n" + ), + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_grid_storage_api_certificate(self): + return dict( + { + "state": "absent", + "type": "storage-api", + "server_certificate": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + "UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "-----END CERTIFICATE-----\n" + ), + "private_key": ( + "-----BEGIN PRIVATE KEY-----\n" + "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD0LMcJUdWmTtxi\n" + "7U7ByldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36Q\n" + "C22n+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIa\n" + "-----END PRIVATE KEY-----\n" + ), + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_certificate_module() + print("Info: test_module_fail_when_required_args_missing: %s" % exc.value.args[0]["msg"]) + + def test_module_pass_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_certificate_module() + exit_json(changed=True, msg="Induced arguments check") + print("Info: 
test_module_pass_when_required_args_present: %s" % exc.value.args[0]["msg"]) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_set_na_sg_grid_storage_api_certificate_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_storage_api_certificate()) + my_obj = grid_certificate_module() + mock_request.side_effect = [ + SRR["cert_unset"], # get + SRR["update_good"], # post + SRR["storage_api_cert"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_set_na_sg_grid_storage_api_certificate_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_idempotent_set_na_sg_grid_storage_api_certificate_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_storage_api_certificate()) + my_obj = grid_certificate_module() + mock_request.side_effect = [ + SRR["storage_api_cert"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_idempotent_set_na_sg_grid_storage_api_certificate_pass: %s" % repr(exc.value.args[0])) + assert not exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_storage_api_certificate_pass(self, mock_request): + args = self.set_args_set_na_sg_grid_storage_api_certificate() + args["server_certificate"] = "" + args["private_key"] = "" + + set_module_args(args) + my_obj = grid_certificate_module() + mock_request.side_effect = [ + SRR["storage_api_cert"], # get + SRR["update_good"], # put + SRR["storage_api_cert_update"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: 
test_update_na_sg_grid_storage_api_certificate_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_delete_na_sg_storage_api_certificate_pass(self, mock_request): + set_module_args(self.set_args_delete_na_sg_grid_storage_api_certificate()) + my_obj = grid_certificate_module() + mock_request.side_effect = [ + SRR["storage_api_cert"], # get + SRR["delete_good"], # delete + SRR["cert_unset"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_delete_na_sg_storage_api_certificate_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_client_certificate.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_client_certificate.py new file mode 100644 index 000000000..d21f9da9c --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_client_certificate.py @@ -0,0 +1,347 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Grid HA Group Ansible module: na_sg_grid_client_certificate""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys + +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip("Skipping Unit Tests on 2.6 as requests is not available") + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from 
ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_client_certificate import ( + SgGridClientCertificate as grid_client_certificate_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": (None, None), + "update_good": (None, None), + "version_114": ({"data": {"productVersion": "11.4.0-20200721.1338.d3969b3"}}, None), + "version_116": ({"data": {"productVersion": "11.6.0-20211120.0301.850531e"}}, None), + "client_cert_record": ( + { + "data": { + "id": "841ee2c7-3144-4c3c-8709-335462c5b05d", + "displayName": "testcert1", + "publicKey": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIEOzCCAyOgAwIBAgIIFuVL2ktGT0MwDQYJKoZIhvcNAQELBQAwbzELMAkGA1UE\n" + "BhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxFDASBgNVBAoM\n" + "-----END CERTIFICATE-----\n" + ), + "allowPrometheus": True, + "expiryDate": "2024-01-01T00:00:00.000Z", + } + }, + None, + ), + "client_cert_record_updated": ( + { + "data": { + "id": "841ee2c7-3144-4c3c-8709-335462c5b05d", + "displayName": "testcert1", + "publicKey": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICrDCCAZSgAwIBAgIUM3IQEKIypqPrXmoA/KmELXfFAz8wDQYJKoZIhvcNAQEL\n" + "BQAwADAeFw0yMjA5MDUyMzI3MTVaFw0yNDA5MDQyMzI3MTVaMAAwggEiMA0GCSqG\n" + "-----END CERTIFICATE-----\n" + ), + "allowPrometheus": True, + "expiryDate": "2024-01-01T00:00:00.000Z", + } + }, + None, + ), + "client_cert_record_rename": ( + { + "data": { + "id": "841ee2c7-3144-4c3c-8709-335462c5b05d", + "displayName": "testcert1-rename", + "publicKey": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIEOzCCAyOgAwIBAgIIFuVL2ktGT0MwDQYJKoZIhvcNAQELBQAwbzELMAkGA1UE\n" + "BhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxFDASBgNVBAoM\n" + "-----END CERTIFICATE-----\n" + ), + 
"allowPrometheus": True, + "expiryDate": "2024-01-01T00:00:00.000Z", + } + }, + None, + ), + "client_certificates": ( + { + "data": [ + { + "id": "841ee2c7-3144-4c3c-8709-335462c5b05d", + "displayName": "testcert1", + "publicKey": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIEOzCCAyOgAwIBAgIIFuVL2ktGT0MwDQYJKoZIhvcNAQELBQAwbzELMAkGA1UE\n" + "BhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxFDASBgNVBAoM\n" + "-----END CERTIFICATE-----\n" + ), + "allowPrometheus": True, + "expiryDate": "2024-01-01T00:00:00.000Z", + }, + { + "id": "869e1792-5505-42f1-a1fc-57a04e56f644", + "displayName": "testcert2", + "publicKey": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIC9DCCAdygAwIBAgIUD7y+AyrSqRjQdYVflLJ9aTIJu3wwDQYJKoZIhvcNAQEL\n" + "BQAwFTETMBEGA1UEAwwKUHJvbWV0aGV1czAeFw0yMjA4MjQxMjQxNDhaFw0yNDA4\n" + "-----END CERTIFICATE-----\n" + ), + "allowPrometheus": True, + "expiryDate": "2024-01-01T00:00:00.000Z", + }, + ] + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """a group of related Unit Tests""" + + def setUp(self): + 
self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "allow_prometheus": True, + "public_key": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIEOzCCAyOgAwIBAgIIFuVL2ktGT0MwDQYJKoZIhvcNAQELBQAwbzELMAkGA1UE\n" + "BhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxFDASBgNVBAoM\n" + "-----END CERTIFICATE-----\n" + ), + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "display_name": "testcert1", + "allow_prometheus": True, + "public_key": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIEOzCCAyOgAwIBAgIIFuVL2ktGT0MwDQYJKoZIhvcNAQELBQAwbzELMAkGA1UE\n" + "BhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxFDASBgNVBAoM\n" + "-----END CERTIFICATE-----\n" + ), + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_grid_client_certificate(self): + return dict( + { + "state": "present", + "display_name": "testcert1", + "allow_prometheus": True, + "public_key": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIEOzCCAyOgAwIBAgIIFuVL2ktGT0MwDQYJKoZIhvcNAQELBQAwbzELMAkGA1UE\n" + "BhMCVVMxCzAJBgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxFDASBgNVBAoM\n" + "-----END CERTIFICATE-----\n" + ), + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_grid_client_certificate(self): + return dict( + { + "state": "absent", + "display_name": "testcert1", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def 
test_module_fail_when_required_args_missing(self, mock_request): + """required arguments are reported as errors""" + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_client_certificate_module() + print("Info: test_module_fail_when_required_args_missing: %s" % exc.value.args[0]["msg"]) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_pass_when_required_args_present(self, mock_request): + """required arguments are reported as errors""" + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_client_certificate_module() + exit_json(changed=True, msg="Induced arguments check") + print("Info: test_module_pass_when_required_args_present: %s" % exc.value.args[0]["msg"]) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_grid_client_certificate_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_client_certificate()) + mock_request.side_effect = [ + SRR["empty_good"], # get + SRR["client_cert_record"], # post + SRR["end_of_sequence"], + ] + my_obj = grid_client_certificate_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_grid_client_certificate_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_idempotent_create_na_sg_grid_client_certificate_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_client_certificate() + set_module_args(args) + mock_request.side_effect = [ + SRR["client_certificates"], # get + SRR["client_cert_record"], # get + SRR["end_of_sequence"], + ] + my_obj = grid_client_certificate_module() + with pytest.raises(AnsibleExitJson) as exc: 
+ my_obj.apply() + print("Info: test_idempotent_create_na_sg_grid_client_certificate_pass: %s" % repr(exc.value.args[0])) + assert not exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_client_certificate_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_client_certificate() + args["public_key"] = ( + "-----BEGIN CERTIFICATE-----\n" + "MIICrDCCAZSgAwIBAgIUM3IQEKIypqPrXmoA/KmELXfFAz8wDQYJKoZIhvcNAQEL\n" + "BQAwADAeFw0yMjA5MDUyMzI3MTVaFw0yNDA5MDQyMzI3MTVaMAAwggEiMA0GCSqG\n" + "-----END CERTIFICATE-----\n", + ) + set_module_args(args) + mock_request.side_effect = [ + SRR["client_certificates"], # get + SRR["client_cert_record"], # get + SRR["client_cert_record_updated"], # put + SRR["end_of_sequence"], + ] + my_obj = grid_client_certificate_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_grid_client_certificate_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_rename_na_sg_grid_client_certificate_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_client_certificate() + args["certificate_id"] = "841ee2c7-3144-4c3c-8709-335462c5b05d" + args["display_name"] = "testcert1-rename" + set_module_args(args) + mock_request.side_effect = [ + SRR["client_cert_record"], # get + SRR["client_cert_record_rename"], # put + SRR["end_of_sequence"], + ] + my_obj = grid_client_certificate_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_rename_na_sg_grid_client_certificate_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_delete_na_sg_grid_client_certificate_pass(self, 
mock_request): + args = self.set_args_delete_na_sg_grid_client_certificate() + set_module_args(args) + mock_request.side_effect = [ + SRR["client_certificates"], # get + SRR["client_cert_record"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + my_obj = grid_client_certificate_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_delete_na_sg_grid_client_certificate_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_client_certificate_bad_certificate_id_fail(self, mock_request): + args = self.set_args_create_na_sg_grid_client_certificate() + args["certificate_id"] = "ffffffff-ffff-aaaa-aaaa-000000000000" + args["display_name"] = "Bad ID" + set_module_args(args) + mock_request.side_effect = [ + SRR["not_found"], # get + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = grid_client_certificate_module() + my_obj.apply() + print("Info: test_update_na_sg_grid_client_certificate_bad_certificate_id_fail: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["failed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_dns.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_dns.py new file mode 100644 index 000000000..42abde9c8 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_dns.py @@ -0,0 +1,241 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID DNS Ansible module: na_sg_grid_dns""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + 
pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_dns import ( + SgGridDns as grid_dns_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": ({"code": 204}, None), + "no_dns_servers": ({"data": []}, None,), + "dns_servers": ({"data": ["10.11.12.5", "10.11.12.6"]}, None,), + "add_dns_servers": ( + {"data": ["10.11.12.5", "10.11.12.6", "10.11.12.7"]}, + None, + ), + "remove_dns_servers": ({"data": ["10.11.12.5"]}, None,), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data 
into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "dns_servers": "10.11.12.8", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "dns_servers": "10.11.12.8", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_set_na_sg_grid_dns_servers(self): + return dict( + { + "state": "present", + "dns_servers": "10.11.12.5,10.11.12.6", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_add_na_sg_grid_dns_server(self): + return dict( + { + "state": "present", + "dns_servers": "10.11.12.5,10.11.12.6,10.11.12.7", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_remove_na_sg_grid_dns_server(self): + return dict( + { + "state": "present", + "dns_servers": "10.11.12.5", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_dns_module() + print( + "Info: test_module_fail_when_required_args_missing: %s" + % exc.value.args[0]["msg"] + ) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported 
as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_dns_module() + exit_json(changed=True, msg="Induced arguments check") + print( + "Info: test_module_fail_when_required_args_present: %s" + % exc.value.args[0]["msg"] + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_set_na_sg_grid_dns_servers_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_dns_servers()) + my_obj = grid_dns_module() + mock_request.side_effect = [ + SRR["no_dns_servers"], # get + SRR["dns_servers"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_set_na_sg_grid_dns_servers_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_set_na_sg_grid_dns_servers_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_dns_servers()) + my_obj = grid_dns_module() + mock_request.side_effect = [ + SRR["dns_servers"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_set_na_sg_grid_dns_servers_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_add_na_sg_grid_dns_servers_pass(self, mock_request): + set_module_args(self.set_args_add_na_sg_grid_dns_server()) + my_obj = grid_dns_module() + mock_request.side_effect = [ + SRR["dns_servers"], # get + SRR["add_dns_servers"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_add_na_sg_grid_dns_servers_pass: %s" + % 
repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_remove_na_sg_grid_dns_servers_pass(self, mock_request): + set_module_args(self.set_args_remove_na_sg_grid_dns_server()) + my_obj = grid_dns_module() + mock_request.side_effect = [ + SRR["dns_servers"], # get + SRR["remove_dns_servers"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_remove_na_sg_grid_dns_servers_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_gateway.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_gateway.py new file mode 100644 index 000000000..0a5a7e386 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_gateway.py @@ -0,0 +1,693 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Grid Load Balancer Endpoint Ansible module: na_sg_grid_gateway""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys + +# try: +# from requests import Response +# except ImportError: +# if sys.version_info < (2, 7): +# pytestmark = pytest.mark.skip("Skipping Unit Tests on 2.6 as requests is not available") +# else: +# raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_gateway import ( + SgGridGateway as grid_gateway_module, +) 
+ +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": (None, None), + "update_good": (None, None), + "version_114": ({"data": {"productVersion": "11.4.0-20200721.1338.d3969b3"}}, None), + "version_116": ({"data": {"productVersion": "11.6.0-20211120.0301.850531e"}}, None), + "gateway_record": ( + { + "data": { + "id": "e777d415-057f-4d37-9b0c-6d132d872ea0", + "displayName": "ansibletest-secure", + "enableIPv4": True, + "enableIPv6": True, + "port": 10443, + "secure": True, + "accountId": "0", + } + }, + None, + ), + "gateway_record_ha_group_binding": ( + { + "data": { + "id": "e777d415-057f-4d37-9b0c-6d132d872ea0", + "displayName": "ansibletest-secure", + "enableIPv4": True, + "enableIPv6": True, + "port": 10443, + "secure": True, + "accountId": "0", + "pinTargets": {"haGroups": ["c08e6dca-038d-4a05-9499-6fbd1e6a4c3e"], "nodeInterfaces": []}, + } + }, + None, + ), + "gateway_record_node_interface_binding": ( + { + "data": { + "id": "e777d415-057f-4d37-9b0c-6d132d872ea0", + "displayName": "ansibletest-secure", + "enableIPv4": True, + "enableIPv6": True, + "port": 10443, + "secure": True, + "accountId": "0", + "pinTargets": { + "haGroups": [], + "nodeInterfaces": [ + {"interface": "eth2", "nodeId": "0b1866ed-d6e7-41b4-815f-bf867348b76b"}, + {"interface": "eth2", "nodeId": "970ad050-b68b-4aae-a94d-aef73f3095c4"}, + ], + }, + } + }, + None, + ), + "gateway_record_rename": ( + { + "data": { + "id": "e777d415-057f-4d37-9b0c-6d132d872ea0", + "displayName": "ansibletest-rename", + "enableIPv4": True, + "enableIPv6": True, + "port": 10443, + "secure": True, + "accountId": "0", + "pinTargets": {"haGroups": ["c08e6dca-038d-4a05-9499-6fbd1e6a4c3e"], "nodeInterfaces": []}, + } + }, + None, + 
), + "ha_groups": ( + { + "data": [ + { + "id": "c08e6dca-038d-4a05-9499-6fbd1e6a4c3e", + "name": "site1_primary", + "description": "test ha group", + "virtualIps": ["10.193.174.117"], + "interfaces": [ + { + "nodeId": "0b1866ed-d6e7-41b4-815f-bf867348b76b", + "nodeName": "SITE1-ADM1", + "interface": "eth2", + "preferredMaster": True, + }, + { + "nodeId": "970ad050-b68b-4aae-a94d-aef73f3095c4", + "nodeName": "SITE2-ADM1", + "interface": "eth2", + }, + ], + "gatewayCidr": "192.168.14.1/24", + }, + { + "id": "da9ac524-9a16-4be0-9d6e-ec9b22218e75", + "name": "site1_gw", + "description": "another test ha group", + "virtualIps": ["10.193.204.200"], + "interfaces": [ + { + "nodeId": "7bb5bf05-a04c-4344-8abd-08c5c4048666", + "nodeName": "SITE1-GW1", + "interface": "eth0", + "preferredMaster": True, + }, + ], + "gatewayCidr": "192.168.14.1/24", + } + ] + }, + None, + ), + "node_health": ( + { + "data": [ + { + "id": "0b1866ed-d6e7-41b4-815f-bf867348b76b", + "isPrimaryAdmin": True, + "name": "SITE1-ADM1", + "siteId": "ae56d06d-bd83-46bd-adce-77146b1d94bd", + "siteName": "SITE1", + "severity": "normal", + "state": "connected", + "type": "adminNode", + }, + { + "id": "970ad050-b68b-4aae-a94d-aef73f3095c4", + "isPrimaryAdmin": False, + "name": "SITE2-ADM1", + "siteId": "7c24002e-5157-43e9-83e5-02db9b265b02", + "siteName": "SITE2", + "severity": "normal", + "state": "connected", + "type": "adminNode", + }, + ] + }, + None, + ), + "present_gateways": ( + { + "data": [ + { + "id": "e777d415-057f-4d37-9b0c-6d132d872ea0", + "displayName": "ansibletest-secure", + "enableIPv4": True, + "enableIPv6": True, + "port": 10443, + "secure": True, + "accountId": "0", + } + ] + }, + None, + ), + "present_gateways_with_binding": ( + { + "data": [ + { + "id": "e777d415-057f-4d37-9b0c-6d132d872ea0", + "displayName": "ansibletest-secure", + "enableIPv4": True, + "enableIPv6": True, + "port": 10443, + "secure": True, + "accountId": "0", + "pinTargets": {"haGroups": [], "nodeInterfaces": []}, + } + 
] + }, + None, + ), + "server_config": ( + { + "data": { + "defaultServiceType": "s3", + "certSource": "plaintext", + "plaintextCertData": { + "serverCertificateEncoded": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + "UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "NDI5MDQ1NTM1WjAmMQswCQYDVQQGEwJVUzEXMBUGA1UEAwwOczMuZXhhbXBsZS5j\n" + "b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD0LMcJUdWmTtxi7U7B\n" + "yldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36QC22n\n" + "+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIaQ8l8\n" + "STa7nLS7BIc6rD15BJaNWZpDVHIzhljlnhfnqwio/ZfP++lAjk4/j8pPGPEEI5Fe\n" + "WxhOtQjr7xTHeJxKHp2VKiLEvFxniL3qk4uJ3k5fJ7IqALUEPWH92brFp2IkObUA\n" + "EGsZYB4KFV7asBVhGuspYNzUQ6NqWbEUmtTjKEXcb1TA8RK+Pc2TotOrQ2E7Z+rU\n" + "gl2fAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAD5PW1WI7GCfxLQjaitnXpD1MR2O\n" + "6b5csymPYwRejMsSswd8egjs+vO2pbF9TptLjqGliE9XUoI+mWpuMzzd75F0jcjq\n" + "1DhlINgAmjUJEAg0RAqce0Kn8xQF+SofMtkOH+nZm3Q9nbTJKr1H5m2TnCq3v5TH\n" + "Qo0ASf0LLGgrwUtT0IghdSttYLS89dJprZ6c5wK7qeBzxfdHxxjiaSnvByL2Ryn5\n" + "cec9lptYKoRY42hWvkQv9Wkr3DDoyNA3xPdZJr0Hpf8/mSPnt9r/AR8E32xi0SXp\n" + "hOMTDgMicbK82ycxz0yW88gm6yhrChlJrWaEsVGod3FU+lbMAnagYZ/Vwp8=\n" + "-----END CERTIFICATE-----\n" + ), + "caBundleEncoded": None, + "metadata": { + "serverCertificateDetails": { + "subject": "/CN=test", + "issuer": "/CN=test", + "serialNumber": "32:6F:20:EB:0E:90:60:7E:07:8F:6E:CC:02:2D:7C:37:3D:AB:42:7E", + "notBefore": "2021-09-27T12:39:17.000Z", + "notAfter": "2023-09-27T12:39:17.000Z", + "fingerPrints": { + "SHA-1": "A4:F9:74:BE:E8:A2:46:C2:E1:23:DE:8F:A8:1B:F1:C4:91:51:C5:56", + "SHA-256": "7B:65:7F:CD:35:8F:33:1C:C8:2D:F0:C1:9F:58:2F:2B:3B:78:44:95:4E:23:8C:1B:2B:91:6C:94:B0:71:64:E8", + }, + "subjectAltNames": ["DNS:*.test.com"], + } + }, + }, + } + }, + None, + ), + "server_config_cert_update": ( + { + "data": { + "defaultServiceType": "s3", + "certSource": "plaintext", + 
"plaintextCertData": { + "serverCertificateEncoded": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICzjCCAbYCCQDZVi1OT89SAjANBgkqhkiG9w0BAQsFADApMQswCQYDVQQGEwJV\n" + "UzEaMBgGA1UEAwwRczMubmV3ZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NzIxWhcN\n" + "MjIwNDI5MDQ1NzIxWjApMQswCQYDVQQGEwJVUzEaMBgGA1UEAwwRczMubmV3ZXhh\n" + "bXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCmg37q2sjZ\n" + "k+HsXtai3PSMtGUiqij04JtG9ahMqIejuxy5sDCWnigh//NjdK+wPYc2VfYd6KFA\n" + "Uk9rP84M7sqdqGzIzmyEu7INyCnlbxcXlST6UZDsZnVU7Gk2GvUzk2OoO5N+G0oI\n" + "Lfc/3eKTx9j9BguOaWUy+ni+Te8j6EwK6HolGRBjLYqf1SYFBzaoVpy7pmzaFZ4R\n" + "10jFSxHbotIZ+kR8pPE5jGkP8OjOfrpbhEgmffpeq2MSCMRuhRtRiVp4ULwkMTRN\n" + "tFj89mu1gl9T3lYM/LO1SmBv3il0mNmrTL+99UJ4s2eL0zr/uHAVYJcVqFgWP7X8\n" + "WnOk+d86b0TXAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFmGV3IOuNYeM3LQxls+\n" + "/CNHznvIqvoiJOWq0S7LFy1eO7PVzCl3l/fDKjGMt2lGXeU89YKdFVPqsainNEFT\n" + "cNEWlezVut+/CWQpBXujyBqPLkYbzyGsakMImDb+MrSkBO5MCjlt38vppm5a97fB\n" + "9o/wM31e+N6gJLiHWs0XB9TK6bY9CvcutcGUOH/oxH1TEBgrJ3SoS7/HmZJSaCQA\n" + "hjZappzuEpGVXT8YDlb67PzUoE2rDWjdSFRXCk/0U6VR0xNgnN1WtfHaypU71DrB\n" + "zxbDaOIZoDp5G4OgjkFxoCoSWLant+LsqEwclIbCFgEvJPE8855UThelTHmIfivP\n" + "veI=\n-----END CERTIFICATE-----\n" + ), + "caBundleEncoded": None, + "metadata": { + "serverCertificateDetails": { + "subject": "/CN=test", + "issuer": "/CN=test", + "serialNumber": "32:6F:20:EB:0E:90:60:7E:07:8F:6E:CC:02:2D:7C:37:3D:AB:42:7E", + "notBefore": "2021-09-27T12:39:17.000Z", + "notAfter": "2023-09-27T12:39:17.000Z", + "fingerPrints": { + "SHA-1": "F2:C2:6F:A8:45:DA:86:09:91:F5:04:B0:25:43:B7:FC:FA:C1:43:F8", + "SHA-256": "99:3E:21:1A:03:25:69:C8:0A:D5:FE:E3:FB:6E:51:03:BD:A7:0E:88:6B:53:06:04:92:3B:34:17:68:43:F7:2F", + }, + "subjectAltNames": ["DNS:*.test.com"], + } + }, + }, + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: 
disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """a group of related Unit Tests""" + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "display_name": "ansibletest-secure", + "default_service_type": "s3", + "server_certificate": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + "UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "-----END CERTIFICATE-----\n" + ), + "private_key": ( + "-----BEGIN PRIVATE KEY-----\n" + "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD0LMcJUdWmTtxi\n" + "7U7ByldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36Q\n" + "C22n+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIa\n" + "-----END PRIVATE KEY-----\n" + ), + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "display_name": "ansibletest-secure", + "default_service_type": "s3", + "port": 10443, + "server_certificate": ( + 
"-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + "UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "-----END CERTIFICATE-----\n" + ), + "private_key": ( + "-----BEGIN PRIVATE KEY-----\n" + "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD0LMcJUdWmTtxi\n" + "7U7ByldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36Q\n" + "C22n+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIa\n" + "-----END PRIVATE KEY-----\n" + ), + "api_url": "https://gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_grid_gateway_port(self): + return dict( + { + "state": "present", + "display_name": "ansibletest-secure", + "default_service_type": "s3", + "port": 10443, + "server_certificate": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + "UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "NDI5MDQ1NTM1WjAmMQswCQYDVQQGEwJVUzEXMBUGA1UEAwwOczMuZXhhbXBsZS5j\n" + "b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD0LMcJUdWmTtxi7U7B\n" + "yldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36QC22n\n" + "+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIaQ8l8\n" + "STa7nLS7BIc6rD15BJaNWZpDVHIzhljlnhfnqwio/ZfP++lAjk4/j8pPGPEEI5Fe\n" + "WxhOtQjr7xTHeJxKHp2VKiLEvFxniL3qk4uJ3k5fJ7IqALUEPWH92brFp2IkObUA\n" + "EGsZYB4KFV7asBVhGuspYNzUQ6NqWbEUmtTjKEXcb1TA8RK+Pc2TotOrQ2E7Z+rU\n" + "gl2fAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAD5PW1WI7GCfxLQjaitnXpD1MR2O\n" + "6b5csymPYwRejMsSswd8egjs+vO2pbF9TptLjqGliE9XUoI+mWpuMzzd75F0jcjq\n" + "1DhlINgAmjUJEAg0RAqce0Kn8xQF+SofMtkOH+nZm3Q9nbTJKr1H5m2TnCq3v5TH\n" + "Qo0ASf0LLGgrwUtT0IghdSttYLS89dJprZ6c5wK7qeBzxfdHxxjiaSnvByL2Ryn5\n" + "cec9lptYKoRY42hWvkQv9Wkr3DDoyNA3xPdZJr0Hpf8/mSPnt9r/AR8E32xi0SXp\n" + "hOMTDgMicbK82ycxz0yW88gm6yhrChlJrWaEsVGod3FU+lbMAnagYZ/Vwp8=\n" + "-----END CERTIFICATE-----\n" + ), + 
"private_key": ( + "-----BEGIN PRIVATE KEY-----\n" + "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD0LMcJUdWmTtxi\n" + "7U7ByldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36Q\n" + "C22n+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIa\n" + "Q8l8STa7nLS7BIc6rD15BJaNWZpDVHIzhljlnhfnqwio/ZfP++lAjk4/j8pPGPEE\n" + "I5FeWxhOtQjr7xTHeJxKHp2VKiLEvFxniL3qk4uJ3k5fJ7IqALUEPWH92brFp2Ik\n" + "ObUAEGsZYB4KFV7asBVhGuspYNzUQ6NqWbEUmtTjKEXcb1TA8RK+Pc2TotOrQ2E7\n" + "Z+rUgl2fAgMBAAECggEAAwSSqTDTvSx4WNiqAocnsPMqfckIUUOnLjLef5yzKRuQ\n" + "6l/9NpXDP3b5S6fLDBJrrw46tNIW/BgWjl01y7+rCxqE13L9SvLgtHjbua52ITOf\n" + "l0u/fDmcKHOfOqpsPhlaloYYeqsuAwLGl4CC+wBEpuj26uDRcw4x7E78NV8IIxDf\n" + "8kUNPQXI9ox6P3isXrFkMncDfKLWOYJ5fF5zCoVZai/SS8z3FhGjAXlMkay48RX4\n" + "4vuP7TNLZ2O2pAk2aVs54tQyBn9MOxIzOg3/ZFLiKZR4pY6H5sm+bT263TdvN+A4\n" + "C8kwML5HnsCjVkTzJ/3dYc9SeUOuqvJI332GCQ9YcQKBgQD8Ev2qhS61kZ3WGO6G\n" + "DRkZ6tDyt5vCuzWQ8uAAXcAerFDWN6XtDPfXq2UVcWnoCQOUpnjslCb/NJgCetLh\n" + "mOPeJGRWyMly+YuYb4/rnbwSbUs28PO4D9B/f5YQBnBjGDLL/i2+wnXg3WZTVogf\n" + "WfdKziOHGSxmWd6JinI+4UkpiwKBgQD3+krkFORTsUAlTgeIy8+QzXSuclwNygcX\n" + "HAe0F96hSYHBC7+1n7nzC1lwcbkU3jLIt3A90Uwew4nr5GCu4sSVwDeWrqP2I9WH\n" + "4w0zeaFPC1QKfKGBtsIf/89pDz/7iGlcKWlEg+56VVIJn7qC2lO8qbeUCoglsSwC\n" + "vr2Qld5WvQKBgQCHM2xpHHv8GPlOTxsIPVg8RW0C8iYSITVO5GXu7FnSWdwVuc0+\n" + "QtlgDObvxF/oe4U3Ir7zLVdpRH1Pvy8Cn22AxYYn4hPiniQYg6Xu2zB3tbVE56Hh\n" + "FGJhMD59o+Z90AnWziMdENIG5NkwU9Y48pknvz7hBEiDMSqiHObAATerlwKBgQCP\n" + "5LhCY3Ees3MCcqXilkmqv93eQFP0WHAG0+gQc+1m7+2QJI4pCTdwtfw/SG5akpkr\n" + "aW6DIIkoLNVCgbIsqT/jmbdoA4z3DlIg2PrXDNQytuMcdreNOoyo3trvHr9E6SIi\n" + "LZF9BYWDjTDejsY+mgwPJPh2uinInWdpbF85oA11jQKBgQCc6U2fSwpPQowOaat/\n" + "pY5bDCKxhfwrKk3Ecye5HfhbBZ0pu6Oneiq6cNhQC0X69iFn6ogTFx5qqyMQrWH0\n" + "L+kQRkyYFLnebCzUA8364lieRzc3cN+xQEn+jX8z7eDZ8JsvVnKdc6lTjPTwN1Fj\n" + "FZtaH2L1IEiA8ZZapMb/MNNozg==\n" + "-----END PRIVATE KEY-----\n" + ), + "api_url": "https://gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + 
"validate_certs": False, + } + ) + + def set_args_delete_na_sg_grid_gateway_port(self): + return dict( + { + "state": "absent", + "display_name": "ansibletest-secure", + "default_service_type": "s3", + "port": 10443, + "server_certificate": ( + "-----BEGIN CERTIFICATE-----\n" + "MIICyDCCAbACCQCgFntI3q7iADANBgkqhkiG9w0BAQsFADAmMQswCQYDVQQGEwJV\n" + "UzEXMBUGA1UEAwwOczMuZXhhbXBsZS5jb20wHhcNMjEwNDI5MDQ1NTM1WhcNMjIw\n" + "-----END CERTIFICATE-----\n" + ), + "private_key": ( + "-----BEGIN PRIVATE KEY-----\n" + "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD0LMcJUdWmTtxi\n" + "7U7ByldDRfyCD9W+QJ1Ygm7E9iFwvkThUCV5q+DIcgSfogoSKaQuHaImLXMZn36Q\n" + "C22n+Ah2EGrQiggyny3wDzuWf5/Qg7ogqQRqiespBFLlV4RGCREHK0y5uq8mzpIa\n" + "-----END PRIVATE KEY-----\n" + ), + "api_url": "https://gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_fail_when_required_args_missing(self, mock_request): + """required arguments are reported as errors""" + mock_request.side_effect = [ + SRR["version_114"], + ] + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_gateway_module() + print("Info: test_module_fail_when_required_args_missing: %s" % exc.value.args[0]["msg"]) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_pass_when_required_args_present(self, mock_request): + """required arguments are reported as errors""" + mock_request.side_effect = [ + SRR["version_114"], + ] + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_gateway_module() + exit_json(changed=True, msg="Induced arguments check") + print("Info: test_module_pass_when_required_args_present: %s" % exc.value.args[0]["msg"]) + assert exc.value.args[0]["changed"] + + 
@patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_grid_gateway_port_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_gateway_port()) + mock_request.side_effect = [ + SRR["version_114"], # get + SRR["empty_good"], # get + SRR["gateway_record"], # post + SRR["server_config"], # post + SRR["end_of_sequence"], + ] + my_obj = grid_gateway_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_grid_gateway_port_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_idempotent_create_na_sg_grid_gateway_port_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_gateway_port() + del args["private_key"] + set_module_args(args) + mock_request.side_effect = [ + SRR["version_114"], # get + SRR["present_gateways"], # get + SRR["server_config"], # get + SRR["end_of_sequence"], + ] + my_obj = grid_gateway_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_idempotent_create_na_sg_grid_gateway_port_pass: %s" % repr(exc.value.args[0])) + assert not exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_gateway_certificate_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_gateway_port() + args["server_certificate"] = "-----BEGIN CERTIFICATE-----\nABCDEFGABCD\n-----END CERTIFICATE-----\n" + args["private_key"] = "-----BEGIN PRIVATE KEY-----\nABCDEFGABCD\n-----END PRIVATE KEY-----\n" + + set_module_args(args) + mock_request.side_effect = [ + SRR["version_114"], # get + SRR["present_gateways"], # get + SRR["server_config"], # get + SRR["server_config_cert_update"], # put + SRR["end_of_sequence"], + ] + my_obj = grid_gateway_module() 
+ with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_grid_gateway_certificate_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_delete_na_sg_grid_gateway_port_pass(self, mock_request): + set_module_args(self.set_args_delete_na_sg_grid_gateway_port()) + mock_request.side_effect = [ + SRR["version_114"], # get + SRR["present_gateways"], # get + SRR["server_config"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + my_obj = grid_gateway_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_delete_na_sg_grid_gateway_port_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_fail_minimum_version_not_met(self, mock_request): + args = self.set_args_create_na_sg_grid_gateway_port() + args["binding_mode"] = "ha-groups" + set_module_args(args) + mock_request.side_effect = [ + SRR["version_114"], # get + ] + with pytest.raises(AnsibleFailJson) as exc: + grid_gateway_module() + print("Info: test_module_fail_minimum_version_not_met: %s" % exc.value.args[0]["msg"]) + + # test create with ha groups + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_grid_gateway_port_with_ha_group_binding_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_gateway_port() + args["binding_mode"] = "ha-groups" + args["ha_groups"] = ["site1_primary", "da9ac524-9a16-4be0-9d6e-ec9b22218e75"] + set_module_args(args) + mock_request.side_effect = [ + SRR["version_116"], # get + SRR["ha_groups"], # get + SRR["empty_good"], # get + SRR["gateway_record_ha_group_binding"], # post + SRR["server_config"], # post + SRR["end_of_sequence"], + ] + 
my_obj = grid_gateway_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_grid_gateway_port_with_ha_group_binding_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + # test create with bad ha group ID + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_grid_gateway_port_with_bad_ha_group_binding_fail(self, mock_request): + mock_request.side_effect = [ + SRR["version_116"], # get + SRR["ha_groups"], # get + ] + with pytest.raises(AnsibleFailJson) as exc: + args = self.set_args_create_na_sg_grid_gateway_port() + args["binding_mode"] = "ha-groups" + args["ha_groups"] = ["fffac524-9a16-4be0-9d6e-ec9b22218e75"] + set_module_args(args) + grid_gateway_module() + print("Info: test_create_na_sg_grid_gateway_port_with_bad_ha_group_binding_fail: %s" % repr(exc.value.args[0])) + + # test create with node interfaces + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_grid_gateway_port_with_node_interface_binding_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_gateway_port() + args["binding_mode"] = "node-interfaces" + args["node_interfaces"] = [ + {"node": "SITE1-ADM1", "interface": "eth2"}, + {"node": "SITE2-ADM1", "interface": "eth2"}, + ] + set_module_args(args) + mock_request.side_effect = [ + SRR["version_116"], # get + SRR["node_health"], # get + SRR["empty_good"], # get + SRR["gateway_record_node_interface_binding"], # post + SRR["server_config"], # post + SRR["end_of_sequence"], + ] + my_obj = grid_gateway_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_grid_gateway_port_with_node_interface_binding_pass: %s" % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + # test change from global to ha groups + 
@patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_gateway_binding_to_ha_groups_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_gateway_port() + args["binding_mode"] = "ha-groups" + args["ha_groups"] = "site1_primary" + args["server_certificate"] = "-----BEGIN CERTIFICATE-----\nABCDEFGABCD\n-----END CERTIFICATE-----\n" + args["private_key"] = "-----BEGIN PRIVATE KEY-----\nABCDEFGABCD\n-----END PRIVATE KEY-----\n" + set_module_args(args) + mock_request.side_effect = [ + SRR["version_116"], # get + SRR["ha_groups"], # get + SRR["present_gateways_with_binding"], # get + SRR["server_config"], # get + SRR["gateway_record_ha_group_binding"], # put + SRR["server_config_cert_update"], # put + SRR["end_of_sequence"], + ] + my_obj = grid_gateway_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_grid_gateway_binding_to_ha_groups_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + # test rename by supplying gateway_id + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_gateway_rename_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_gateway_port() + args["gateway_id"] = "e777d415-057f-4d37-9b0c-6d132d872ea0" + args["binding_mode"] = "ha-groups" + args["ha_groups"] = "site1_primary" + set_module_args(args) + mock_request.side_effect = [ + SRR["version_116"], # get + SRR["ha_groups"], # get + SRR["gateway_record_ha_group_binding"], # get + SRR["server_config"], # get + SRR["gateway_record_rename"], # put + SRR["end_of_sequence"], + ] + my_obj = grid_gateway_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_grid_gateway_rename_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] diff --git 
a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_group.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_group.py new file mode 100644 index 000000000..fd9fdf15c --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_group.py @@ -0,0 +1,317 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Grid Group Ansible module: na_sg_grid_group""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_group import ( + SgGridGroup as grid_group_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": ({"code": 204}, None), + "grid_groups": ( + { + "data": [ + { + "displayName": "TestGridGroup", + "uniqueName": "group/testgridgroup", + "policies": { + "management": { + "tenantAccounts": True, + "metricsQuery": True, + "maintenance": True, + }, + }, + "id": "00000000-0000-0000-0000-000000000000", + "federated": False, + "groupURN": 
"urn:sgws:identity::12345678901234567890:group/testgridgroup", + } + ] + }, + None, + ), + "grid_group_record": ( + { + "data": { + "displayName": "TestGridGroup", + "uniqueName": "group/testgridgroup", + "policies": { + "management": { + "tenantAccounts": True, + "metricsQuery": True, + "maintenance": True, + }, + }, + "id": "00000000-0000-0000-0000-000000000000", + "federated": False, + "groupURN": "urn:sgws:identity::12345678901234567890:group/testgridgroup", + } + }, + None, + ), + "grid_group_record_update": ( + { + "data": { + "displayName": "TestGridGroup", + "uniqueName": "group/testgridgroup", + "policies": { + "management": { + "tenantAccounts": True, + "metricsQuery": False, + "maintenance": True, + "ilm": True, + }, + }, + "id": "00000000-0000-0000-0000-000000000000", + "federated": False, + "groupURN": "urn:sgws:identity::12345678901234567890:group/testgridgroup", + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper 
= patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "display_name": "TestGroup", + "management_policy": { + "maintenance": True, + "ilm": True, + "root_access": False, + }, + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "display_name": "TestGroup", + "unique_name": "group/testgroup", + "management_policy": { + "maintenance": True, + "ilm": True, + "root_access": False, + }, + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_grid_group(self): + return dict( + { + "state": "present", + "display_name": "TestGridGroup", + "unique_name": "group/testgridgroup", + "management_policy": { + "tenant_accounts": True, + "metrics_query": True, + "maintenance": True, + }, + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_grid_group(self): + return dict( + { + "state": "absent", + "unique_name": "group/testgridgroup", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_group_module() + print( + "Info: test_module_fail_when_required_args_missing: %s" + % exc.value.args[0]["msg"] + ) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + 
grid_group_module() + exit_json(changed=True, msg="Induced arguments check") + print( + "Info: test_module_fail_when_required_args_present: %s" + % exc.value.args[0]["msg"] + ) + assert exc.value.args[0]["changed"] + + def test_module_fail_with_bad_unique_name(self): + """ error returned if unique_name doesn't start with group or federated_group """ + with pytest.raises(AnsibleFailJson) as exc: + args = self.set_default_args_pass_check() + args["unique_name"] = "noprefixgroup" + set_module_args(args) + grid_group_module() + print( + "Info: test_module_fail_with_bad_unique_name: %s" + % exc.value.args[0]["msg"] + ) + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_grid_group_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_group()) + my_obj = grid_group_module() + mock_request.side_effect = [ + SRR["not_found"], # get + SRR["grid_group_record"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_grid_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_create_na_sg_grid_group_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_group()) + my_obj = grid_group_module() + mock_request.side_effect = [ + SRR["grid_group_record"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_create_na_sg_grid_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_update_na_sg_grid_group_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_group() + 
args["management_policy"]["tenant_accounts"] = True + args["management_policy"]["metrics_query"] = False + args["management_policy"]["ilm"] = False + + set_module_args(args) + my_obj = grid_group_module() + mock_request.side_effect = [ + SRR["grid_group_record"], # get + SRR["grid_group_record_update"], # put + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_update_na_sg_grid_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_delete_na_sg_grid_group_pass(self, mock_request): + set_module_args(self.set_args_delete_na_sg_grid_group()) + my_obj = grid_group_module() + mock_request.side_effect = [ + SRR["grid_group_record"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_delete_na_sg_grid_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_ha_group.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_ha_group.py new file mode 100644 index 000000000..fbc8fd0ce --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_ha_group.py @@ -0,0 +1,408 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Grid HA Group Ansible module: na_sg_grid_ha_group""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys + +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip("Skipping Unit Tests on 2.6 as requests is 
not available") + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_ha_group import ( + SgGridHaGroup as grid_ha_group_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": (None, None), + "update_good": (None, None), + "version_114": ({"data": {"productVersion": "11.4.0-20200721.1338.d3969b3"}}, None), + "version_116": ({"data": {"productVersion": "11.6.0-20211120.0301.850531e"}}, None), + "ha_group_record": ( + { + "data": { + "id": "fbe724da-c941-439b-bb61-a536f6211ca9", + "name": "ansible-ha-group", + "description": None, + "virtualIps": ["192.168.50.5"], + "interfaces": [ + {"nodeId": "0b1866ed-d6e7-41b4-815f-bf867348b76b", "interface": "ens256"}, + {"nodeId": "7bb5bf05-a04c-4344-8abd-08c5c4048666", "interface": "ens256"}, + ], + "gatewayCidr": "192.168.50.1/24", + } + }, + None, + ), + "ha_group_record_twovip": ( + { + "data": { + "id": "fbe724da-c941-439b-bb61-a536f6211ca9", + "name": "ansible-ha-group", + "description": "2 VIP HA Group", + "virtualIps": ["192.168.50.5", "192.168.50.6"], + "interfaces": [ + {"nodeId": "0b1866ed-d6e7-41b4-815f-bf867348b76b", "interface": "ens256"}, + {"nodeId": "7bb5bf05-a04c-4344-8abd-08c5c4048666", "interface": "ens256"}, + ], + "gatewayCidr": "192.168.50.1/24", + } + }, + None, + ), + "ha_group_record_rename": ( + { + "data": { + "id": "fbe724da-c941-439b-bb61-a536f6211ca9", + "name": "ansible-ha-group-rename", + "description": 
None, + "virtualIps": ["192.168.50.5"], + "interfaces": [ + {"nodeId": "0b1866ed-d6e7-41b4-815f-bf867348b76b", "interface": "ens256"}, + {"nodeId": "7bb5bf05-a04c-4344-8abd-08c5c4048666", "interface": "ens256"}, + ], + "gatewayCidr": "192.168.50.1/24", + } + }, + None, + ), + "ha_groups": ( + { + "data": [ + { + "id": "c08e6dca-038d-4a05-9499-6fbd1e6a4c3e", + "name": "site1_primary", + "description": "test ha group", + "virtualIps": ["10.193.174.117"], + "interfaces": [ + { + "nodeId": "0b1866ed-d6e7-41b4-815f-bf867348b76b", + "nodeName": "SITE1-ADM1", + "interface": "eth2", + "preferredMaster": True, + }, + { + "nodeId": "970ad050-b68b-4aae-a94d-aef73f3095c4", + "nodeName": "SITE2-ADM1", + "interface": "eth2", + }, + ], + "gatewayCidr": "192.168.14.1/24", + }, + { + "id": "fbe724da-c941-439b-bb61-a536f6211ca9", + "name": "ansible-ha-group", + "description": None, + "virtualIps": ["192.168.50.5"], + "interfaces": [ + {"nodeId": "0b1866ed-d6e7-41b4-815f-bf867348b76b", "interface": "ens256"}, + {"nodeId": "7bb5bf05-a04c-4344-8abd-08c5c4048666", "interface": "ens256"}, + ], + "gatewayCidr": "192.168.50.1/24", + }, + ] + }, + None, + ), + "node_health": ( + { + "data": [ + { + "id": "0b1866ed-d6e7-41b4-815f-bf867348b76b", + "isPrimaryAdmin": True, + "name": "SITE1-ADM1", + "siteId": "ae56d06d-bd83-46bd-adce-77146b1d94bd", + "siteName": "SITE1", + "severity": "normal", + "state": "connected", + "type": "adminNode", + }, + { + "id": "7bb5bf05-a04c-4344-8abd-08c5c4048666", + "isPrimaryAdmin": None, + "name": "SITE1-G1", + "siteId": "ae56d06d-bd83-46bd-adce-77146b1d94bd", + "siteName": "SITE1", + "severity": "normal", + "state": "connected", + "type": "apiGatewayNode", + }, + { + "id": "970ad050-b68b-4aae-a94d-aef73f3095c4", + "isPrimaryAdmin": False, + "name": "SITE2-ADM1", + "siteId": "7c24002e-5157-43e9-83e5-02db9b265b02", + "siteName": "SITE2", + "severity": "normal", + "state": "connected", + "type": "adminNode", + }, + ] + }, + None, + ), +} + + +def 
set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """a group of related Unit Tests""" + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "gateway_cidr": "192.168.50.1/24", + "virtual_ips": "192.168.50.5", + "interfaces": [ + {"node": "SITE1-ADM1", "interface": "ens256"}, + {"node": "SITE1-G1", "interface": "ens256"}, + ], + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "name": "ansible-test-ha-group", + "gateway_cidr": "192.168.50.1/24", + "virtual_ips": "192.168.50.5", + "interfaces": [ + {"node": "SITE1-ADM1", "interface": "ens256"}, + {"node": "SITE1-G1", "interface": "ens256"}, + ], + "api_url": "https://gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + 
"validate_certs": False, + } + ) + + def set_args_create_na_sg_grid_ha_group(self): + return dict( + { + "state": "present", + "name": "ansible-ha-group", + "gateway_cidr": "192.168.50.1/24", + "virtual_ips": "192.168.50.5", + "interfaces": [ + {"node": "SITE1-ADM1", "interface": "ens256"}, + {"node": "SITE1-G1", "interface": "ens256"}, + ], + "api_url": "https://gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_grid_ha_group(self): + return dict( + { + "state": "absent", + "name": "ansible-ha-group", + "gateway_cidr": "192.168.50.1/24", + "virtual_ips": "192.168.50.5", + "interfaces": [ + {"node": "SITE1-ADM1", "interface": "ens256"}, + {"node": "SITE1-G1", "interface": "ens256"}, + ], + "api_url": "https://gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_fail_when_required_args_missing(self, mock_request): + """required arguments are reported as errors""" + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_ha_group_module() + print("Info: test_module_fail_when_required_args_missing: %s" % exc.value.args[0]["msg"]) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_pass_when_required_args_present(self, mock_request): + """required arguments are reported as errors""" + mock_request.side_effect = [ + SRR["node_health"], # get + ] + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_ha_group_module() + exit_json(changed=True, msg="Induced arguments check") + print("Info: test_module_pass_when_required_args_present: %s" % exc.value.args[0]["msg"]) + assert exc.value.args[0]["changed"] + + 
@patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_grid_ha_group_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_ha_group()) + mock_request.side_effect = [ + SRR["node_health"], # get + SRR["empty_good"], # get + SRR["ha_group_record"], # post + SRR["end_of_sequence"], + ] + my_obj = grid_ha_group_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_grid_ha_group_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_idempotent_create_na_sg_grid_ha_group_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_ha_group() + set_module_args(args) + mock_request.side_effect = [ + SRR["node_health"], # get + SRR["ha_groups"], # get + SRR["ha_group_record"], # get + SRR["end_of_sequence"], + ] + my_obj = grid_ha_group_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_idempotent_create_na_sg_grid_ha_group_pass: %s" % repr(exc.value.args[0])) + assert not exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_ha_group_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_ha_group() + args["description"] = "2 VIP HA Group" + args["virtual_ips"] = ["192.168.50.5", "192.168.50.6"] + set_module_args(args) + mock_request.side_effect = [ + SRR["node_health"], # get + SRR["ha_groups"], # get + SRR["ha_group_record"], # get + SRR["ha_group_record_twovip"], # post + SRR["end_of_sequence"], + ] + my_obj = grid_ha_group_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_grid_ha_group_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + 
@patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_rename_na_sg_grid_ha_group_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_ha_group() + args["ha_group_id"] = "fbe724da-c941-439b-bb61-a536f6211ca9" + args["name"] = "ansible-ha-group-rename" + set_module_args(args) + mock_request.side_effect = [ + SRR["node_health"], # get + SRR["ha_group_record"], # get + SRR["ha_group_record_rename"], # post + SRR["end_of_sequence"], + ] + my_obj = grid_ha_group_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_rename_na_sg_grid_ha_group_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_delete_na_sg_grid_ha_group_pass(self, mock_request): + args = self.set_args_delete_na_sg_grid_ha_group() + set_module_args(args) + mock_request.side_effect = [ + SRR["ha_groups"], # get + SRR["ha_group_record"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + my_obj = grid_ha_group_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_delete_na_sg_grid_ha_group_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_grid_ha_group_bad_node_fail(self, mock_request): + args = self.set_args_create_na_sg_grid_ha_group() + args["interfaces"] = [{"node": "FakeNode", "interface": "eth0"}] + set_module_args(args) + mock_request.side_effect = [ + SRR["node_health"], # get + ] + with pytest.raises(AnsibleFailJson) as exc: + grid_ha_group_module() + print("Info: test_create_na_sg_grid_ha_group_bad_node_fail: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["failed"] + + 
@patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_ha_group_bad_ha_group_id_fail(self, mock_request): + args = self.set_args_create_na_sg_grid_ha_group() + args["ha_group_id"] = "ffffffff-ffff-aaaa-aaaa-000000000000" + args["virtual_ips"] = "192.168.50.10" + set_module_args(args) + mock_request.side_effect = [ + SRR["node_health"], # get + SRR["not_found"], # get + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = grid_ha_group_module() + my_obj.apply() + print("Info: test_create_na_sg_grid_ha_group_bad_node_fail: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["failed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_identity_federation.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_identity_federation.py new file mode 100644 index 000000000..058fc609e --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_identity_federation.py @@ -0,0 +1,354 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Grid Identity Federation Ansible module: na_sg_grid_identity_federation""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from 
ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_identity_federation import ( + SgGridIdentityFederation as grid_identity_federation_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "check_mode_good": (None, None), + "identity_federation_unset": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "disable": True, + "type": "", + "ldapServiceType": "", + "hostname": "", + "port": 0, + "username": "", + "password": None, + "baseGroupDn": "", + "baseUserDn": "", + "disableTLS": False, + "enableLDAPS": False, + "caCert": "", + } + }, + None, + ), + "identity_federation": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "disable": False, + "type": "ldap", + "ldapServiceType": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "********", + "baseGroupDn": "DC=example,DC=com", + "baseUserDn": "DC=example,DC=com", + "disableTLS": True, + "enableLDAPS": False, + "caCert": "", + } + }, + None, + ), + "identity_federation_tls": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "disable": False, + "type": "ldap", + "ldapServiceType": "Active Directory", + "hostname": "ad.example.com", + "port": 636, + "username": "binduser", + "password": "********", + "baseGroupDn": "DC=example,DC=com", + "baseUserDn": "DC=example,DC=com", + "disableTLS": False, + "enableLDAPS": True, + "caCert": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIF+DCCBOCgAwIBAgITRwAAAAIg5KzMrJo+kQAAAAAAAjANBgkqhkiG9w0BAQUF\n" + "ADBlMRIwEAYKCZImiZPyLGQBGRYCYXUxFjAUBgoJkiaJk/IsZAEZFgZuZXRhcHAx\n" + "FjAUBgoJkiaJk/IsZAEZFgZhdXNuZ3MxHzAdBgNVBAMTFmF1c25ncy1NRUxOR1NE\n" + "QzAxLUNBLTEwHhcNMjEwMjExMDkzMTIwWhcNMjMwMjExMDk0MTIwWjAAMIIBIjAN\n" + "BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt2xPi4FS4Uc37KrDLEXXUoc4lhhT\n" + 
"uQmMnLc0PYZCIpzYOaosFIeGqco3woiC7wSZJ2whKE4RDcxxgE+azuGiSWVjIxIL\n" + "AimmcDhFid/T3KRN5jmkjBzUKuPBYzZBFih8iU9056rqgN7eMKQYjRwPeV0+AeiB\n" + "irw46OgkwVQu3shEUtXxZPP2Mb6Md23+4vSmcElUcW28Opt2q/M5fs7DNomG3eaG\n" + "-----END CERTIFICATE-----\n" + ), + } + }, + None, + ), + "identity_federation_disable": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "disable": True, + "type": "ldap", + "ldapServiceType": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "********", + "baseGroupDn": "DC=example,DC=com", + "baseUserDn": "DC=example,DC=com", + "disableTLS": True, + "enableLDAPS": False, + "caCert": "", + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + 
return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "bindpass", + "base_group_dn": "DC=example,DC=com", + "base_user_dn": "DC=example,DC=com", + "tls": "Disabled", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "bindpass", + "base_group_dn": "DC=example,DC=com", + "base_user_dn": "DC=example,DC=com", + "tls": "Disabled", + "state": "present", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_set_na_sg_grid_identity_federation(self): + return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "bindpass", + "base_group_dn": "DC=example,DC=com", + "base_user_dn": "DC=example,DC=com", + "tls": "Disabled", + "state": "present", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_set_na_sg_grid_identity_federation_tls(self): + return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "port": 636, + "username": "binduser", + "password": "bindpass", + "base_group_dn": "DC=example,DC=com", + "base_user_dn": "DC=example,DC=com", + "tls": "LDAPS", + "ca_cert": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIF+DCCBOCgAwIBAgITRwAAAAIg5KzMrJo+kQAAAAAAAjANBgkqhkiG9w0BAQUF\n" + "ADBlMRIwEAYKCZImiZPyLGQBGRYCYXUxFjAUBgoJkiaJk/IsZAEZFgZuZXRhcHAx\n" + "FjAUBgoJkiaJk/IsZAEZFgZhdXNuZ3MxHzAdBgNVBAMTFmF1c25ncy1NRUxOR1NE\n" + "QzAxLUNBLTEwHhcNMjEwMjExMDkzMTIwWhcNMjMwMjExMDk0MTIwWjAAMIIBIjAN\n" + "BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt2xPi4FS4Uc37KrDLEXXUoc4lhhT\n" + 
"uQmMnLc0PYZCIpzYOaosFIeGqco3woiC7wSZJ2whKE4RDcxxgE+azuGiSWVjIxIL\n" + "AimmcDhFid/T3KRN5jmkjBzUKuPBYzZBFih8iU9056rqgN7eMKQYjRwPeV0+AeiB\n" + "irw46OgkwVQu3shEUtXxZPP2Mb6Md23+4vSmcElUcW28Opt2q/M5fs7DNomG3eaG\n" + "-----END CERTIFICATE-----\n" + ), + "state": "present", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_remove_na_sg_grid_identity_federation(self): + return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "state": "absent", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_identity_federation_module() + print("Info: test_module_fail_when_required_args_missing: %s" % exc.value.args[0]["msg"]) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_identity_federation_module() + exit_json(changed=True, msg="Induced arguments check") + print("Info: test_module_fail_when_required_args_present: %s" % exc.value.args[0]["msg"]) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_set_na_sg_grid_identity_federation_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_identity_federation()) + my_obj = grid_identity_federation_module() + mock_request.side_effect = [ + SRR["identity_federation_unset"], # get + SRR["identity_federation"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: 
test_set_na_sg_grid_identity_federation_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_idempotent_set_na_sg_grid_identity_federation_pass(self, mock_request): + args = self.set_args_set_na_sg_grid_identity_federation() + # remove password + del args["password"] + set_module_args(args) + my_obj = grid_identity_federation_module() + mock_request.side_effect = [ + SRR["identity_federation"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_idempotent_set_na_sg_grid_identity_federation_pass: %s" % repr(exc.value.args[0])) + assert not exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_set_na_sg_grid_identity_federation_tls_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_identity_federation_tls()) + my_obj = grid_identity_federation_module() + mock_request.side_effect = [ + SRR["identity_federation_unset"], # get + SRR["identity_federation_tls"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_set_na_sg_grid_identity_federation_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_remove_na_sg_grid_identity_federation_pass(self, mock_request): + set_module_args(self.set_args_remove_na_sg_grid_identity_federation()) + my_obj = grid_identity_federation_module() + mock_request.side_effect = [ + SRR["identity_federation"], # get + SRR["identity_federation_disable"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_remove_na_sg_grid_identity_federation_pass: %s" % 
repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + # test check mode + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_check_mode_na_sg_grid_identity_federation_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_identity_federation()) + my_obj = grid_identity_federation_module() + my_obj.module.check_mode = True + mock_request.side_effect = [ + SRR["identity_federation_unset"], # get + SRR["check_mode_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_check_mode_na_sg_grid_identity_federation_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + assert exc.value.args[0]["msg"] == "Connection test successful" diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_info.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_info.py new file mode 100644 index 000000000..2de26109b --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_info.py @@ -0,0 +1,362 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' Unit Tests NetApp StorageGRID Grid Ansible module: na_sg_grid_info ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import patch + +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_info \ + import NetAppSgGatherInfo as sg_grid_info_module + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'empty_good': 
({'data': []}, None), + 'end_of_sequence': (None, 'Unexpected call to send_request'), + 'generic_error': (None, 'Expected error'), + 'grid_accounts': ( + { + 'data': [ + { + 'name': 'TestTenantAccount1', + 'capabilities': ['management', 's3'], + 'policy': { + 'useAccountIdentitySource': True, + 'allowPlatformServices': False, + 'quotaObjectBytes': None, + }, + 'id': '12345678901234567891', + }, + { + 'name': 'TestTenantAccount2', + 'capabilities': ['management', 's3'], + 'policy': { + 'useAccountIdentitySource': True, + 'allowPlatformServices': False, + 'quotaObjectBytes': None, + }, + 'id': '12345678901234567892', + }, + { + 'name': 'TestTenantAccount3', + 'capabilities': ['management', 's3'], + 'policy': { + 'useAccountIdentitySource': True, + 'allowPlatformServices': False, + 'quotaObjectBytes': None, + }, + 'id': '12345678901234567893', + }, + ] + }, + None, + ), + 'grid_alarms': ({'data': []}, None), + 'grid_audit': ({'data': {}}, None), + 'grid_compliance_global': ({'data': {}}, None), + 'grid_config': ({'data': {}}, None), + 'grid_config_management': ({'data': {}}, None), + 'grid_config_product_version': ({'data': {}}, None), + 'grid_deactivated_features': ({'data': {}}, None), + 'grid_dns_servers': ({'data': []}, None), + 'grid_domain_names': ({'data': []}, None), + 'grid_ec_profiles': ({'data': []}, None), + 'grid_expansion': ({'data': {}}, None), + 'grid_expansion_nodes': ({'data': []}, None), + 'grid_expansion_sites': ({'data': []}, None), + 'grid_grid_networks': ({'data': []}, None), + 'grid_groups': ({'data': []}, None), + 'grid_health': ({'data': {}}, None), + 'grid_health_topology': ({'data': {}}, None), + 'grid_identity_source': ({'data': {}}, None), + 'grid_ilm_criteria': ({'data': []}, None), + 'grid_ilm_policies': ({'data': []}, None), + 'grid_ilm_rules': ({'data': []}, None), + 'grid_license': ({'data': []}, None), + 'grid_management_certificate': ({'data': {}}, None), + 'grid_ntp_servers': ({'data': []}, None), + 'grid_recovery': ({'data': {}}, 
None), + 'grid_recovery_available_nodes': ({'data': []}, None), + 'grid_regions': ({'data': []}, None), + 'grid_schemes': ({'data': []}, None), + 'grid_snmp': ({'data': {}}, None), + 'grid_storage_api_certificate': ({'data': {}}, None), + 'grid_untrusted_client_network': ({'data': {}}, None), + 'grid_users': ( + { + 'data': [ + { + 'accountId': '0', + 'disable': False, + 'federated': False, + 'fullName': 'Root', + 'id': '00000000-0000-0000-0000-000000000000', + 'memberOf': None, + 'uniqueName': 'root', + 'userURN': 'urn:sgws:identity::0:root' + }, + ] + }, + None + ), + 'grid_users_root': ( + { + 'data': { + 'accountId': '0', + 'disable': False, + 'federated': False, + 'fullName': 'Root', + 'id': '00000000-0000-0000-0000-000000000000', + 'memberOf': None, + 'uniqueName': 'root', + 'userURN': 'urn:sgws:identity::0:root' + }, + }, + None + ), + 'versions': ({'data': [2, 3]}, None), +} + + +def set_module_args(args): + ''' Prepare arguments so that they will be picked up during module creation ''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + ''' Exception class to be raised by module.exit_json and caught by the test case ''' + pass + + +class AnsibleFailJson(Exception): + ''' Exception class to be raised by module.fail_json and caught by the test case ''' + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + ''' Function to patch over exit_json; package return data into an exception ''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + ''' Function to patch over fail_json; package return data into an exception ''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + ''' A group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = 
patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + 'api_url': 'sgmi.example.com', + } + ) + + def set_default_args_pass_check(self): + return dict( + { + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + } + ) + + def set_default_optional_args_pass_check(self): + return dict( + { + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + 'validate_certs': False, + 'gather_subset': ['all'], + 'parameters': {'limit': 5}, + } + ) + + def set_args_run_sg_gather_facts_for_all_info(self): + return dict({ + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + 'validate_certs': False, + }) + + def set_args_run_sg_gather_facts_for_grid_accounts_info(self): + return dict({ + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + 'validate_certs': False, + 'gather_subset': ['grid_accounts_info'], + }) + + def set_args_run_sg_gather_facts_for_grid_accounts_and_grid_users_root_info(self): + return dict({ + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + 'validate_certs': False, + 'gather_subset': ['grid_accounts_info', 'grid/users/root'], + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + sg_grid_info_module() + print( + 'Info: test_module_fail_when_required_args_missing: %s' + % exc.value.args[0]['msg'] + ) + + def test_module_pass_when_required_args_present(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + sg_grid_info_module() + 
exit_json(changed=True, msg='Induced arguments check') + print( + 'Info: test_module_pass_when_required_args_present: %s' + % exc.value.args[0]['msg'] + ) + assert exc.value.args[0]['changed'] + + def test_module_pass_when_optional_args_present(self): + ''' Optional arguments are reported as pass ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_optional_args_pass_check()) + sg_grid_info_module() + exit_json(changed=True, msg='Induced arguments check') + print( + 'Info: test_module_pass_when_optional_args_present: %s' + % exc.value.args[0]['msg'] + ) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request') + def test_run_sg_gather_facts_for_all_info_pass(self, mock_request): + set_module_args(self.set_args_run_sg_gather_facts_for_all_info()) + my_obj = sg_grid_info_module() + gather_subset = [ + 'grid/accounts', + 'grid/alarms', + 'grid/audit', + 'grid/compliance-global', + 'grid/config', + 'grid/config/management', + 'grid/config/product-version', + 'grid/deactivated-features', + 'grid/dns-servers', + 'grid/domain-names', + 'grid/ec-profiles', + 'grid/expansion', + 'grid/expansion/nodes', + 'grid/expansion/sites', + 'grid/grid-networks', + 'grid/groups', + 'grid/health', + 'grid/health/topology', + 'grid/identity-source', + 'grid/ilm-criteria', + 'grid/ilm-policies', + 'grid/ilm-rules', + 'grid/license', + 'grid/management-certificate', + 'grid/ntp-servers', + 'grid/recovery/available-nodes', + 'grid/recovery', + 'grid/regions', + 'grid/schemes', + 'grid/snmp', + 'grid/storage-api-certificate', + 'grid/untrusted-client-network', + 'grid/users', + 'grid/users/root', + 'versions', + ] + mock_request.side_effect = [ + SRR['grid_accounts'], + SRR['grid_alarms'], + SRR['grid_audit'], + SRR['grid_compliance_global'], + SRR['grid_config'], + SRR['grid_config_management'], + SRR['grid_config_product_version'], + SRR['grid_deactivated_features'], + 
SRR['grid_dns_servers'], + SRR['grid_domain_names'], + SRR['grid_ec_profiles'], + SRR['grid_expansion'], + SRR['grid_expansion_nodes'], + SRR['grid_expansion_sites'], + SRR['grid_grid_networks'], + SRR['grid_groups'], + SRR['grid_health'], + SRR['grid_health_topology'], + SRR['grid_identity_source'], + SRR['grid_ilm_criteria'], + SRR['grid_ilm_policies'], + SRR['grid_ilm_rules'], + SRR['grid_license'], + SRR['grid_management_certificate'], + SRR['grid_ntp_servers'], + SRR['grid_recovery_available_nodes'], + SRR['grid_recovery'], + SRR['grid_regions'], + SRR['grid_schemes'], + SRR['grid_snmp'], + SRR['grid_storage_api_certificate'], + SRR['grid_untrusted_client_network'], + SRR['grid_users'], + SRR['grid_users_root'], + SRR['versions'], + SRR['end_of_sequence'], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_run_sg_gather_facts_for_all_info_pass: %s' % repr(exc.value.args)) + assert set(exc.value.args[0]['sg_info']) == set(gather_subset) + + @patch('ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request') + def test_run_sg_gather_facts_for_grid_accounts_info_pass(self, mock_request): + set_module_args(self.set_args_run_sg_gather_facts_for_grid_accounts_info()) + my_obj = sg_grid_info_module() + gather_subset = ['grid/accounts'] + mock_request.side_effect = [ + SRR['grid_accounts'], + SRR['end_of_sequence'], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_run_sg_gather_facts_for_grid_accounts_info_pass: %s' % repr(exc.value.args)) + assert set(exc.value.args[0]['sg_info']) == set(gather_subset) + + @patch('ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request') + def test_run_sg_gather_facts_for_grid_accounts_and_grid_users_root_info_pass(self, mock_request): + set_module_args(self.set_args_run_sg_gather_facts_for_grid_accounts_and_grid_users_root_info()) + my_obj = sg_grid_info_module() + gather_subset = 
['grid/accounts', 'grid/users/root'] + mock_request.side_effect = [ + SRR['grid_accounts'], + SRR['grid_users_root'], + SRR['end_of_sequence'], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_run_sg_gather_facts_for_grid_accounts_and_grid_users_root_info_pass: %s' % repr(exc.value.args)) + assert set(exc.value.args[0]['sg_info']) == set(gather_subset) diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_ntp.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_ntp.py new file mode 100644 index 000000000..eed83d49b --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_ntp.py @@ -0,0 +1,257 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID NTP Ansible module: na_sg_grid_ntp""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_ntp import ( + SgGridNtp as grid_ntp_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), 
+ "delete_good": ({"code": 204}, None), + "ntp_servers": ({"data": ["123.12.3.123", "123.1.23.123"]}, None,), + "update_ntp_servers": ({"data": ["123.12.3.123", "12.3.12.3"]}, None,), + "add_ntp_servers": ( + {"data": ["123.12.3.123", "123.1.23.123", "12.3.12.3"]}, + None, + ), + "remove_ntp_servers": ({"data": ["123.12.3.123"]}, None,), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "ntp_servers": "123.12.3.123,123.1.23.123", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "passphrase": "secretstring", + "ntp_servers": "123.12.3.123,123.1.23.123", + "api_url": "gmi.example.com", + 
"auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "passphrase": "secretstring", + "ntp_servers": "123.12.3.123,123.1.23.123", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_set_na_sg_grid_ntp_servers(self): + return dict( + { + "state": "present", + "passphrase": "secretstring", + "ntp_servers": "123.12.3.123,12.3.12.3", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_add_na_sg_grid_ntp_servers(self): + return dict( + { + "state": "present", + "passphrase": "secretstring", + "ntp_servers": "123.12.3.123,123.1.23.123,12.3.12.3", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_remove_na_sg_grid_ntp_server(self): + return dict( + { + "state": "present", + "passphrase": "secretstring", + "ntp_servers": "123.12.3.123", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_ntp_module() + print( + "Info: test_module_fail_when_required_args_missing: %s" + % exc.value.args[0]["msg"] + ) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_ntp_module() + exit_json(changed=True, msg="Induced arguments check") + print( + "Info: test_module_fail_when_required_args_present: %s" + % exc.value.args[0]["msg"] + ) + assert 
exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_set_na_sg_grid_ntp_servers_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_ntp_servers()) + my_obj = grid_ntp_module() + mock_request.side_effect = [ + SRR["ntp_servers"], # get + SRR["update_ntp_servers"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_set_na_sg_grid_ntp_servers_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_set_na_sg_grid_ntp_servers_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_ntp_servers()) + my_obj = grid_ntp_module() + mock_request.side_effect = [ + SRR["update_ntp_servers"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_set_na_sg_grid_ntp_servers_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_add_na_sg_grid_ntp_servers_pass(self, mock_request): + set_module_args(self.set_args_add_na_sg_grid_ntp_servers()) + my_obj = grid_ntp_module() + mock_request.side_effect = [ + SRR["ntp_servers"], # get + SRR["add_ntp_servers"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_add_na_sg_grid_ntp_servers_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_remove_na_sg_grid_ntp_servers_pass(self, mock_request): + 
set_module_args(self.set_args_remove_na_sg_grid_ntp_server()) + my_obj = grid_ntp_module() + mock_request.side_effect = [ + SRR["ntp_servers"], # get + SRR["remove_ntp_servers"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_remove_na_sg_grid_ntp_servers_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_regions.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_regions.py new file mode 100644 index 000000000..585ba3f45 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_regions.py @@ -0,0 +1,206 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Regions Ansible module: na_sg_grid_regions""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_regions import ( + SgGridRegions as grid_regions_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to 
send_request"), + "generic_error": (None, "Expected error"), + "delete_good": ({"code": 204}, None), + "default_regions": ({"data": ["us-east-1"]}, None,), + "regions": ({"data": ["us-east-1", "us-west-1"]}, None,), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "regions": "us-east-1,us-west-1", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "regions": "us-east-1,us-west-1", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_set_na_sg_grid_regions(self): + return dict( + { + "state": "present", + 
"regions": "us-east-1,us-west-1", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_remove_na_sg_grid_regions(self): + return dict( + { + "state": "present", + "regions": "us-east-1", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_regions_module() + print( + "Info: test_module_fail_when_required_args_missing: %s" + % exc.value.args[0]["msg"] + ) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_regions_module() + exit_json(changed=True, msg="Induced arguments check") + print( + "Info: test_module_fail_when_required_args_present: %s" + % exc.value.args[0]["msg"] + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_set_na_sg_grid_regions_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_regions()) + my_obj = grid_regions_module() + mock_request.side_effect = [ + SRR["default_regions"], # get + SRR["regions"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_set_na_sg_grid_regions_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_set_na_sg_grid_regions_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_grid_regions()) + my_obj = 
grid_regions_module() + mock_request.side_effect = [ + SRR["regions"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_set_na_sg_grid_regions_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_remove_na_sg_grid_regions_pass(self, mock_request): + set_module_args(self.set_args_remove_na_sg_grid_regions()) + my_obj = grid_regions_module() + mock_request.side_effect = [ + SRR["regions"], # get + SRR["default_regions"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_remove_na_sg_grid_regions_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_traffic_classes.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_traffic_classes.py new file mode 100644 index 000000000..42fce0e3b --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_traffic_classes.py @@ -0,0 +1,355 @@ +# (c) 2022, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Grid HA Group Ansible module: na_sg_grid_traffic_classes""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys + +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip("Skipping Unit Tests on 2.6 as requests is not available") + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + 
patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_traffic_classes import ( + SgGridTrafficClasses as grid_traffic_classes_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": (None, None), + "update_good": (None, None), + "version_114": ({"data": {"productVersion": "11.4.0-20200721.1338.d3969b3"}}, None), + "version_116": ({"data": {"productVersion": "11.6.0-20211120.0301.850531e"}}, None), + "traffic_class_record": ( + { + "data": { + "id": "6b2946e6-7fed-40d0-9262-8e922580aba7", + "name": "ansible-test-traffic-class", + "description": "Ansible Test", + "matchers": [ + {"type": "cidr", "inverse": False, "members": ["192.168.50.0/24"]}, + {"type": "bucket", "inverse": False, "members": ["ansible-test1", "ansible-test2"]}, + ], + "limits": [], + } + }, + None, + ), + "traffic_class_record_updated": ( + { + "data": { + "id": "6b2946e6-7fed-40d0-9262-8e922580aba7", + "name": "ansible-test-traffic-class", + "description": "Ansible Test", + "matchers": [ + {"type": "cidr", "inverse": False, "members": ["192.168.50.0/24"]}, + {"type": "bucket", "inverse": False, "members": ["ansible-test1", "ansible-test2"]}, + ], + "limits": [{"type": "aggregateBandwidthIn", "value": 888888}], + } + }, + None, + ), + "traffic_class_record_rename": ( + { + "data": { + "id": "6b2946e6-7fed-40d0-9262-8e922580aba7", + "name": "ansible-test-traffic-class-rename", + "description": "Ansible Test", + "matchers": [ + {"type": "cidr", "inverse": False, "members": ["192.168.50.0/24"]}, + {"type": "bucket", "inverse": False, "members": ["ansible-test1", "ansible-test2"]}, + 
], + "limits": [], + } + }, + None, + ), + "traffic_classes": ( + { + "data": [ + { + "id": "6b2946e6-7fed-40d0-9262-8e922580aba7", + "name": "ansible-test-traffic-class", + "description": "Ansible Test", + }, + { + "id": "531e6be1-e9b1-4010-bb79-03437c7c13d2", + "name": "policy-test1", + "description": "First test policy", + }, + ] + }, + None, + ), + "node_health": ( + { + "data": [ + { + "id": "0b1866ed-d6e7-41b4-815f-bf867348b76b", + "isPrimaryAdmin": True, + "name": "SITE1-ADM1", + "siteId": "ae56d06d-bd83-46bd-adce-77146b1d94bd", + "siteName": "SITE1", + "severity": "normal", + "state": "connected", + "type": "adminNode", + }, + { + "id": "7bb5bf05-a04c-4344-8abd-08c5c4048666", + "isPrimaryAdmin": None, + "name": "SITE1-G1", + "siteId": "ae56d06d-bd83-46bd-adce-77146b1d94bd", + "siteName": "SITE1", + "severity": "normal", + "state": "connected", + "type": "apiGatewayNode", + }, + { + "id": "970ad050-b68b-4aae-a94d-aef73f3095c4", + "isPrimaryAdmin": False, + "name": "SITE2-ADM1", + "siteId": "7c24002e-5157-43e9-83e5-02db9b265b02", + "siteName": "SITE2", + "severity": "normal", + "state": "connected", + "type": "adminNode", + }, + ] + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + 
"""function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """a group of related Unit Tests""" + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "matchers": [ + {"type": "bucket", "members": ["ansible-test1", "ansible-test2"]}, + {"type": "cidr", "members": ["192.168.50.0/24"]}, + ], + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "name": "ansible-test-traffic-class", + "matchers": [ + {"type": "bucket", "members": ["ansible-test1", "ansible-test2"]}, + {"type": "cidr", "members": ["192.168.50.0/24"]}, + ], + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_grid_traffic_class(self): + return dict( + { + "state": "present", + "name": "ansible-test-traffic-class", + "description": "Ansible Test", + "matchers": [ + {"type": "bucket", "members": ["ansible-test1", "ansible-test2"]}, + {"type": "cidr", "members": ["192.168.50.0/24"]}, + ], + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_grid_traffic_class(self): + return dict( + { + "state": "absent", + "name": "ansible-test-traffic-class", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_fail_when_required_args_missing(self, mock_request): + """required 
arguments are reported as errors""" + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_traffic_classes_module() + print("Info: test_module_fail_when_required_args_missing: %s" % exc.value.args[0]["msg"]) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_pass_when_required_args_present(self, mock_request): + """required arguments are reported as errors""" + mock_request.side_effect = [ + SRR["node_health"], # get + ] + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + grid_traffic_classes_module() + exit_json(changed=True, msg="Induced arguments check") + print("Info: test_module_pass_when_required_args_present: %s" % exc.value.args[0]["msg"]) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_grid_traffic_class_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_traffic_class()) + mock_request.side_effect = [ + SRR["empty_good"], # get + SRR["traffic_class_record"], # post + SRR["end_of_sequence"], + ] + my_obj = grid_traffic_classes_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_grid_traffic_class_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_idempotent_create_na_sg_grid_traffic_class_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_traffic_class() + set_module_args(args) + mock_request.side_effect = [ + SRR["traffic_classes"], # get + SRR["traffic_class_record"], # get + SRR["end_of_sequence"], + ] + my_obj = grid_traffic_classes_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: 
test_idempotent_create_na_sg_grid_traffic_class_pass: %s" % repr(exc.value.args[0])) + assert not exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_traffic_class_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_traffic_class() + args["description"] = "Ansible Test with Limit" + args["limits"] = [{"type": "aggregateBandwidthIn", "value": 888888}] + set_module_args(args) + mock_request.side_effect = [ + SRR["traffic_classes"], # get + SRR["traffic_class_record"], # get + SRR["traffic_class_record_updated"], # put + SRR["end_of_sequence"], + ] + my_obj = grid_traffic_classes_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_grid_traffic_class_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_rename_na_sg_grid_traffic_class_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_traffic_class() + args["policy_id"] = "6b2946e6-7fed-40d0-9262-8e922580aba7" + args["name"] = "ansible-test-traffic-class-rename" + set_module_args(args) + mock_request.side_effect = [ + SRR["traffic_class_record"], # get + SRR["traffic_class_record_rename"], # put + SRR["end_of_sequence"], + ] + my_obj = grid_traffic_classes_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_rename_na_sg_grid_traffic_class_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_delete_na_sg_grid_traffic_class_pass(self, mock_request): + args = self.set_args_delete_na_sg_grid_traffic_class() + set_module_args(args) + mock_request.side_effect = [ + SRR["traffic_classes"], # get + SRR["traffic_class_record"], # 
get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + my_obj = grid_traffic_classes_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_delete_na_sg_grid_traffic_class_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_grid_traffic_class_bad_policy_id_fail(self, mock_request): + args = self.set_args_create_na_sg_grid_traffic_class() + args["policy_id"] = "ffffffff-ffff-aaaa-aaaa-000000000000" + args["description"] = "Bad ID" + set_module_args(args) + mock_request.side_effect = [ + SRR["not_found"], # get + ] + with pytest.raises(AnsibleFailJson) as exc: + my_obj = grid_traffic_classes_module() + my_obj.apply() + print("Info: test_update_na_sg_grid_traffic_class_bad_policy_id_fail: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["failed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_user.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_user.py new file mode 100644 index 000000000..c8ec38c09 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_grid_user.py @@ -0,0 +1,476 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Grid User Ansible module: na_sg_grid_user""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from 
ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_user import ( + SgGridUser as grid_user_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ({"status": "error", "code": 404, "data": {}}, {"key": "error.404"},), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": ({"code": 204}, None), + "pw_change_good": ({"code": 204}, None), + "grid_groups": ( + { + "data": [ + { + "displayName": "TestGridGroup1", + "uniqueName": "group/testgridgroup1", + "accountId": "12345678901234567890", + "id": "12345678-abcd-1234-abcd-1234567890ab", + "federated": False, + "groupURN": "urn:sgws:identity::12345678901234567890:group/testgridgroup1", + }, + { + "displayName": "TestGridGroup2", + "uniqueName": "group/testgridgroup2", + "accountId": "12345678901234567890", + "id": "87654321-abcd-1234-cdef-1234567890ab", + "federated": False, + "groupURN": "urn:sgws:identity::12345678901234567890:group/testgridgroup2", + }, + ] + }, + None, + ), + "grid_users": ( + { + "data": [ + { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "accountId": "12345678901234567890", + "fullName": "testgriduser", + "uniqueName": "user/ansible-sg-adm-user1", + "userURN": "urn:sgws:identity::12345678901234567890:user/testgriduser", + "federated": False, + "memberOf": ["12345678-abcd-1234-abcd-1234567890ab"], + "disable": False, + } + ] + }, + None, + ), + "grid_user_record_no_group": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "accountId": "12345678901234567890", + "fullName": "testgriduser", + "uniqueName": "user/ansible-sg-adm-user1", + "userURN": "urn:sgws:identity::12345678901234567890:user/testgriduser", + 
"federated": False, + "disable": False, + } + }, + None, + ), + "grid_user_record": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "accountId": "12345678901234567890", + "fullName": "testgriduser", + "uniqueName": "user/ansible-sg-adm-user1", + "userURN": "urn:sgws:identity::12345678901234567890:user/testgriduser", + "federated": False, + "memberOf": ["12345678-abcd-1234-abcd-1234567890ab"], + "disable": False, + } + }, + None, + ), + "grid_user_record_update": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "accountId": "12345678901234567890", + "fullName": "testgriduser", + "uniqueName": "user/ansible-sg-adm-user1", + "userURN": "urn:sgws:identity::12345678901234567890:user/testgriduser", + "federated": False, + "memberOf": [ + "12345678-abcd-1234-abcd-1234567890ab", + "87654321-abcd-1234-cdef-1234567890ab", + ], + "disable": False, + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = 
patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "full_name": "TestUser", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "full_name": "TestUser", + "unique_name": "user/testuser", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_grid_user_no_group(self): + return dict( + { + "state": "present", + "full_name": "TestUser", + "unique_name": "user/testuser", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_grid_user(self): + return dict( + { + "state": "present", + "full_name": "TestUser", + "unique_name": "user/testuser", + "member_of": ["group/testgridgroup1"], + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_grid_user(self): + return dict( + { + "state": "absent", + "unique_name": "user/testuser", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + grid_user_module() + print( + "Info: test_module_fail_when_required_args_missing: %s" + % exc.value.args[0]["msg"] + ) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + 
set_module_args(self.set_default_args_pass_check()) + grid_user_module() + exit_json(changed=True, msg="Induced arguments check") + print( + "Info: test_module_fail_when_required_args_present: %s" + % exc.value.args[0]["msg"] + ) + assert exc.value.args[0]["changed"] + + def test_module_fail_with_bad_unique_name(self): + """ error returned if unique_name doesn't start with user or federated_user """ + with pytest.raises(AnsibleFailJson) as exc: + args = self.set_default_args_pass_check() + args["unique_name"] = "noprefixuser" + set_module_args(args) + grid_user_module() + print( + "Info: test_module_fail_with_bad_unique_name: %s" % exc.value.args[0]["msg"] + ) + + def set_args_create_na_sg_grid_user_with_password(self): + return dict( + { + "state": "present", + "full_name": "TestUser", + "unique_name": "user/testuser", + "member_of": ["group/testgridgroup1"], + "password": "netapp123", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_grid_user_no_group_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_user_no_group()) + my_obj = grid_user_module() + mock_request.side_effect = [ + SRR["not_found"], # get + SRR["grid_user_record_no_group"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_grid_user_no_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_grid_user_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_user()) + my_obj = grid_user_module() + mock_request.side_effect = [ + SRR["not_found"], # get + SRR["grid_groups"], # get + SRR["grid_user_record"], # 
post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_grid_user_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_create_na_sg_grid_user_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_user()) + my_obj = grid_user_module() + mock_request.side_effect = [ + SRR["grid_user_record"], # get + SRR["grid_groups"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_create_na_sg_grid_user_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_update_na_sg_grid_user_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_user() + args["member_of"] = ["group/testgridgroup1", "group/testgridgroup2"] + + set_module_args(args) + my_obj = grid_user_module() + mock_request.side_effect = [ + SRR["grid_user_record"], # get + SRR["grid_groups"], # get + SRR["grid_user_record_update"], # put + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_grid_user_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_delete_na_sg_grid_user_pass(self, mock_request): + set_module_args(self.set_args_delete_na_sg_grid_user()) + my_obj = grid_user_module() + mock_request.side_effect = [ + SRR["grid_user_record"], # get + SRR["grid_groups"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: 
test_delete_na_sg_grid_user_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + # create user and set pass + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_grid_user_and_set_password_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_grid_user_with_password()) + my_obj = grid_user_module() + mock_request.side_effect = [ + SRR["not_found"], # get + SRR["grid_groups"], # get + SRR["grid_user_record"], # post + SRR["pw_change_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_grid_user_and_set_password_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + # Idempotent user with password defined + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_create_na_sg_grid_user_and_set_password_pass( + self, mock_request + ): + set_module_args(self.set_args_create_na_sg_grid_user_with_password()) + my_obj = grid_user_module() + mock_request.side_effect = [ + SRR["grid_user_record"], # get + SRR["grid_groups"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_create_na_sg_grid_user_and_set_password_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + # update user and set pass + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_update_na_sg_grid_user_and_set_password_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_user_with_password() + args["member_of"] = ["group/testgridgroup1", "group/testgridgroup2"] + args["update_password"] = "always" + + set_module_args(args) + my_obj = grid_user_module() + mock_request.side_effect = [ + SRR["grid_user_record"], 
# get + SRR["grid_groups"], # get + SRR["grid_user_record_update"], # put + SRR["pw_change_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_update_na_sg_grid_user_and_set_password_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + # set pass only + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_set_na_sg_grid_user_password_pass(self, mock_request): + args = self.set_args_create_na_sg_grid_user_with_password() + args["update_password"] = "always" + + set_module_args(args) + my_obj = grid_user_module() + mock_request.side_effect = [ + SRR["grid_user_record"], # get + SRR["grid_groups"], # get + SRR["pw_change_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_set_na_sg_grid_user_password_pass: %s" % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + # attempt to set password on federated user + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_fail_set_federated_user_password(self, mock_request): + with pytest.raises(AnsibleFailJson) as exc: + args = self.set_args_create_na_sg_grid_user_with_password() + args["unique_name"] = "federated-user/abc123" + args["update_password"] = "always" + set_module_args(args) + grid_user_module() + print( + "Info: test_fail_set_federated_user_password: %s" % repr(exc.value.args[0]) + ) diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_container.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_container.py new file mode 100644 index 000000000..21c49a556 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_container.py @@ -0,0 +1,348 @@ +# (c) 2020, NetApp, Inc 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Org Container Ansible module: na_sg_org_container""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys + +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip("Skipping Unit Tests on 2.6 as requests is not available") + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_org_container import ( + SgOrgContainer as org_container_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": (None, None), + "version_114": ({"data": {"productVersion": "11.4.0-20200721.1338.d3969b3"}}, None), + "version_116": ({"data": {"productVersion": "11.6.0-20211120.0301.850531e"}}, None), + "global_compliance_disabled": ( + { + "data": { + "complianceEnabled": False, + } + }, + None, + ), + "global_compliance_enabled": ( + { + "data": { + "complianceEnabled": True, + } + }, + None, + ), + "org_containers": ( + {"data": [{"name": "testbucket", "creationTime": "2020-02-04T12:43:50.777Z", "region": "us-east-1"}]}, + None, + ), + "org_container_record": ( + {"data": {"name": "testbucket", "creationTime": "2020-02-04T12:43:50.777Z", "region": "us-east-1"}}, + None, + ), + "org_container_objectlock_record": ( + { + "data": { + 
"name": "testbucket", + "creationTime": "2020-02-04T12:43:50.777Z", + "region": "us-east-1", + "s3ObjectLock": {"enabled": True}, + } + }, + None, + ), + "org_container_record_update": ( + { + "data": { + "name": "testbucket", + "creationTime": "2020-02-04T12:43:50.777Z", + "region": "us-east-1", + "compliance": {"autoDelete": False, "legalHold": False}, + } + }, + None, + ), + "org_container_versioning_disabled": ({"data": {"versioningEnabled": False, "versioningSuspended": False}}, None), + "org_container_versioning_enabled": ({"data": {"versioningEnabled": True, "versioningSuspended": False}}, None), + "org_container_versioning_suspended": ({"data": {"versioningEnabled": False, "versioningSuspended": True}}, None), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """a group of related Unit Tests""" + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def 
set_default_args_fail_check(self): + return dict( + {"name": "testbucket", "auth_token": "01234567-5678-9abc-78de-9fgabc123def", "validate_certs": False} + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "name": "testbucket", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_org_container(self): + return dict( + { + "state": "present", + "name": "testbucket", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_org_container(self): + return dict( + { + "state": "absent", + "name": "testbucket", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_fail_when_required_args_missing(self, mock_request): + """required arguments are reported as errors""" + mock_request.side_effect = [ + SRR["version_114"], + ] + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + org_container_module() + print("Info: test_module_fail_when_required_args_missing: %s" % exc.value.args[0]["msg"]) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_fail_when_required_args_present(self, mock_request): + """required arguments are reported as errors""" + mock_request.side_effect = [ + SRR["version_114"], + ] + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + org_container_module() + exit_json(changed=True, msg="Induced arguments check") + print("Info: test_module_fail_when_required_args_present: %s" % exc.value.args[0]["msg"]) + assert exc.value.args[0]["changed"] + + 
@patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_org_container_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_container()) + mock_request.side_effect = [ + SRR["version_114"], + SRR["empty_good"], # get + SRR["org_container_record"], # post + SRR["end_of_sequence"], + ] + my_obj = org_container_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_org_container_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_idempotent_create_na_sg_org_container_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_container()) + mock_request.side_effect = [ + SRR["version_114"], + SRR["org_containers"], # get + SRR["end_of_sequence"], + ] + my_obj = org_container_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_idempotent_create_na_sg_org_container_pass: %s" % repr(exc.value.args[0])) + assert not exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_org_container_pass(self, mock_request): + args = self.set_args_create_na_sg_org_container() + args["compliance"] = {"auto_delete": False, "legal_hold": False} + set_module_args(args) + mock_request.side_effect = [ + SRR["version_114"], + SRR["org_containers"], # get + SRR["org_container_record_update"], # put + SRR["end_of_sequence"], + ] + my_obj = org_container_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_org_container_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def 
test_delete_na_sg_org_container_pass(self, mock_request): + set_module_args(self.set_args_delete_na_sg_org_container()) + mock_request.side_effect = [ + SRR["version_114"], + SRR["org_containers"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + my_obj = org_container_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_delete_na_sg_org_container_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_fail_minimum_version_not_met_object_lock(self, mock_request): + args = self.set_args_create_na_sg_org_container() + args["s3_object_lock_enabled"] = True + set_module_args(args) + mock_request.side_effect = [ + SRR["version_114"], # get + ] + with pytest.raises(AnsibleFailJson) as exc: + org_container_module() + print("Info: test_module_fail_minimum_version_not_met_object_lock: %s" % exc.value.args[0]["msg"]) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_org_container_objectlock_global_compliance_fail(self, mock_request): + args = self.set_args_create_na_sg_org_container() + args["s3_object_lock_enabled"] = True + set_module_args(args) + mock_request.side_effect = [ + SRR["version_116"], + SRR["empty_good"], # get + SRR["global_compliance_disabled"], # get + ] + my_obj = org_container_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_org_container_objectlock_global_compliance_fail: %s" % repr(exc.value.args[0])) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_org_container_objectlock_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_container()) + mock_request.side_effect = [ + SRR["version_116"], + SRR["empty_good"], # get + 
SRR["global_compliance_enabled"], # get + SRR["org_container_objectlock_record"], # post + SRR["end_of_sequence"], + ] + my_obj = org_container_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_org_container_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_module_fail_minimum_version_not_met_versioning(self, mock_request): + args = self.set_args_create_na_sg_org_container() + args["bucket_versioning_enabled"] = True + set_module_args(args) + mock_request.side_effect = [ + SRR["version_114"], # get + ] + with pytest.raises(AnsibleFailJson) as exc: + org_container_module() + print("Info: test_module_fail_minimum_version_not_met_versioning: %s" % exc.value.args[0]["msg"]) + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_create_na_sg_org_container_with_versioning_pass(self, mock_request): + args = self.set_args_create_na_sg_org_container() + args["bucket_versioning_enabled"] = True + set_module_args(args) + mock_request.side_effect = [ + SRR["version_116"], + SRR["empty_good"], # get + SRR["org_container_record"], # post + SRR["org_container_versioning_enabled"], # post + SRR["end_of_sequence"], + ] + my_obj = org_container_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_org_container_with_versioning_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_update_na_sg_org_container_enable_versioning_pass(self, mock_request): + args = self.set_args_create_na_sg_org_container() + args["bucket_versioning_enabled"] = True + set_module_args(args) + mock_request.side_effect = [ + SRR["version_116"], + SRR["org_containers"], # get 
+ SRR["org_container_versioning_disabled"], # get + SRR["org_container_versioning_enabled"], # put + SRR["end_of_sequence"], + ] + my_obj = org_container_module() + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_org_container_enable_versioning_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_group.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_group.py new file mode 100644 index 000000000..c229130c2 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_group.py @@ -0,0 +1,403 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Org Group Ansible module: na_sg_org_group""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_org_group import ( + SgOrgGroup as org_group_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + {"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + 
"delete_good": ({"code": 204}, None), + "org_groups": ( + { + "data": [ + { + "displayName": "TestOrgGroup", + "uniqueName": "group/testorggroup", + "policies": { + "management": { + "manageAllContainers": True, + "manageEndpoints": True, + "manageOwnS3Credentials": True, + }, + "s3": { + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "arn:aws:s3:::*", + } + ] + }, + }, + "accountId": "12345678901234567890", + "id": "00000000-0000-0000-0000-000000000000", + "federated": False, + "groupURN": "urn:sgws:identity::12345678901234567890:group/testorggroup", + } + ] + }, + None, + ), + "org_group_record": ( + { + "data": { + "displayName": "TestOrgGroup", + "uniqueName": "group/testorggroup", + "policies": { + "management": { + "manageAllContainers": True, + "manageEndpoints": True, + "manageOwnS3Credentials": True, + }, + "s3": { + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "arn:aws:s3:::*", + } + ] + }, + }, + "accountId": "12345678901234567890", + "id": "00000000-0000-0000-0000-000000000000", + "federated": False, + "groupURN": "urn:sgws:identity::12345678901234567890:group/testorggroup", + } + }, + None, + ), + "org_group_record_update": ( + { + "data": { + "displayName": "TestOrgGroup", + "uniqueName": "group/testorggroup", + "policies": { + "management": { + "manageAllContainers": True, + "manageEndpoints": True, + "manageOwnS3Credentials": True, + # "rootAccess": False, + }, + "s3": { + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "arn:aws:s3:::mybucket/*", + } + ] + }, + }, + "accountId": "12345678901234567890", + "id": "00000000-0000-0000-0000-000000000000", + "federated": False, + "groupURN": "urn:sgws:identity::12345678901234567890:group/testorggroup", + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) 
# pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "display_name": "TestGroup", + "management_policy": { + "manage_all_containers": True, + "manage_endpoints": True, + "manage_own_s3_credentials": True, + "root_access": False, + }, + "s3_policy": { + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "arn:aws:s3:::*", + } + ] + }, + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "display_name": "TestGroup", + "unique_name": "group/testgroup", + "management_policy": { + "manage_all_containers": True, + "manage_endpoints": True, + "manage_own_s3_credentials": True, + "root_access": False, + }, + "s3_policy": { + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "arn:aws:s3:::*", + } + ] + }, + "api_url": "gmi.example.com", + "auth_token": 
"01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_org_group(self): + return dict( + { + "state": "present", + "display_name": "TestOrgGroup", + "unique_name": "group/testorggroup", + "management_policy": { + "manage_all_containers": True, + "manage_endpoints": True, + "manage_own_s3_credentials": True, + "root_access": False, + }, + "s3_policy": { + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "arn:aws:s3:::*", + } + ] + }, + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_org_group(self): + return dict( + { + "state": "absent", + # "display_name": "TestOrgGroup", + "unique_name": "group/testorggroup", + # "management_policy": { + # "manage_all_containers": True, + # "manage_endpoints": True, + # "manage_own_s3_credentials": True, + # "root_access": False, + # }, + # "s3_policy": { + # "Statement": [ + # { + # "Effect": "Allow", + # "Action": "s3:*", + # "Resource": "arn:aws:s3:::*", + # } + # ] + # }, + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + org_group_module() + print( + "Info: test_module_fail_when_required_args_missing: %s" + % exc.value.args[0]["msg"] + ) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + org_group_module() + exit_json(changed=True, msg="Induced arguments check") + print( + "Info: test_module_fail_when_required_args_present: %s" + % exc.value.args[0]["msg"] + ) + assert exc.value.args[0]["changed"] + + def 
test_module_fail_with_bad_unique_name(self): + """ error returned if unique_name doesn't start with group or federated_group """ + with pytest.raises(AnsibleFailJson) as exc: + args = self.set_default_args_pass_check() + args["unique_name"] = "noprefixgroup" + set_module_args(args) + org_group_module() + print( + "Info: test_module_fail_with_bad_unique_name: %s" + % exc.value.args[0]["msg"] + ) + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_org_group_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_group()) + my_obj = org_group_module() + mock_request.side_effect = [ + SRR["not_found"], # get + SRR["org_group_record"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_org_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_create_na_sg_org_group_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_group()) + my_obj = org_group_module() + mock_request.side_effect = [ + SRR["org_group_record"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_create_na_sg_org_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_update_na_sg_org_group_pass(self, mock_request): + args = self.set_args_create_na_sg_org_group() + args["s3_policy"] = ( + { + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "arn:aws:s3:::mybucket/*", + } + ] + }, + ) + + args["management_policy"]["manage_endpoints"] = False + + set_module_args(args) + my_obj = 
org_group_module() + mock_request.side_effect = [ + SRR["org_group_record"], # get + SRR["org_group_record_update"], # put + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_update_na_sg_org_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_delete_na_sg_org_group_pass(self, mock_request): + set_module_args(self.set_args_delete_na_sg_org_group()) + my_obj = org_group_module() + mock_request.side_effect = [ + SRR["org_group_record"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_delete_na_sg_org_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_identity_federation.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_identity_federation.py new file mode 100644 index 000000000..b02259005 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_identity_federation.py @@ -0,0 +1,354 @@ +# (c) 2021, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Tenant Identity Federation Ansible module: na_sg_org_identity_federation""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from 
ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_org_identity_federation import ( + SgOrgIdentityFederation as org_identity_federation_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "check_mode_good": (None, None), + "identity_federation_unset": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "disable": True, + "type": "", + "ldapServiceType": "", + "hostname": "", + "port": 0, + "username": "", + "password": None, + "baseGroupDn": "", + "baseUserDn": "", + "disableTLS": False, + "enableLDAPS": False, + "caCert": "", + } + }, + None, + ), + "identity_federation": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "disable": False, + "type": "ldap", + "ldapServiceType": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "********", + "baseGroupDn": "DC=example,DC=com", + "baseUserDn": "DC=example,DC=com", + "disableTLS": True, + "enableLDAPS": False, + "caCert": "", + } + }, + None, + ), + "identity_federation_tls": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "disable": False, + "type": "ldap", + "ldapServiceType": "Active Directory", + "hostname": "ad.example.com", + "port": 636, + "username": "binduser", + "password": "********", + "baseGroupDn": "DC=example,DC=com", + "baseUserDn": "DC=example,DC=com", + "disableTLS": False, + "enableLDAPS": True, + "caCert": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIF+DCCBOCgAwIBAgITRwAAAAIg5KzMrJo+kQAAAAAAAjANBgkqhkiG9w0BAQUF\n" + "ADBlMRIwEAYKCZImiZPyLGQBGRYCYXUxFjAUBgoJkiaJk/IsZAEZFgZuZXRhcHAx\n" + 
"FjAUBgoJkiaJk/IsZAEZFgZhdXNuZ3MxHzAdBgNVBAMTFmF1c25ncy1NRUxOR1NE\n" + "QzAxLUNBLTEwHhcNMjEwMjExMDkzMTIwWhcNMjMwMjExMDk0MTIwWjAAMIIBIjAN\n" + "BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt2xPi4FS4Uc37KrDLEXXUoc4lhhT\n" + "uQmMnLc0PYZCIpzYOaosFIeGqco3woiC7wSZJ2whKE4RDcxxgE+azuGiSWVjIxIL\n" + "AimmcDhFid/T3KRN5jmkjBzUKuPBYzZBFih8iU9056rqgN7eMKQYjRwPeV0+AeiB\n" + "irw46OgkwVQu3shEUtXxZPP2Mb6Md23+4vSmcElUcW28Opt2q/M5fs7DNomG3eaG\n" + "-----END CERTIFICATE-----\n" + ), + } + }, + None, + ), + "identity_federation_disable": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "disable": True, + "type": "ldap", + "ldapServiceType": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "********", + "baseGroupDn": "DC=example,DC=com", + "baseUserDn": "DC=example,DC=com", + "disableTLS": True, + "enableLDAPS": False, + "caCert": "", + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + 
self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "bindpass", + "base_group_dn": "DC=example,DC=com", + "base_user_dn": "DC=example,DC=com", + "tls": "Disabled", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "bindpass", + "base_group_dn": "DC=example,DC=com", + "base_user_dn": "DC=example,DC=com", + "tls": "Disabled", + "state": "present", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_set_na_sg_org_identity_federation(self): + return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "port": 389, + "username": "binduser", + "password": "bindpass", + "base_group_dn": "DC=example,DC=com", + "base_user_dn": "DC=example,DC=com", + "tls": "Disabled", + "state": "present", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_set_na_sg_org_identity_federation_tls(self): + return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "port": 636, + "username": "binduser", + "password": "bindpass", + "base_group_dn": "DC=example,DC=com", + "base_user_dn": "DC=example,DC=com", + "tls": "LDAPS", + "ca_cert": ( + "-----BEGIN CERTIFICATE-----\n" + "MIIF+DCCBOCgAwIBAgITRwAAAAIg5KzMrJo+kQAAAAAAAjANBgkqhkiG9w0BAQUF\n" + 
"ADBlMRIwEAYKCZImiZPyLGQBGRYCYXUxFjAUBgoJkiaJk/IsZAEZFgZuZXRhcHAx\n" + "FjAUBgoJkiaJk/IsZAEZFgZhdXNuZ3MxHzAdBgNVBAMTFmF1c25ncy1NRUxOR1NE\n" + "QzAxLUNBLTEwHhcNMjEwMjExMDkzMTIwWhcNMjMwMjExMDk0MTIwWjAAMIIBIjAN\n" + "BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt2xPi4FS4Uc37KrDLEXXUoc4lhhT\n" + "uQmMnLc0PYZCIpzYOaosFIeGqco3woiC7wSZJ2whKE4RDcxxgE+azuGiSWVjIxIL\n" + "AimmcDhFid/T3KRN5jmkjBzUKuPBYzZBFih8iU9056rqgN7eMKQYjRwPeV0+AeiB\n" + "irw46OgkwVQu3shEUtXxZPP2Mb6Md23+4vSmcElUcW28Opt2q/M5fs7DNomG3eaG\n" + "-----END CERTIFICATE-----\n" + ), + "state": "present", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_remove_na_sg_org_identity_federation(self): + return dict( + { + "ldap_service_type": "Active Directory", + "hostname": "ad.example.com", + "state": "absent", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + org_identity_federation_module() + print("Info: test_module_fail_when_required_args_missing: %s" % exc.value.args[0]["msg"]) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + org_identity_federation_module() + exit_json(changed=True, msg="Induced arguments check") + print("Info: test_module_fail_when_required_args_present: %s" % exc.value.args[0]["msg"]) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_set_na_sg_org_identity_federation_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_org_identity_federation()) + my_obj 
= org_identity_federation_module() + mock_request.side_effect = [ + SRR["identity_federation_unset"], # get + SRR["identity_federation"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_set_na_sg_org_identity_federation_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_idempotent_set_na_sg_org_identity_federation_pass(self, mock_request): + args = self.set_args_set_na_sg_org_identity_federation() + # remove password + del args["password"] + set_module_args(args) + my_obj = org_identity_federation_module() + mock_request.side_effect = [ + SRR["identity_federation"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_idempotent_set_na_sg_org_identity_federation_pass: %s" % repr(exc.value.args[0])) + assert not exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_set_na_sg_org_identity_federation_tls_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_org_identity_federation_tls()) + my_obj = org_identity_federation_module() + mock_request.side_effect = [ + SRR["identity_federation_unset"], # get + SRR["identity_federation_tls"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_set_na_sg_org_identity_federation_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_remove_na_sg_org_identity_federation_pass(self, mock_request): + set_module_args(self.set_args_remove_na_sg_org_identity_federation()) + my_obj = org_identity_federation_module() + mock_request.side_effect = [ + 
SRR["identity_federation"], # get + SRR["identity_federation_disable"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_remove_na_sg_org_identity_federation_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + # test check mode + + @patch("ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request") + def test_check_mode_na_sg_org_identity_federation_pass(self, mock_request): + set_module_args(self.set_args_set_na_sg_org_identity_federation()) + my_obj = org_identity_federation_module() + my_obj.module.check_mode = True + mock_request.side_effect = [ + SRR["identity_federation_unset"], # get + SRR["check_mode_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_check_mode_na_sg_org_identity_federation_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + assert exc.value.args[0]["msg"] == "Connection test successful" diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_info.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_info.py new file mode 100644 index 000000000..e24c7cd46 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_info.py @@ -0,0 +1,263 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' Unit Tests NetApp StorageGRID Org Ansible module: na_sg_org_info ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import patch + +from 
ansible_collections.netapp.storagegrid.plugins.modules.na_sg_org_info \ + import NetAppSgGatherInfo as sg_org_info_module + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'empty_good': ({'data': []}, None), + 'end_of_sequence': (None, 'Unexpected call to send_request'), + 'generic_error': (None, 'Expected error'), + 'org_compliance_global': ({'data': {}}, None), + 'org_config': ({'data': {}}, None), + 'org_config_product_version': ({'data': {}}, None), + 'org_containers': ({'data': {}}, None), + 'org_deactivated_features': ({'data': {}}, None), + 'org_endpoints': ({'data': []}, None), + 'org_groups': ({'data': []}, None), + 'org_identity_source': ({'data': {}}, None), + 'org_regions': ({'data': []}, None), + 'org_users_current_user_s3_access_keys': ({'data': []}, None), + 'org_usage': ({'data': {}}, None), + 'org_users': ( + { + 'data': [ + { + 'accountId': '99846664116007910793', + 'disable': False, + 'federated': False, + 'fullName': 'Root', + 'id': '00000000-0000-0000-0000-000000000000', + 'memberOf': None, + 'uniqueName': 'root', + 'userURN': 'urn:sgws:identity::99846664116007910793:root' + }, + ] + }, + None + ), + 'org_users_root': ( + { + 'data': { + 'accountId': '99846664116007910793', + 'disable': False, + 'federated': False, + 'fullName': 'Root', + 'id': '00000000-0000-0000-0000-000000000000', + 'memberOf': None, + 'uniqueName': 'root', + 'userURN': 'urn:sgws:identity::99846664116007910793:root' + }, + }, + None + ), + 'versions': ({'data': [2, 3]}, None), +} + + +def set_module_args(args): + ''' Prepare arguments so that they will be picked up during module creation ''' + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + ''' Exception class to be raised by module.exit_json and caught by the test case ''' + pass + + +class AnsibleFailJson(Exception): + ''' Exception class to be raised by module.fail_json 
and caught by the test case ''' + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + ''' Function to patch over exit_json; package return data into an exception ''' + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + ''' Function to patch over fail_json; package return data into an exception ''' + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + ''' A group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + 'api_url': 'sgmi.example.com', + } + ) + + def set_default_args_pass_check(self): + return dict( + { + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + } + ) + + def set_default_optional_args_pass_check(self): + return dict( + { + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + 'validate_certs': False, + 'gather_subset': ['all'], + 'parameters': {'limit': 5}, + } + ) + + def set_args_run_sg_gather_facts_for_all_info(self): + return dict({ + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + 'validate_certs': False, + }) + + def set_args_run_sg_gather_facts_for_org_users_info(self): + return dict({ + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + 'validate_certs': False, + 'gather_subset': ['org_users_info'], + }) + + def set_args_run_sg_gather_facts_for_org_users_and_org_users_root_info(self): + return dict({ + 'api_url': 'sgmi.example.com', + 'auth_token': '01234567-5678-9abc-78de-9fgabc123def', + 'validate_certs': False, + 'gather_subset': ['org_users_info', 
'org/users/root'], + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + sg_org_info_module() + print( + 'Info: test_module_fail_when_required_args_missing: %s' + % exc.value.args[0]['msg'] + ) + + def test_module_pass_when_required_args_present(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + sg_org_info_module() + exit_json(changed=True, msg='Induced arguments check') + print( + 'Info: test_module_pass_when_required_args_present: %s' + % exc.value.args[0]['msg'] + ) + assert exc.value.args[0]['changed'] + + def test_module_pass_when_optional_args_present(self): + ''' Optional arguments are reported as pass ''' + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_optional_args_pass_check()) + sg_org_info_module() + exit_json(changed=True, msg='Induced arguments check') + print( + 'Info: test_module_pass_when_optional_args_present: %s' + % exc.value.args[0]['msg'] + ) + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request') + def test_run_sg_gather_facts_for_all_info_pass(self, mock_request): + set_module_args(self.set_args_run_sg_gather_facts_for_all_info()) + my_obj = sg_org_info_module() + gather_subset = [ + 'org/compliance-global', + 'org/config', + 'org/config/product-version', + 'org/containers', + 'org/deactivated-features', + 'org/endpoints', + 'org/groups', + 'org/identity-source', + 'org/regions', + 'org/users/current-user/s3-access-keys', + 'org/usage', + 'org/users', + 'org/users/root', + 'versions', + ] + mock_request.side_effect = [ + SRR['org_compliance_global'], + SRR['org_config'], + SRR['org_config_product_version'], + SRR['org_containers'], + 
SRR['org_deactivated_features'], + SRR['org_endpoints'], + SRR['org_groups'], + SRR['org_identity_source'], + SRR['org_regions'], + SRR['org_users_current_user_s3_access_keys'], + SRR['org_usage'], + SRR['org_users'], + SRR['org_users_root'], + SRR['versions'], + SRR['end_of_sequence'], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_run_sg_gather_facts_for_all_info_pass: %s' % repr(exc.value.args)) + assert set(exc.value.args[0]['sg_info']) == set(gather_subset) + + @patch('ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request') + def test_run_sg_gather_facts_for_org_users_info_pass(self, mock_request): + set_module_args(self.set_args_run_sg_gather_facts_for_org_users_info()) + my_obj = sg_org_info_module() + gather_subset = ['org/users'] + mock_request.side_effect = [ + SRR['org_users'], + SRR['end_of_sequence'], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_run_sg_gather_facts_for_org_users_info_pass: %s' % repr(exc.value.args)) + assert set(exc.value.args[0]['sg_info']) == set(gather_subset) + + @patch('ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request') + def test_run_sg_gather_facts_for_org_users_and_org_users_root_info_pass(self, mock_request): + set_module_args(self.set_args_run_sg_gather_facts_for_org_users_and_org_users_root_info()) + my_obj = sg_org_info_module() + gather_subset = ['org/users', 'org/users/root'] + mock_request.side_effect = [ + SRR['org_users'], + SRR['org_users_root'], + SRR['end_of_sequence'], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print('Info: test_run_sg_gather_facts_for_org_users_and_org_users_root_info_pass: %s' % repr(exc.value.args)) + assert set(exc.value.args[0]['sg_info']) == set(gather_subset) diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_user.py 
b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_user.py new file mode 100644 index 000000000..8fcec6734 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_user.py @@ -0,0 +1,476 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Org Group Ansible module: na_sg_org_user""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_org_user import ( + SgOrgUser as org_user_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ({"status": "error", "code": 404, "data": {}}, {"key": "error.404"},), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": ({"code": 204}, None), + "pw_change_good": ({"code": 204}, None), + "org_groups": ( + { + "data": [ + { + "displayName": "TestOrgGroup1", + "uniqueName": "group/testorggroup1", + "accountId": "12345678901234567890", + "id": "12345678-abcd-1234-abcd-1234567890ab", + "federated": False, + "groupURN": "urn:sgws:identity::12345678901234567890:group/testorggroup1", + }, + { + "displayName": "TestOrgGroup2", + "uniqueName": "group/testorggroup2", + "accountId": 
"12345678901234567890", + "id": "87654321-abcd-1234-cdef-1234567890ab", + "federated": False, + "groupURN": "urn:sgws:identity::12345678901234567890:group/testorggroup2", + }, + ] + }, + None, + ), + "org_users": ( + { + "data": [ + { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "accountId": "12345678901234567890", + "fullName": "testorguser", + "uniqueName": "user/ansible-sg-demo-user1", + "userURN": "urn:sgws:identity::12345678901234567890:user/testorguser", + "federated": False, + "memberOf": ["12345678-abcd-1234-abcd-1234567890ab"], + "disable": False, + } + ] + }, + None, + ), + "org_user_record_no_group": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "accountId": "12345678901234567890", + "fullName": "testorguser", + "uniqueName": "user/ansible-sg-demo-user1", + "userURN": "urn:sgws:identity::12345678901234567890:user/testorguser", + "federated": False, + "disable": False, + } + }, + None, + ), + "org_user_record": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "accountId": "12345678901234567890", + "fullName": "testorguser", + "uniqueName": "user/ansible-sg-demo-user1", + "userURN": "urn:sgws:identity::12345678901234567890:user/testorguser", + "federated": False, + "memberOf": ["12345678-abcd-1234-abcd-1234567890ab"], + "disable": False, + } + }, + None, + ), + "org_user_record_update": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "accountId": "12345678901234567890", + "fullName": "testorguser", + "uniqueName": "user/ansible-sg-demo-user1", + "userURN": "urn:sgws:identity::12345678901234567890:user/testorguser", + "federated": False, + "memberOf": [ + "12345678-abcd-1234-abcd-1234567890ab", + "87654321-abcd-1234-cdef-1234567890ab", + ], + "disable": False, + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: 
disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "full_name": "TestUser", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "full_name": "TestUser", + "unique_name": "user/testuser", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_org_user_no_group(self): + return dict( + { + "state": "present", + "full_name": "TestUser", + "unique_name": "user/testuser", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_org_user(self): + return dict( + { + "state": "present", + "full_name": "TestUser", + "unique_name": "user/testuser", + "member_of": ["group/testorggroup1"], + 
"api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_org_user(self): + return dict( + { + "state": "absent", + # "full_name": "TestUser", + "unique_name": "user/testuser", + # "member_of": ["group/testorggroup1"], + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + org_user_module() + print( + "Info: test_module_fail_when_required_args_missing: %s" + % exc.value.args[0]["msg"] + ) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + org_user_module() + exit_json(changed=True, msg="Induced arguments check") + print( + "Info: test_module_fail_when_required_args_present: %s" + % exc.value.args[0]["msg"] + ) + assert exc.value.args[0]["changed"] + + def test_module_fail_with_bad_unique_name(self): + """ error returned if unique_name doesn't start with user or federated_user """ + with pytest.raises(AnsibleFailJson) as exc: + args = self.set_default_args_pass_check() + args["unique_name"] = "noprefixuser" + set_module_args(args) + org_user_module() + print( + "Info: test_module_fail_with_bad_unique_name: %s" % exc.value.args[0]["msg"] + ) + + def set_args_create_na_sg_org_user_with_password(self): + return dict( + { + "state": "present", + "full_name": "TestUser", + "unique_name": "user/testuser", + "member_of": ["group/testorggroup1"], + "password": "netapp123", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + @patch( + 
"ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_org_user_no_group_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_user_no_group()) + my_obj = org_user_module() + mock_request.side_effect = [ + SRR["not_found"], # get + SRR["org_user_record_no_group"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_org_user_no_group_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_org_user_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_user()) + my_obj = org_user_module() + mock_request.side_effect = [ + SRR["not_found"], # get + SRR["org_groups"], # get + SRR["org_user_record"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_create_na_sg_org_user_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_create_na_sg_org_user_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_user()) + my_obj = org_user_module() + mock_request.side_effect = [ + SRR["org_user_record"], # get + SRR["org_groups"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_create_na_sg_org_user_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_update_na_sg_org_user_pass(self, mock_request): + args = self.set_args_create_na_sg_org_user() + args["member_of"] = 
["group/testorggroup1", "group/testorggroup2"] + + set_module_args(args) + my_obj = org_user_module() + mock_request.side_effect = [ + SRR["org_user_record"], # get + SRR["org_groups"], # get + SRR["org_user_record_update"], # put + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_update_na_sg_org_user_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_delete_na_sg_org_user_pass(self, mock_request): + set_module_args(self.set_args_delete_na_sg_org_user()) + my_obj = org_user_module() + mock_request.side_effect = [ + SRR["org_user_record"], # get + SRR["org_groups"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print("Info: test_delete_na_sg_org_user_pass: %s" % repr(exc.value.args[0])) + assert exc.value.args[0]["changed"] + + # create user and set pass + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_org_user_and_set_password_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_user_with_password()) + my_obj = org_user_module() + mock_request.side_effect = [ + SRR["not_found"], # get + SRR["org_groups"], # get + SRR["org_user_record"], # post + SRR["pw_change_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_org_user_and_set_password_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + # Idempotent user with password defined + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_create_na_sg_org_user_and_set_password_pass(self, mock_request): + 
set_module_args(self.set_args_create_na_sg_org_user_with_password()) + my_obj = org_user_module() + mock_request.side_effect = [ + SRR["org_user_record"], # get + SRR["org_groups"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_create_na_sg_org_user_and_set_password_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + # update user and set pass + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_update_na_sg_org_user_and_set_password_pass(self, mock_request): + args = self.set_args_create_na_sg_org_user_with_password() + args["member_of"] = ["group/testorggroup1", "group/testorggroup2"] + args["update_password"] = "always" + + set_module_args(args) + my_obj = org_user_module() + mock_request.side_effect = [ + SRR["org_user_record"], # get + SRR["org_groups"], # get + SRR["org_user_record_update"], # put + SRR["pw_change_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_update_na_sg_org_user_and_set_password_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + # set pass only + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_set_na_sg_org_user_password_pass(self, mock_request): + args = self.set_args_create_na_sg_org_user_with_password() + args["update_password"] = "always" + + set_module_args(args) + my_obj = org_user_module() + mock_request.side_effect = [ + SRR["org_user_record"], # get + SRR["org_groups"], # get + SRR["pw_change_good"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_set_na_sg_org_user_password_pass: %s" % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + # attempt to set password on 
federated user + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_fail_set_federated_user_password(self, mock_request): + with pytest.raises(AnsibleFailJson) as exc: + args = self.set_args_create_na_sg_org_user_with_password() + args["unique_name"] = "federated-user/abc123" + args["update_password"] = "always" + set_module_args(args) + org_user_module() + print( + "Info: test_fail_set_federated_user_password: %s" % repr(exc.value.args[0]) + ) diff --git a/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_user_s3_key.py b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_user_s3_key.py new file mode 100644 index 000000000..53696bdbf --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/plugins/modules/test_na_sg_org_user_s3_key.py @@ -0,0 +1,238 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests NetApp StorageGRID Org Group Ansible module: na_sg_org_user_s3_key""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +import json +import pytest +import sys +try: + from requests import Response +except ImportError: + if sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + else: + raise + +from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest +from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( + patch, + Mock, +) +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_org_user_s3_key import ( + SgOrgUserS3Key as org_s3_key_module, +) + +# REST API canned responses when mocking send_request +SRR = { + # common responses + "empty_good": ({"data": []}, None), + "not_found": ( + 
{"status": "error", "code": 404, "data": {}}, + {"key": "error.404"}, + ), + "end_of_sequence": (None, "Unexpected call to send_request"), + "generic_error": (None, "Expected error"), + "delete_good": ({"code": 204}, None), + "org_user_record": ( + { + "data": { + "id": "09876543-abcd-4321-abcd-0987654321ab", + "accountId": "12345678901234567890", + "fullName": "testorguser", + "uniqueName": "user/testorguser", + "userURN": "urn:sgws:identity::12345678901234567890:user/testorguser", + "federated": False, + "memberOf": ["12345678-abcd-1234-abcd-1234567890ab"], + "disable": False, + } + }, + None, + ), + "org_s3_key": ( + { + "data": { + "id": "abcABC_01234-0123456789abcABCabc0123456789==", + "accountId": 12345678901234567000, + "displayName": "****************AB12", + "userURN": "urn:sgws:identity::12345678901234567890:root", + "userUUID": "09876543-abcd-4321-abcd-0987654321ab", + "expires": "2020-09-04T00:00:00.000Z", + "accessKey": "ABCDEFabcd1234567890", + "secretAccessKey": "abcABC+123456789012345678901234567890123", + } + }, + None, + ), +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if "changed" not in kwargs: + kwargs["changed"] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs["failed"] = True + raise 
AnsibleFailJson(kwargs) + + +class TestMyModule(unittest.TestCase): + """ a group of related Unit Tests """ + + def setUp(self): + self.mock_module_helper = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def set_default_args_fail_check(self): + return dict( + { + "unique_user_name": "user/testorguser", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_default_args_pass_check(self): + return dict( + { + "state": "present", + "unique_user_name": "user/testorguser", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_create_na_sg_org_user_s3_keys(self): + return dict( + { + "state": "present", + "unique_user_name": "user/testorguser", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def set_args_delete_na_sg_org_user_s3_keys(self): + return dict( + { + "state": "absent", + "unique_user_name": "user/testorguser", + "access_key": "ABCDEFabcd1234567890", + "api_url": "gmi.example.com", + "auth_token": "01234567-5678-9abc-78de-9fgabc123def", + "validate_certs": False, + } + ) + + def test_module_fail_when_required_args_missing(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleFailJson) as exc: + set_module_args(self.set_default_args_fail_check()) + org_s3_key_module() + print( + "Info: test_module_fail_when_required_args_missing: %s" + % exc.value.args[0]["msg"] + ) + + def test_module_fail_when_required_args_present(self): + """ required arguments are reported as errors """ + with pytest.raises(AnsibleExitJson) as exc: + set_module_args(self.set_default_args_pass_check()) + org_s3_key_module() + exit_json(changed=True, msg="Induced arguments check") + print( + "Info: 
test_module_fail_when_required_args_present: %s" + % exc.value.args[0]["msg"] + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_create_na_sg_org_user_s3_key_pass(self, mock_request): + set_module_args(self.set_args_create_na_sg_org_user_s3_keys()) + my_obj = org_s3_key_module() + mock_request.side_effect = [ + SRR["org_user_record"], # get + SRR["org_s3_key"], # post + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_create_na_sg_org_user_s3_key_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_idempotent_create_na_sg_org_user_s3_key_pass(self, mock_request): + args = self.set_args_create_na_sg_org_user_s3_keys() + args["access_key"] = "ABCDEFabcd1234567890" + set_module_args(args) + my_obj = org_s3_key_module() + mock_request.side_effect = [ + SRR["org_user_record"], # get + SRR["org_s3_key"], # get + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_idempotent_create_na_sg_org_user_s3_key_pass: %s" + % repr(exc.value.args[0]) + ) + assert not exc.value.args[0]["changed"] + + @patch( + "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" + ) + def test_delete_na_sg_org_user_s3_keys_pass(self, mock_request): + set_module_args(self.set_args_delete_na_sg_org_user_s3_keys()) + my_obj = org_s3_key_module() + mock_request.side_effect = [ + SRR["org_s3_key"], # get + SRR["delete_good"], # delete + SRR["end_of_sequence"], + ] + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + print( + "Info: test_delete_na_sg_org_user_s3_keys_pass: %s" + % repr(exc.value.args[0]) + ) + assert exc.value.args[0]["changed"] diff --git 
a/ansible_collections/netapp/storagegrid/tests/unit/requirements.txt b/ansible_collections/netapp/storagegrid/tests/unit/requirements.txt new file mode 100644 index 000000000..b754473a9 --- /dev/null +++ b/ansible_collections/netapp/storagegrid/tests/unit/requirements.txt @@ -0,0 +1 @@ +requests ; python_version >= '2.7' diff --git a/ansible_collections/netapp/um_info/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/netapp/um_info/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..71c875355 --- /dev/null +++ b/ansible_collections/netapp/um_info/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,210 @@ +--- +name: 🐛 Bug report +description: Create a report to help us improve + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to report a bug in netapp.um_info!** + + + ⚠ + Verify first that your issue is not [already reported on + GitHub][issue search] and keep in mind that we may have to keep + the current behavior because [every change breaks someone's + workflow][XKCD 1172]. + We try to be mindful about this. + + Also test if the latest release and devel branch are affected too. + + + **Tip:** If you are seeking community support, please consider + [Join our Slack community][ML||IRC]. + + + + [ML||IRC]: + https://join.slack.com/t/netapppub/shared_invite/zt-njcjx2sh-1VR2mEDvPcJAmPutOnP~mg + + [issue search]: ../search?q=is%3Aissue&type=issues + + [XKCD 1172]: https://xkcd.com/1172/ + + +- type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. + placeholder: >- + When I try to do X with netapp.um_info from the devel branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the rst file, module, plugin, task or + feature below, *use your best guess if unsure*. 
+ + + **Tip:** Cannot find it in this repository? Please be advised that + the source for some parts of the documentation are hosted outside + of this repository. If the page you are reporting describes + modules/plugins/etc that are not officially supported by the + Ansible Core Engineering team, there is a good chance that it is + coming from one of the [Ansible Collections maintained by the + community][collections org]. If this is the case, please make sure + to file an issue under the appropriate project there instead. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` below, under + the prompt line. Please don't wrap it with tripple backticks — your + whole input will be turned into a code snippet automatically. + render: console + value: | + $ ansible --version + placeholder: | + $ ansible --version + ansible [core 2.11.0b4.post0] (detached HEAD ref: refs/) last updated 2021/04/02 00:33:35 (GMT +200) + config file = None + configured module search path = ['~/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible python module location = ~/src/github/ansible/ansible/lib/ansible + ansible collection location = ~/.ansible/collections:/usr/share/ansible/collections + executable location = bin/ansible + python version = 3.9.0 (default, Oct 26 2020, 13:08:59) [GCC 10.2.0] + jinja version = 2.11.3 + libyaml = True + validations: + required: true + +- type: textarea + attributes: + label: UM_Info Collection Version + description: >- + UM_Info Collection Version. 
Run `ansible-galaxy collection` and copy the entire output + render: console + value: | + $ ansible-galaxy collection list + validations: + required: true + +- type: textarea + attributes: + label: Playbook + description: >- + The task from the playbook that is give you the issue + render: console + validations: + required: true + +- type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also pased any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: | + 1. Implement the following playbook: + + ```yaml + --- + # ping.yml + - hosts: all + gather_facts: false + tasks: + - ping: + ... + ``` + 2. Then run `ANSIBLE_DEBUG=1 ansible-playbook ping.yml -vvvvv` + 3. An error occurs. + validations: + required: true + +- type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y and was shocked + that it did not. + validations: + required: true + +- type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output and don't wrap it with tripple backticks — your + whole input will be turned into a code snippet automatically. + render: console + placeholder: >- + Certificate did not match expected hostname: files.pythonhosted.org. 
Certificate: {'notAfter': 'Apr 28 19:20:25 2021 GMT', 'subjectAltName': ((u'DNS', 'r.ssl.fastly.net'), (u'DNS', '*.catchpoint.com'), (u'DNS', '*.cnn.io'), (u'DNS', '*.dollarshaveclub.com'), (u'DNS', '*.eater.com'), (u'DNS', '*.fastly.picmonkey.com'), (u'DNS', '*.files.saymedia-content.com'), (u'DNS', '*.ft.com'), (u'DNS', '*.meetupstatic.com'), (u'DNS', '*.nfl.com'), (u'DNS', '*.pagar.me'), (u'DNS', '*.picmonkey.com'), (u'DNS', '*.realself.com'), (u'DNS', '*.sbnation.com'), (u'DNS', '*.shakr.com'), (u'DNS', '*.streamable.com'), (u'DNS', '*.surfly.com'), (u'DNS', '*.theverge.com'), (u'DNS', '*.thrillist.com'), (u'DNS', '*.vox-cdn.com'), (u'DNS', '*.vox.com'), (u'DNS', '*.voxmedia.com'), (u'DNS', 'eater.com'), (u'DNS', 'ft.com'), (u'DNS', 'i.gse.io'), (u'DNS', 'picmonkey.com'), (u'DNS', 'realself.com'), (u'DNS', 'static.wixstatic.com'), (u'DNS', 'streamable.com'), (u'DNS', 'surfly.com'), (u'DNS', 'theverge.com'), (u'DNS', 'vox-cdn.com'), (u'DNS', 'vox.com'), (u'DNS', 'www.joyent.com')), 'subject': ((('countryName', u'US'),), (('stateOrProvinceName', u'California'),), (('localityName', u'San Francisco'),), (('organizationName', u'Fastly, Inc'),), (('commonName', u'r.ssl.fastly.net'),))} + Exception: + Traceback (most recent call last): + File "/usr/local/lib/python2.6/dist-packages/pip/basecommand.py", line 215, in main + status = self.run(options, args) + File "/usr/local/lib/python2.6/dist-packages/pip/commands/install.py", line 335, in run + wb.build(autobuilding=True) + File "/usr/local/lib/python2.6/dist-packages/pip/wheel.py", line 749, in build + self.requirement_set.prepare_files(self.finder) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 380, in prepare_files + ignore_dependencies=self.ignore_dependencies)) + File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 620, in _prepare_file + session=self.session, hashes=hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 821, in unpack_url + 
hashes=hashes + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 659, in unpack_http_url + hashes) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 853, in _download_http_url + stream=True, + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 521, in get + return self.request('GET', url, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 386, in request + return super(PipSession, self).request(method, url, *args, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 508, in request + resp = self.send(prep, **send_kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 618, in send + r = adapter.send(request, **kwargs) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/cachecontrol/adapter.py", line 47, in send + resp = super(CacheControlAdapter, self).send(request, **kw) + File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/adapters.py", line 506, in send + raise SSLError(e, request=request) + SSLError: HTTPSConnectionPool(host='files.pythonhosted.org', port=443): Max retries exceeded with url: /packages/ef/ab/aa12712415809bf698e719b307419f953e25344e8f42d557533d7a02b276/netapp_lib-2020.7.16-py2-none-any.whl (Caused by SSLError(CertificateError("hostname 'files.pythonhosted.org' doesn't match either of 'r.ssl.fastly.net', '*.catchpoint.com', '*.cnn.io', '*.dollarshaveclub.com', '*.eater.com', '*.fastly.picmonkey.com', '*.files.saymedia-content.com', '*.ft.com', '*.meetupstatic.com', '*.nfl.com', '*.pagar.me', '*.picmonkey.com', '*.realself.com', '*.sbnation.com', '*.shakr.com', '*.streamable.com', '*.surfly.com', '*.theverge.com', '*.thrillist.com', '*.vox-cdn.com', '*.vox.com', '*.voxmedia.com', 'eater.com', 'ft.com', 'i.gse.io', 'picmonkey.com', 'realself.com', 'static.wixstatic.com', 'streamable.com', 'surfly.com', 'theverge.com', 'vox-cdn.com', 'vox.com', 
'www.joyent.com'",),)) + ERROR: Command "/usr/bin/python2.6 /root/ansible/test/lib/ansible_test/_data/quiet_pip.py install --disable-pip-version-check -r /root/ansible/test/lib/ansible_test/_data/requirements/units.txt -r tests/unit/requirements.txt -c /root/ansible/test/lib/ansible_test/_data/requirements/constraints.txt" returned exit status 2. + ERROR: Command "docker exec d47eb360db4ce779c1f690db964655b76e68895c4360ff252c46fe7fe6f5c75a /usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/root/ansible_collections/netapp/ontap LC_ALL=en_US.UTF-8 /usr/bin/python3.6 /root/ansible/bin/ansible-test units --metadata tests/output/.tmp/metadata-9i2qfrcl.json --truncate 200 --redact --color yes --requirements --python default --requirements-mode only" returned exit status 1. + validations: + required: true + + +- type: markdown + attributes: + value: > + *One last thing...* + + + Thank you for your collaboration! + + +... diff --git a/ansible_collections/netapp/um_info/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/netapp/um_info/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..126b4b9cd --- /dev/null +++ b/ansible_collections/netapp/um_info/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,100 @@ +--- +name: ✨ Feature request +description: Suggest an idea for this project + +body: +- type: markdown + attributes: + value: > + **Thank you for wanting to suggest a feature for netapp.um_info!** + + 💡 + Before you go ahead with your request, please first consider if it + would be useful for majority of the netapp.um_info users. As a + general rule of thumb, any feature that is only of interest to a + small sub group should be [implemented in a third-party Ansible + Collection][contribute to collections] or maybe even just your + project alone. Be mindful of the fact that the essential + netapp.um_info features have a broad impact. + + +
+ + ❗ Every change breaks someone's workflow. + + + + [![❗ Every change breaks someone's workflow. + ](https://imgs.xkcd.com/comics/workflow.png) + ](https://xkcd.com/1172/) +
+ + + ⚠ + Verify first that your idea is not [already requested on + GitHub][issue search]. + + Also test if the main branch does not already implement this. + + +- type: textarea + attributes: + label: Summary + description: > + Describe the new feature/improvement you would like briefly below. + + + What's the problem this feature will solve? + + What are you trying to do, that you are unable to achieve + with netapp.um_info as it currently stands? + + + * Provide examples of real-world use cases that this would enable + and how it solves the problem you described. + + * How do you solve this now? + + * Have you tried to work around the problem using other tools? + + * Could there be a different approach to solving this issue? + + placeholder: >- + I am trying to do X with netapp.um_info from the devel branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of netapp.um_info because of Z. + validations: + required: true + +- type: input + attributes: + label: Component Name + description: > + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. + + + [collections org]: /ansible-collections + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + placeholder: >- + I asked on https://stackoverflow.com/.... and the community + advised me to do X, Y and Z. + validations: + required: true + +... 
diff --git a/ansible_collections/netapp/um_info/.github/workflows/coverage.yml b/ansible_collections/netapp/um_info/.github/workflows/coverage.yml new file mode 100644 index 000000000..39d5818c9 --- /dev/null +++ b/ansible_collections/netapp/um_info/.github/workflows/coverage.yml @@ -0,0 +1,45 @@ +name: NetApp.um_info Ansible Coverage + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Coverage on UM_INFO + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible stable-2.11 + run: pip install https://github.com/ansible/ansible/archive/stable-2.11.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/um_info/ + rsync -av . ansible_collections/netapp/um_info/ --exclude ansible_collections/netapp/um_info/ + + - name: Run Unit Tests + run: ansible-test units --coverage --color --docker --python 3.8 + working-directory: ansible_collections/netapp/um_info/ + + # ansible-test supports producing code coverage data + - name: Generate coverage report + run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ansible_collections/netapp/um_info/ + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + working-directory: ansible_collections/netapp/um_info/ + verbose: true \ No newline at end of file diff --git a/ansible_collections/netapp/um_info/.github/workflows/main.yml b/ansible_collections/netapp/um_info/.github/workflows/main.yml new file mode 100644 index 000000000..fbcf92002 --- /dev/null +++ b/ansible_collections/netapp/um_info/.github/workflows/main.yml @@ -0,0 +1,47 @@ +name: NetApp.um_info Ansible CI + +on: + push: + pull_request: + schedule: + - cron: '0 6 * * *' + +jobs: + sanity: + name: Sanity (${{ matrix.ansible }} 
on Um_info + runs-on: ubuntu-latest + strategy: + matrix: + ansible: + - stable-2.9 + - stable-2.10 + - stable-2.11 + - stable-2.12 + - devel + + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install ansible (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Make directory to make ansible-test happy + run: | + pwd + mkdir -p ansible_collections/netapp/um_info/ + rsync -av . ansible_collections/netapp/um_info/ --exclude ansible_collections/netapp/um_info/ + + + - name: Run sanity tests Um_info + run: ansible-test sanity --docker -v --color + working-directory: ansible_collections/netapp/um_info/ + + - name: Run Unit Tests + run: ansible-test units --docker -v --color + working-directory: ansible_collections/netapp/um_info/ diff --git a/ansible_collections/netapp/um_info/CHANGELOG.rst b/ansible_collections/netapp/um_info/CHANGELOG.rst new file mode 100644 index 000000000..f5d538d11 --- /dev/null +++ b/ansible_collections/netapp/um_info/CHANGELOG.rst @@ -0,0 +1,78 @@ +==================================================== +NetApp Unified Manager Info Collection Release Notes +==================================================== + +.. contents:: Topics + + +v21.8.0 +======= + +Minor Changes +------------- + +- PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. + +v21.7.0 +======= + +Minor Changes +------------- + +- all modules - ability to trace API calls and responses. +- all modules - new ``max_records`` option to limit the amount of data in a single GET response. + +Bugfixes +-------- + +- all modules - report error when connecting to a server that does not run AIQUM. +- all modules - return all records rather than the first 1000 records (mostly for volumes). 
+- rename na_um_list_volumes.p to na_um_list_volumes.py + +v21.6.0 +======= + +Minor Changes +------------- + +- na_um_list_aggregates has been renamed na_um_aggregates_info. +- na_um_list_clusters has been renamed na_um_clusters_info. +- na_um_list_nodes has been renamed na_um_nodes_info. +- na_um_list_svms has been renamed na_um_svms_info. +- na_um_list_volumes has been renamed na_um_volumes_info. + +v21.5.0 +======= + +Minor Changes +------------- + +- minor changes to meet Red Hat requirements to be certified. + +v20.7.0 +======= + +Minor Changes +------------- + +- na_um_list_aggregates - Now sort by performance_capacity.used +- na_um_list_nodes - Now sort by performance_capacity.used + +v20.6.0 +======= + +New Modules +----------- + +- netapp.um_info.na_um_list_volumes - NetApp Unified Manager list volumes. + +v20.5.0 +======= + +New Modules +----------- + +- netapp.um_info.na_um_list_aggregates - NetApp Unified Manager list aggregates. +- netapp.um_info.na_um_list_clusters - NetApp Unified Manager list cluster. +- netapp.um_info.na_um_list_nodes - NetApp Unified Manager list nodes. +- netapp.um_info.na_um_list_svms - NetApp Unified Manager list svms. diff --git a/ansible_collections/netapp/um_info/COPYING b/ansible_collections/netapp/um_info/COPYING new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/ansible_collections/netapp/um_info/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ansible_collections/netapp/um_info/FILES.json b/ansible_collections/netapp/um_info/FILES.json new file mode 100644 index 000000000..0d45bda4d --- /dev/null +++ b/ansible_collections/netapp/um_info/FILES.json @@ -0,0 +1,418 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec72420df5dfbdce4111f715c96338df3b7cb75f58e478d2449c9720e560de8c", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd4a43b072697053c41ac2c6979513bd8fadd8c80eece1ca2a5454f24ecf85da", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "821070453abf1ec684677f43878fdd4ceb66bb30e40aeeb16b35aa71c0294249", + "format": 1 + }, + { + "name": "plugins/module_utils/netapp_module.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf7052585943d6e39a9b671538947f8de77411805f659c148267099603d26bef", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/na_um_list_volumes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e155cc127f6b1fb1a7512a52b15077e8bcc71cd2bd36fceb8e5811b43fa6c647", + "format": 1 + }, + { + "name": "plugins/modules/na_um_list_nodes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"c9dc5fee102f858e25be36bd6a4cbc72732fd4d87f5693d9dc9c070360b05b3b", + "format": 1 + }, + { + "name": "plugins/modules/na_um_svms_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "810aed45c718629fc7629558a6f1de69bcb30206dbe1b04b8aff4284512b910f", + "format": 1 + }, + { + "name": "plugins/modules/na_um_clusters_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fa552cbf9cc2684c8378023e0bd8b338d33f8f826c05812addc5867830999b97", + "format": 1 + }, + { + "name": "plugins/modules/na_um_aggregates_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b65dde8d0d786b63ea89d411024d5c36dc56fe45d9472eec7f5c1d3dba47fff8", + "format": 1 + }, + { + "name": "plugins/modules/na_um_list_clusters.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fa552cbf9cc2684c8378023e0bd8b338d33f8f826c05812addc5867830999b97", + "format": 1 + }, + { + "name": "plugins/modules/na_um_list_svms.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "810aed45c718629fc7629558a6f1de69bcb30206dbe1b04b8aff4284512b910f", + "format": 1 + }, + { + "name": "plugins/modules/na_um_nodes_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9dc5fee102f858e25be36bd6a4cbc72732fd4d87f5693d9dc9c070360b05b3b", + "format": 1 + }, + { + "name": "plugins/modules/na_um_volumes_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e155cc127f6b1fb1a7512a52b15077e8bcc71cd2bd36fceb8e5811b43fa6c647", + "format": 1 + }, + { + "name": "plugins/modules/na_um_list_aggregates.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b65dde8d0d786b63ea89d411024d5c36dc56fe45d9472eec7f5c1d3dba47fff8", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat", 
+ "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/compat/unittest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cba95d18c5b39c6f49714eacf1ac77452c2e32fa087c03cf01aacd19ae597b0f", + "format": 1 + }, + { + "name": "tests/unit/compat/builtins.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1", + "format": 1 + }, + { + "name": "tests/unit/compat/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/compat/mock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", + "format": 1 + }, + { + "name": "tests/unit/requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68a61b1d58a722f4ffabaa28da01c9837c93a582ea41c1bfb1c1fd54ea2d8fab", + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_netapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a4de097829490ea8016a6227b340a27e23a38f40189f12f80217caa199c608ec", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_um_volumes_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8a2d4ca0f304a588de4d642da415362f3b15b2926fa12a90117b58b9f71d6d9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_um_clusters_info.py", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "11f9aa85947d440b6a647a2cc6be1cf16d93d69b08ed288a33fa8168836d2521", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_um_nodes_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf4ded8134d30ed7b82769252addf2094d07b6bf3ec81e7aba0615b290558cfb", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_um_aggregates_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba3687cb122aa7452f21052b5a6f26448df8356e92e4b78c20cce55c66ea3026", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_na_um_svms_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7933824403b197ec756f540be054a5e2c75b5a3a28cf60280cd11493f4c4b235", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88d46a475f366fffbd8ec430a85972ed1aa180268564bd88434cceaaaf13712c", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3962.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4f9cd51e1471197180cbedd7a89fc1ebbbd5f3daed2ac50a63e1df54e28c0b7", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4087.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "549499c7518f654d08f85c8d2774506fa8206db9cfad997a620a874ba55a0b24", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4059.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a77590a8f2eefce57127281e067aa0a873c7dee6c29c64b9f24a6aa44ed559fc", + "format": 1 + }, + { + "name": 
"changelogs/fragments/DEVOPS-2952.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df2ee1a655d129670751fac00f64cb82d73b66b4b2e4babf1c03eef1958f6784", + "format": 1 + }, + { + "name": "changelogs/fragments/20.7.0.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc97a5a80d92a3fa6228c00d686e4ce5173facb9ce1d2282905883eb4bec385a", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-3920.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "819a48b8af6f9be3dc5978af0e690830f15f3090cc9ce1e86532d726809cfb2c", + "format": 1 + }, + { + "name": "changelogs/fragments/DEVOPS-4416.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4224db573f34caeeb956c8728eb343a47bc2729d898001a4c6a671b780dae1bf", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "603b6174cae76f0b6d21432d05ba48ec1332b3a24bb910a81d75b1d8c0942914", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dfd5a84377e87d5a796d1b6fe3c534fe0bceb63b8bc1dcd94a8bbf9ea9c86947", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae574e5d7e3e4d82a855ed53b06c34a26134d7a9d50e023bf2408b51e0f8a679", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e4fe7f09274f317cf0b299f8c35a3df98f8d48e5014a7c7523c587593e1a63c", + "format": 1 + }, + { + "name": 
".github/workflows/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fa99709b97a6f722ae1a51afaf32ad66aa2d1306b708b8b9c8d723be715ca4c4", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/feature_request.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "faa5043f7628d0fa7975b9feb2c4d72e73965d0e6f072ca5fa13b00055f773e3", + "format": 1 + }, + { + "name": ".github/ISSUE_TEMPLATE/bug_report.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b1080b0ad861868077511d5a8fa8b3a84c5abb2cfc13cf4fa10eeaffb2c0957", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eff94a97af8456b8fcf2677bf5e05db854239b496bc787834c562f015937fbcf", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/um_info/MANIFEST.json b/ansible_collections/netapp/um_info/MANIFEST.json new file mode 100644 index 000000000..40b3df1ae --- /dev/null +++ b/ansible_collections/netapp/um_info/MANIFEST.json @@ -0,0 +1,34 @@ +{ + "collection_info": { + "namespace": "netapp", + "name": "um_info", + "version": "21.8.0", + "authors": [ + "NetApp Ansible Team " + ], + "readme": "README.md", + "tags": [ + "storage", + "netapp", + "aiqum", + "um", + "ontap" + ], + "description": "NetApp Unified Manager(AIQUM 9.7) Collection", + "license": [], + "license_file": "COPYING", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/netapp.um_info", + "documentation": null, + "homepage": "https://netapp.io/configuration-management-and-automation/", + "issues": "https://github.com/ansible-collections/netapp.um_info/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"cb6c57414d2dfe99a63f715ea33254ca1d6cd458b50fa0cbae778ea41b14ef2c", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/netapp/um_info/README.md b/ansible_collections/netapp/um_info/README.md new file mode 100644 index 000000000..193bbab59 --- /dev/null +++ b/ansible_collections/netapp/um_info/README.md @@ -0,0 +1,83 @@ +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/netapp/um_info/index.html) +![example workflow](https://github.com/ansible-collections/netapp.um_info/actions/workflows/main.yml/badge.svg) +[![codecov](https://codecov.io/gh/ansible-collections/netapp.um_info/branch/main/graph/badge.svg?token=weBYkksxSi)](https://codecov.io/gh/ansible-collections/netapp.um_info) + + +============================================================= + + netapp.um_info + + NetApp Unified Manager(AIQUM 9.7) Collection + + Copyright (c) 2020 NetApp, Inc. All rights reserved. + Specifications subject to change without notice. + +============================================================= +# Installation +```bash +ansible-galaxy collection install netapp.um_info +``` +To use this collection, add the following to the top of your playbook; without this you will be using the Ansible 2.9 version of the module +``` +collections: + - netapp.um_info +``` + +# Module documentation +https://docs.ansible.com/ansible/devel/collections/netapp/um_info/ + +# Code of Conduct +This collection follows the [Ansible project's Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html). + +# Need help +Join our Slack Channel at [Netapp.io](http://netapp.io/slack) + +# Release Notes + +## 21.8.0 + +#### Minor changes + - all modules - enable usage of Ansible module group defaults - for Ansible 2.12+. + +## 21.7.0 + +#### Minor changes + - all modules - ability to trace API calls and responses. 
+ - all modules - new `max_records` option to limit the amount of data in a single GET response. + +### Bug fixes + - all modules - report error when connecting to a server that does not run AIQUM. + - all modules - return all records rather than the first 1000 records (mostly for volumes). + - rename na_um_list_volumes.p to na_um_list_volumes.py. + +## 21.6.0 +### Minor changes +- na_um_list_aggregates has been renamed na_um_aggregates_info +- na_um_list_clusters has been renamed na_um_clusters_info +- na_um_list_nodes has been renamed na_um_nodes_info +- na_um_list_svms has been renamed na_um_svms_info +- na_um_list_volumes has been renamed na_um_volumes_info + +## 21.5.0 + +### Minor changes +- minor changes to meet Red Hat requirements to be certified. + +## 20.7.0 + +### Minor changes +- na_um_list_aggregates: Now sort by performance_capacity.used +- na_um_list_nodes: Now sort by performance_capacity.used + +## 20.6.0 + +### New Modules +- na_um_list_volumes: list volumes. + +## 20.5.0 + +### New Modules +- na_um_list_aggregates: list aggregates. +- na_um_list_clusters: list clusters. +- na_um_list_nodes: list nodes. +- na_um_list_svms: list svms. diff --git a/ansible_collections/netapp/um_info/changelogs/changelog.yaml b/ansible_collections/netapp/um_info/changelogs/changelog.yaml new file mode 100644 index 000000000..c5d56b307 --- /dev/null +++ b/ansible_collections/netapp/um_info/changelogs/changelog.yaml @@ -0,0 +1,72 @@ +ancestor: null +releases: + 20.5.0: + modules: + - description: NetApp Unified Manager list aggregates. + name: na_um_list_aggregates + namespace: '' + - description: NetApp Unified Manager list cluster. + name: na_um_list_clusters + namespace: '' + - description: NetApp Unified Manager list nodes. + name: na_um_list_nodes + namespace: '' + - description: NetApp Unified Manager list svms. + name: na_um_list_svms + namespace: '' + release_date: '2020-05-06' + 20.6.0: + modules: + - description: NetApp Unified Manager list volumes. 
+ name: na_um_list_volumes + namespace: '' + release_date: '2020-06-03' + 20.7.0: + changes: + minor_changes: + - na_um_list_aggregates - Now sort by performance_capacity.used + - na_um_list_nodes - Now sort by performance_capacity.used + fragments: + - 20.7.0.yaml + release_date: '2020-06-24' + 21.5.0: + changes: + minor_changes: + - minor changes to meet Red Hat requirements to be certified. + fragments: + - DEVOPS-3920.yaml + release_date: '2021-04-21' + 21.6.0: + changes: + minor_changes: + - na_um_list_aggregates has been renamed na_um_aggregates_info. + - na_um_list_clusters has been renamed na_um_clusters_info. + - na_um_list_nodes has been renamed na_um_nodes_info. + - na_um_list_svms has been renamed na_um_svms_info. + - na_um_list_volumes has been renamed na_um_volumes_info. + fragments: + - DEVOPS-3962.yaml + release_date: '2021-05-06' + 21.7.0: + changes: + bugfixes: + - all modules - report error when connecting to a server that does not run AIQUM. + - all modules - return all records rather than the first 1000 records (mostly + for volumes). + - rename na_um_list_volumes.p to na_um_list_volumes.py + minor_changes: + - all modules - ability to trace API calls and responses. + - all modules - new ``max_records`` option to limit the amount of data in a + single GET response. + fragments: + - DEVOPS-2952.yaml + - DEVOPS-4059.yaml + - DEVOPS-4087.yaml + release_date: '2021-07-14' + 21.8.0: + changes: + minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. 
+ fragments: + - DEVOPS-4416.yaml + release_date: '2021-11-03' diff --git a/ansible_collections/netapp/um_info/changelogs/config.yaml b/ansible_collections/netapp/um_info/changelogs/config.yaml new file mode 100644 index 000000000..a39ec8cc2 --- /dev/null +++ b/ansible_collections/netapp/um_info/changelogs/config.yaml @@ -0,0 +1,32 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: true +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sanitize_changelog: true +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: NetApp Unified Manager Info Collection +trivial_section_name: trivial +use_fqcn: true diff --git a/ansible_collections/netapp/um_info/changelogs/fragments/20.7.0.yaml b/ansible_collections/netapp/um_info/changelogs/fragments/20.7.0.yaml new file mode 100644 index 000000000..75f9b4e46 --- /dev/null +++ b/ansible_collections/netapp/um_info/changelogs/fragments/20.7.0.yaml @@ -0,0 +1,3 @@ +minor_changes: + - na_um_list_aggregates - Now sort by performance_capacity.used + - na_um_list_nodes - Now sort by performance_capacity.used diff --git a/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-2952.yaml b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-2952.yaml new file mode 100644 index 000000000..0d0699cc9 --- /dev/null +++ b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-2952.yaml @@ -0,0 +1,4 @@ +minor_changes: + - all modules - new 
``max_records`` option to limit the amount of data in a single GET response. +bugfixes: + - all modules - return all records rather than the first 1000 records (mostly for volumes). diff --git a/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-3920.yaml b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-3920.yaml new file mode 100644 index 000000000..c3c7f1224 --- /dev/null +++ b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-3920.yaml @@ -0,0 +1,2 @@ +minor_changes: + - minor changes to meet Red Hat requirements to be certified. diff --git a/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-3962.yaml b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-3962.yaml new file mode 100644 index 000000000..1d3ef3f86 --- /dev/null +++ b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-3962.yaml @@ -0,0 +1,6 @@ +minor_changes: +- na_um_list_aggregates has been renamed na_um_aggregates_info. +- na_um_list_clusters has been renamed na_um_clusters_info. +- na_um_list_nodes has been renamed na_um_nodes_info. +- na_um_list_svms has been renamed na_um_svms_info. +- na_um_list_volumes has been renamed na_um_volumes_info. diff --git a/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4059.yaml b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4059.yaml new file mode 100644 index 000000000..824f55f1e --- /dev/null +++ b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4059.yaml @@ -0,0 +1,2 @@ +bugfixes: + - rename na_um_list_volumes.p to na_um_list_volumes.py diff --git a/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4087.yaml b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4087.yaml new file mode 100644 index 000000000..836a4b1b2 --- /dev/null +++ b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4087.yaml @@ -0,0 +1,4 @@ +minor_changes: + - all modules - ability to trace API calls and responses. 
+bugfixes: + - all modules - report error when connecting to a server that does not run AIQUM. diff --git a/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4416.yaml b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4416.yaml new file mode 100644 index 000000000..6b4b660a0 --- /dev/null +++ b/ansible_collections/netapp/um_info/changelogs/fragments/DEVOPS-4416.yaml @@ -0,0 +1,2 @@ +minor_changes: + - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+. diff --git a/ansible_collections/netapp/um_info/meta/runtime.yml b/ansible_collections/netapp/um_info/meta/runtime.yml new file mode 100644 index 000000000..04c66ffeb --- /dev/null +++ b/ansible_collections/netapp/um_info/meta/runtime.yml @@ -0,0 +1,9 @@ +--- +requires_ansible: ">=2.9.10" +action_groups: + netapp_um_info: + - na_um_aggregates_info + - na_um_clusters_info + - na_um_nodes_info + - na_um_svms_info + - na_um_volumes_info diff --git a/ansible_collections/netapp/um_info/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/um_info/plugins/doc_fragments/netapp.py new file mode 100644 index 000000000..0790f109a --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/doc_fragments/netapp.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Suhas Bangalore Shekar +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + - See respective platform section for more details +requirements: + - See respective platform section for more details +notes: + - Ansible modules are available for the following NetApp Storage Management Platforms: AIQUM 9.7 +''' + + # Documentation fragment for AIQUM (um) + UM = r''' +options: + hostname: + description: + - The hostname or IP address of the Unified Manager instance. 
+ type: str + required: true + username: + description: + - username of the Unified Manager instance. + type: str + required: true + password: + description: + - Password for the specified user. + type: str + required: true + validate_certs: + description: + - If set to C(False), the SSL certificates will not be validated. + - This should only be set to C(False) when used on personally controlled sites using self-signed certificates. + type: bool + default: True + http_port: + description: + - Override the default port (443) with this port + type: int + feature_flags: + description: + - Enable or disable a new feature. + - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility. + - Supported keys and values are subject to change without notice. Unknown keys are ignored. + - trace_apis can be set to true to enable tracing, data is written to /tmp/um_apis.log. + type: dict + version_added: 21.7.0 + max_records: + description: + - Maximum number of records retrieved in a single GET request. + - This module loops on GET requests until all available records are fetched. + - If absent, AIQUM uses 1000. + type: int + version_added: 21.7.0 + + +requirements: + - An AIQUM 9.7 system. + - Ansible 2.9 or later. + +notes: + - With the 21.6.0 release, all modules have been renamed to na_um_*_info. The old ones will continue to work but will be deprecated in the future. + - The modules prefixed with na_um are built to support the AIQUM 9.7 platform. + - Supports check_mode. +''' diff --git a/ansible_collections/netapp/um_info/plugins/module_utils/netapp.py b/ansible_collections/netapp/um_info/plugins/module_utils/netapp.py new file mode 100644 index 000000000..ba58f56e8 --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/module_utils/netapp.py @@ -0,0 +1,246 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2017, Sumit Kumar +# Copyright (c) 2017, Michael Price +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +''' +common routines for um_info +''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import logging +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils._text import to_native + +try: + from ansible.module_utils.ansible_release import __version__ as ansible_version +except ImportError: + ansible_version = 'unknown' + +COLLECTION_VERSION = "21.8.0" + +try: + import requests + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False + +ERROR_MSG = dict( + no_cserver='This module is expected to run as cluster admin' +) + +LOG = logging.getLogger(__name__) +LOG_FILE = '/tmp/um_apis.log' + + +def na_um_host_argument_spec(): + + return dict( + hostname=dict(required=True, type='str'), + username=dict(required=True, type='str'), + password=dict(required=True, type='str', no_log=True), + validate_certs=dict(required=False, type='bool', default=True), + http_port=dict(required=False, type='int'), + feature_flags=dict(required=False, type='dict', default=dict()), + max_records=dict(required=False, type='int') + ) + + +def has_feature(module, feature_name): + feature = get_feature(module, feature_name) + if isinstance(feature, bool): + return feature + module.fail_json(msg="Error: expected bool type for feature flag: %s" % feature_name) + + +def get_feature(module, feature_name): + ''' if the user has configured the feature, use it + otherwise, use our default + ''' + default_flags = dict( + strict_json_check=True, # if true, fail if response.content is not empty and is not valid json + trace_apis=False, # if true, append REST requests/responses to LOG_FILE + + ) + + if module.params['feature_flags'] is not None and feature_name in module.params['feature_flags']: + return module.params['feature_flags'][feature_name] + if feature_name in default_flags: + return default_flags[feature_name] + module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name) + + +class
UMRestAPI(object): + ''' send REST request and process response ''' + def __init__(self, module, timeout=60): + self.module = module + self.username = self.module.params['username'] + self.password = self.module.params['password'] + self.hostname = self.module.params['hostname'] + self.verify = self.module.params['validate_certs'] + self.max_records = self.module.params['max_records'] + self.timeout = timeout + if self.module.params.get('http_port') is not None: + self.url = 'https://%s:%d' % (self.hostname, self.module.params['http_port']) + else: + self.url = 'https://%s' % self.hostname + self.errors = list() + self.debug_logs = list() + self.check_required_library() + if has_feature(module, 'trace_apis'): + logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s') + + def check_required_library(self): + if not HAS_REQUESTS: + self.module.fail_json(msg=missing_required_lib('requests')) + + def get_records(self, message, api): + records = list() + try: + if message['total_records'] > 0: + records = message['records'] + if message['total_records'] != len(records): + self.module.warn('Mismatch between received: %d and expected: %d records.' % (len(records), message['total_records'])) + except KeyError as exc: + self.module.fail_json(msg='Error: unexpected response from %s: %s - expecting key: %s' + % (api, message, to_native(exc))) + return records + + def send_request(self, method, api, params, json=None, accept=None): + ''' send http request and process response, including error conditions ''' + url = self.url + api + status_code = None + content = None + json_dict = None + json_error = None + error_details = None + headers = None + if accept is not None: + headers = dict() + # accept is used to turn on/off HAL linking + if accept is not None: + headers['accept'] = accept + + def check_contents(response): + '''json() may fail on an empty value, but it's OK if no response is expected. 
+ To avoid false positives, only report an issue when we expect to read a value. + The first get will see it. + ''' + if method == 'GET' and has_feature(self.module, 'strict_json_check'): + contents = response.content + if len(contents) > 0: + raise ValueError("Expecting json, got: %s" % contents) + + def get_json(response): + ''' extract json, and error message if present ''' + try: + json = response.json() + except ValueError: + check_contents(response) + return None, None + error = json.get('error') + return json, error + + self.log_debug('sending', repr(dict(method=method, url=url, verify=self.verify, params=params, + timeout=self.timeout, json=json, headers=headers))) + try: + response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), + params=params, timeout=self.timeout, json=json, headers=headers) + content = response.content # for debug purposes + status_code = response.status_code + # If the response was successful, no Exception will be raised + response.raise_for_status() + json_dict, json_error = get_json(response) + except requests.exceptions.HTTPError as err: + __, json_error = get_json(response) + if json_error is None: + self.log_error(status_code, 'HTTP error: %s' % err) + error_details = str(err) + # If an error was reported in the json payload, it is handled below + except requests.exceptions.ConnectionError as err: + self.log_error(status_code, 'Connection error: %s' % err) + error_details = str(err) + except Exception as err: + self.log_error(status_code, 'Other error: %s' % err) + error_details = str(err) + if json_error is not None: + self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error)) + error_details = json_error + self.log_debug(status_code, content) + return json_dict, error_details + + def get(self, api, params): + + def get_next_api(message): + '''make sure _links is present, and href is present if next is present + return api if next is present, None otherwise + return 
error if _links or href are missing + ''' + api, error = None, None + if message is None or '_links' not in message: + error = 'Expecting _links key in %s' % message + elif 'next' in message['_links']: + if 'href' in message['_links']['next']: + api = message['_links']['next']['href'] + else: + error = 'Expecting href key in %s' % message['_links']['next'] + return api, error + + method = 'GET' + records = list() + if self.max_records is not None: + if params and 'max_records' not in params: + params['max_records'] = self.max_records + else: + params = dict(max_records=self.max_records) + api = '/api/%s' % api + + while api: + message, error = self.send_request(method, api, params) + if error: + return message, error + api, error = get_next_api(message) + if error: + return message, error + if 'records' in message: + records.extend(message['records']) + params = None # already included in the next link + + if records: + message['records'] = records + return message, error + + def log_error(self, status_code, message): + LOG.error("%s: %s", status_code, message) + self.errors.append(message) + self.debug_logs.append((status_code, message)) + + def log_debug(self, status_code, content): + LOG.debug("%s: %s", status_code, content) + self.debug_logs.append((status_code, content)) diff --git a/ansible_collections/netapp/um_info/plugins/module_utils/netapp_module.py b/ansible_collections/netapp/um_info/plugins/module_utils/netapp_module.py new file mode 100644 index 000000000..f3b95800e --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/module_utils/netapp_module.py @@ -0,0 +1,51 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. 
+# +# Copyright (c) 2020, Laurent Nicolas +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +''' Support class for NetApp ansible modules ''' + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class NetAppModule(object): + ''' + Common class for NetApp modules + set of support functions to derive actions based + on the current state of the system, and a desired state + ''' + + def __init__(self): + self.changed = False + self.parameters = {} + + def set_parameters(self, ansible_params): + self.parameters = dict() + for param in ansible_params: + if ansible_params[param] is not None: + self.parameters[param] = ansible_params[param] + return self.parameters diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_aggregates_info.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_aggregates_info.py new file mode 100644 index 000000000..10a34cfdf --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_aggregates_info.py @@ -0,0 +1,163 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_aggregates +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_aggregates_info +short_description: NetApp Unified Manager list aggregates. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.5.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List Aggregates on AIQUM. +''' + +EXAMPLES = """ +- name: List Aggregates + netapp.um_info.na_um_aggregates_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of Aggregates information + returned: always + type: list + sample: [{'node': + {'_links': + {'self': + {'href': '...' 
+ } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'snaplock_type': '...', + 'uuid': '...', + 'space': + {'block_storage': + {'available': ..., + 'used': ..., + 'size': ... + }, + 'efficiency': + {'savings': ..., + 'logical_used': ... + } + }, + 'block_storage': + {'hybrid_cache': + {'enabled': ..., + 'size': ... + }, + 'primary': + {'raid_size': ..., + 'raid_type': '...' + }, + 'mirror': + {'state': '...' + } + }, + 'data_encryption': + {'software_encryption_enabled': ... + }, + 'cluster': + {'_links': + {'self': + {'href': '...' + } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'state': '...', + 'create_time': '...', + '_links': + {'self': + {'href': '...' + } + }, + 'key': '...', + 'type': '...', + 'name': '...' + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMAggregate(object): + ''' aggregates initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_aggregates(self): + """ + Fetch details of aggregates. 
+ :return: + Dictionary of current details if aggregates found + None if aggregates is not found + """ + data = {} + api = "datacenter/storage/aggregates?order_by=performance_capacity.used" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the aggregates listing + :return: None + """ + current = self.get_aggregates() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create Aggregate class instance and invoke apply + :return: None + """ + list_aggregates_obj = NetAppUMAggregate() + list_aggregates_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_clusters_info.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_clusters_info.py new file mode 100644 index 000000000..60baa7a48 --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_clusters_info.py @@ -0,0 +1,152 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_clusters +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_clusters_info +short_description: NetApp Unified Manager list cluster. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.5.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List Cluster on AIQUM. 
+''' + +EXAMPLES = """ +- name: List Clusters + netapp.um_info.na_um_clusters_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of Clusters information + returned: always + type: list + sample: [{ + 'name': '...', + 'version': + { + 'generation': ..., + 'major': ..., + 'full': '...', + 'minor': ... + }, + 'management_ip': '...', + 'contact': ..., + '_links': + { + 'self': + { + 'href': '...' + } + }, + 'location': '...', + 'key': '', + 'nodes': + [ + { + 'uptime': ..., + 'uuid': '...', + 'version': + { + 'generation': ..., + 'major': ..., + 'full': '...', + 'minor': ... + }, + '_links': + { + 'self': + { + 'href': '...' + } + }, + 'location': '...', + 'key': '...', + 'serial_number': '...', + 'model': '...', + 'name': '...' + } + ], + 'isSanOptimized': ..., + 'uuid': '...' + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMCluster(object): + ''' cluster initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_clusters(self): + """ + Fetch details of clusters. 
+ :return: + Dictionary of current details if clusters found + None if clusters is not found + """ + data = {} + api = "datacenter/cluster/clusters" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the cluster listing + :return: None + """ + current = self.get_clusters() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create Cluster class instance and invoke apply + :return: None + """ + list_cluster_obj = NetAppUMCluster() + list_cluster_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_list_aggregates.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_aggregates.py new file mode 100644 index 000000000..10a34cfdf --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_aggregates.py @@ -0,0 +1,163 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_aggregates +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_aggregates_info +short_description: NetApp Unified Manager list aggregates. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.5.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List Aggregates on AIQUM. 
+''' + +EXAMPLES = """ +- name: List Aggregates + netapp.um_info.na_um_aggregates_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of Aggregates information + returned: always + type: list + sample: [{'node': + {'_links': + {'self': + {'href': '...' + } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'snaplock_type': '...', + 'uuid': '...', + 'space': + {'block_storage': + {'available': ..., + 'used': ..., + 'size': ... + }, + 'efficiency': + {'savings': ..., + 'logical_used': ... + } + }, + 'block_storage': + {'hybrid_cache': + {'enabled': ..., + 'size': ... + }, + 'primary': + {'raid_size': ..., + 'raid_type': '...' + }, + 'mirror': + {'state': '...' + } + }, + 'data_encryption': + {'software_encryption_enabled': ... + }, + 'cluster': + {'_links': + {'self': + {'href': '...' + } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'state': '...', + 'create_time': '...', + '_links': + {'self': + {'href': '...' + } + }, + 'key': '...', + 'type': '...', + 'name': '...' + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMAggregate(object): + ''' aggregates initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_aggregates(self): + """ + Fetch details of aggregates. 
+ :return: + Dictionary of current details if aggregates found + None if aggregates is not found + """ + data = {} + api = "datacenter/storage/aggregates?order_by=performance_capacity.used" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the aggregates listing + :return: None + """ + current = self.get_aggregates() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create Aggregate class instance and invoke apply + :return: None + """ + list_aggregates_obj = NetAppUMAggregate() + list_aggregates_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_list_clusters.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_clusters.py new file mode 100644 index 000000000..60baa7a48 --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_clusters.py @@ -0,0 +1,152 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_clusters +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_clusters_info +short_description: NetApp Unified Manager list cluster. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.5.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List Cluster on AIQUM. 
+''' + +EXAMPLES = """ +- name: List Clusters + netapp.um_info.na_um_clusters_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of Clusters information + returned: always + type: list + sample: [{ + 'name': '...', + 'version': + { + 'generation': ..., + 'major': ..., + 'full': '...', + 'minor': ... + }, + 'management_ip': '...', + 'contact': ..., + '_links': + { + 'self': + { + 'href': '...' + } + }, + 'location': '...', + 'key': '', + 'nodes': + [ + { + 'uptime': ..., + 'uuid': '...', + 'version': + { + 'generation': ..., + 'major': ..., + 'full': '...', + 'minor': ... + }, + '_links': + { + 'self': + { + 'href': '...' + } + }, + 'location': '...', + 'key': '...', + 'serial_number': '...', + 'model': '...', + 'name': '...' + } + ], + 'isSanOptimized': ..., + 'uuid': '...' + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMCluster(object): + ''' cluster initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_clusters(self): + """ + Fetch details of clusters. 
+ :return: + Dictionary of current details if clusters found + None if clusters is not found + """ + data = {} + api = "datacenter/cluster/clusters" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the cluster listing + :return: None + """ + current = self.get_clusters() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create Cluster class instance and invoke apply + :return: None + """ + list_cluster_obj = NetAppUMCluster() + list_cluster_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_list_nodes.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_nodes.py new file mode 100644 index 000000000..27e81ec2e --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_nodes.py @@ -0,0 +1,145 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_nodes +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_nodes_info +short_description: NetApp Unified Manager list nodes. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.5.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List Nodes on AIQUM. 
+''' + +EXAMPLES = """ +- name: List Nodes + netapp.um_info.na_um_nodes_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of Nodes information + returned: always + type: list + sample: [{'allFlashOptimized': ..., + 'uptime': ..., + 'vendor': '...', + 'uuid': '...', + 'nvramid': '...', + '_links': + {'self': + {'href': '...' + } + }, + 'cluster': + {'_links': + {'self': + {'href': '...' + } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'version': + {'generation': ..., + 'major': ..., + 'full': '...', + 'minor': ... + }, + 'systemid': '...', + 'location': '...', + 'key': ...', + 'is_all_flash_optimized': ..., + 'serial_number': '...', + 'model': '...', + 'ha': + {'partners': + [{'_links': {}, + 'uuid': ..., + 'key': ..., + 'name': ... + }] + }, + 'health': ..., + 'name': '...' + }] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMNode(object): + ''' nodes initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_nodes(self): + """ + Fetch details of nodes. 
+ :return: + Dictionary of current details if nodes found + None if nodes is not found + """ + data = {} + api = "datacenter/cluster/nodes?order_by=performance_capacity.used" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the nodes listing + :return: None + """ + current = self.get_nodes() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create Node class instance and invoke apply + :return: None + """ + list_nodes_obj = NetAppUMNode() + list_nodes_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_list_svms.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_svms.py new file mode 100644 index 000000000..2722e9ef6 --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_svms.py @@ -0,0 +1,174 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_svms +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_svms_info +short_description: NetApp Unified Manager list svms. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.5.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List SVMs on AIQUM. 
+''' + +EXAMPLES = """ +- name: List SVMs + netapp.um_info.na_um_svms_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of SVMs information + returned: always + type: list + sample: [{'fcp': + {'enabled': ... + }, + 'dns': ..., + 'snapshot_policy': + {'_links': {}, + 'uuid': ..., + 'key': '...', + 'name': '...' + }, + 'language': '...', + 'subtype': 'default', + 'aggregates': + [{'_links': + {'self': + {'href': '...' + } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }], + 'nvme': + {'enabled': ... + }, + 'ipspace': + {'_links': {}, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'uuid': '...', + 'cluster': + {'_links': + {'self': + {'href': '...' + } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'state': '...', + '_links': + {'self': + {'href': '...' + } + }, + 'key': '...', + 'ldap': + {'enabled': ... + }, + 'nis': + {'domain': ..., + 'enabled': ..., + 'servers': ... + }, + 'cifs': + {'enabled': ..., + 'name': ..., + 'ad_domain': + {'fqdn': ... + } + }, + 'iscsi': + {'enabled': ... + }, + 'nfs': + {'enabled': ... + }, + 'name': '...' + }] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMSVM(object): + ''' svms initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_svms(self): + """ + Fetch details of svms. 
+ :return: + Dictionary of current details if svms found + None if svms is not found + """ + data = {} + api = "datacenter/svm/svms" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the svms listing + :return: None + """ + current = self.get_svms() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create SVM class instance and invoke apply + :return: None + """ + list_svms_obj = NetAppUMSVM() + list_svms_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_list_volumes.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_volumes.py new file mode 100644 index 000000000..099213226 --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_list_volumes.py @@ -0,0 +1,133 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_volumes +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_volumes_info +short_description: NetApp Unified Manager list volumes. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.6.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List Volumes on AIQUM. 
+''' + +EXAMPLES = """ +- name: List Volumes + netapp.um_info.na_um_volumes_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of Volumes information + returned: always + type: list + sample: [{'style': '...', + 'svm': + {'_links': + {'self': {...} + }, + '...' + }, + 'qos': {...}, + 'name': '...', + 'language': '...', + 'space': {...}, + 'aggregates': + [ + {...} + ], + 'tiering': {...}, + 'autosize': {...}, + 'cluster': {...}, + 'state': '...', + 'create_time': '...', + '_links': + {'self': + {'href': '...' + } + }, + 'key': '...', + 'snapmirror': {...}, + 'snapshot_policy': {...}, + 'type': '...', + 'uuid': '...' + }] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMVolume(object): + ''' volumes initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_volumes(self): + """ + Fetch details of volumes. 
+ :return: + Dictionary of current details if volumes found + None if volumes is not found + """ + data = {} + api = "datacenter/storage/volumes" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the volumes listing + :return: None + """ + current = self.get_volumes() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create Volume class instance and invoke apply + :return: None + """ + list_volumes_obj = NetAppUMVolume() + list_volumes_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_nodes_info.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_nodes_info.py new file mode 100644 index 000000000..27e81ec2e --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_nodes_info.py @@ -0,0 +1,145 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_nodes +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_nodes_info +short_description: NetApp Unified Manager list nodes. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.5.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List Nodes on AIQUM. 
+''' + +EXAMPLES = """ +- name: List Nodes + netapp.um_info.na_um_nodes_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of Nodes information + returned: always + type: list + sample: [{'allFlashOptimized': ..., + 'uptime': ..., + 'vendor': '...', + 'uuid': '...', + 'nvramid': '...', + '_links': + {'self': + {'href': '...' + } + }, + 'cluster': + {'_links': + {'self': + {'href': '...' + } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'version': + {'generation': ..., + 'major': ..., + 'full': '...', + 'minor': ... + }, + 'systemid': '...', + 'location': '...', + 'key': ...', + 'is_all_flash_optimized': ..., + 'serial_number': '...', + 'model': '...', + 'ha': + {'partners': + [{'_links': {}, + 'uuid': ..., + 'key': ..., + 'name': ... + }] + }, + 'health': ..., + 'name': '...' + }] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMNode(object): + ''' nodes initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_nodes(self): + """ + Fetch details of nodes. 
+ :return: + Dictionary of current details if nodes found + None if nodes is not found + """ + data = {} + api = "datacenter/cluster/nodes?order_by=performance_capacity.used" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the nodes listing + :return: None + """ + current = self.get_nodes() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create Node class instance and invoke apply + :return: None + """ + list_nodes_obj = NetAppUMNode() + list_nodes_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_svms_info.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_svms_info.py new file mode 100644 index 000000000..2722e9ef6 --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_svms_info.py @@ -0,0 +1,174 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_svms +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_svms_info +short_description: NetApp Unified Manager list svms. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.5.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List SVMs on AIQUM. 
+''' + +EXAMPLES = """ +- name: List SVMs + netapp.um_info.na_um_svms_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of SVMs information + returned: always + type: list + sample: [{'fcp': + {'enabled': ... + }, + 'dns': ..., + 'snapshot_policy': + {'_links': {}, + 'uuid': ..., + 'key': '...', + 'name': '...' + }, + 'language': '...', + 'subtype': 'default', + 'aggregates': + [{'_links': + {'self': + {'href': '...' + } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }], + 'nvme': + {'enabled': ... + }, + 'ipspace': + {'_links': {}, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'uuid': '...', + 'cluster': + {'_links': + {'self': + {'href': '...' + } + }, + 'uuid': '...', + 'key': '...', + 'name': '...' + }, + 'state': '...', + '_links': + {'self': + {'href': '...' + } + }, + 'key': '...', + 'ldap': + {'enabled': ... + }, + 'nis': + {'domain': ..., + 'enabled': ..., + 'servers': ... + }, + 'cifs': + {'enabled': ..., + 'name': ..., + 'ad_domain': + {'fqdn': ... + } + }, + 'iscsi': + {'enabled': ... + }, + 'nfs': + {'enabled': ... + }, + 'name': '...' + }] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMSVM(object): + ''' svms initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_svms(self): + """ + Fetch details of svms. 
+ :return: + Dictionary of current details if svms found + None if svms is not found + """ + data = {} + api = "datacenter/svm/svms" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the svms listing + :return: None + """ + current = self.get_svms() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create SVM class instance and invoke apply + :return: None + """ + list_svms_obj = NetAppUMSVM() + list_svms_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/plugins/modules/na_um_volumes_info.py b/ansible_collections/netapp/um_info/plugins/modules/na_um_volumes_info.py new file mode 100644 index 000000000..099213226 --- /dev/null +++ b/ansible_collections/netapp/um_info/plugins/modules/na_um_volumes_info.py @@ -0,0 +1,133 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' +na_um_list_volumes +''' + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + + +DOCUMENTATION = ''' +module: na_um_volumes_info +short_description: NetApp Unified Manager list volumes. +extends_documentation_fragment: + - netapp.um_info.netapp.um +version_added: '20.6.0' +author: NetApp Ansible Team (@carchi8py) + +description: +- List Volumes on AIQUM. 
+''' + +EXAMPLES = """ +- name: List Volumes + netapp.um_info.na_um_volumes_info: + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = """ +records: + description: Returns list of Volumes information + returned: always + type: list + sample: [{'style': '...', + 'svm': + {'_links': + {'self': {...} + }, + '...' + }, + 'qos': {...}, + 'name': '...', + 'language': '...', + 'space': {...}, + 'aggregates': + [ + {...} + ], + 'tiering': {...}, + 'autosize': {...}, + 'cluster': {...}, + 'state': '...', + 'create_time': '...', + '_links': + {'self': + {'href': '...' + } + }, + 'key': '...', + 'snapmirror': {...}, + 'snapshot_policy': {...}, + 'type': '...', + 'uuid': '...' + }] +""" + +from ansible.module_utils.basic import AnsibleModule +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils +from ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI + + +class NetAppUMVolume(object): + ''' volumes initialize and class methods ''' + + def __init__(self): + self.argument_spec = netapp_utils.na_um_host_argument_spec() + self.module = AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=True + ) + + self.na_helper = NetAppModule() + self.parameters = self.na_helper.set_parameters(self.module.params) + + self.rest_api = UMRestAPI(self.module) + + def get_volumes(self): + """ + Fetch details of volumes. 
+ :return: + Dictionary of current details if volumes found + None if volumes is not found + """ + data = {} + api = "datacenter/storage/volumes" + message, error = self.rest_api.get(api, data) + if error: + self.module.fail_json(msg=error) + return self.rest_api.get_records(message, api) + + def apply(self): + """ + Apply action to the volumes listing + :return: None + """ + current = self.get_volumes() + if current is not None: + self.na_helper.changed = True + self.module.exit_json(changed=self.na_helper.changed, msg=current) + + +def main(): + """ + Create Volume class instance and invoke apply + :return: None + """ + list_volumes_obj = NetAppUMVolume() + list_volumes_obj.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp/um_info/requirements.txt b/ansible_collections/netapp/um_info/requirements.txt new file mode 100644 index 000000000..663bd1f6a --- /dev/null +++ b/ansible_collections/netapp/um_info/requirements.txt @@ -0,0 +1 @@ +requests \ No newline at end of file diff --git a/ansible_collections/netapp/um_info/tests/unit/compat/__init__.py b/ansible_collections/netapp/um_info/tests/unit/compat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/netapp/um_info/tests/unit/compat/builtins.py b/ansible_collections/netapp/um_info/tests/unit/compat/builtins.py new file mode 100644 index 000000000..f60ee6782 --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/compat/builtins.py @@ -0,0 +1,33 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +# +# Compat for python2.7 +# + +# One unittest needs to import builtins via __import__() so we need to have +# the string that represents it +try: + import __builtin__ +except ImportError: + BUILTINS = 'builtins' +else: + BUILTINS = '__builtin__' diff --git a/ansible_collections/netapp/um_info/tests/unit/compat/mock.py b/ansible_collections/netapp/um_info/tests/unit/compat/mock.py new file mode 100644 index 000000000..0972cd2e8 --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/compat/mock.py @@ -0,0 +1,122 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python3.x's unittest.mock module +''' +import sys + +# Python 2.7 + +# Note: Could use the pypi mock library on python3.x as well as python2.x. 
It +# is the same as the python3 stdlib mock library + +try: + # Allow wildcard import because we really do want to import all of mock's + # symbols into this compat shim + # pylint: disable=wildcard-import,unused-wildcard-import + from unittest.mock import * +except ImportError: + # Python 2 + # pylint: disable=wildcard-import,unused-wildcard-import + try: + from mock import * + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +# Prior to 3.4.4, mock_open cannot handle binary read_data +if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b'\n' if isinstance(read_data, bytes) else '\n' + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. + data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read` methoddline`, and `readlines` of the + file handle to return. This is an empty string by default. 
+ """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock diff --git a/ansible_collections/netapp/um_info/tests/unit/compat/unittest.py b/ansible_collections/netapp/um_info/tests/unit/compat/unittest.py new file mode 100644 index 000000000..73a20cf8c --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/compat/unittest.py @@ -0,0 +1,44 @@ +# (c) 2014, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +''' +Compat module for Python2.7's unittest module +''' + +import sys + +import pytest + +# Allow wildcard import because we really do want to import all of +# unittests's symbols into this compat shim +# pylint: disable=wildcard-import,unused-wildcard-import +if sys.version_info < (2, 7): + try: + # Need unittest2 on python2.6 + from unittest2 import * + except ImportError: + print('You need unittest2 installed on python2.6.x to run tests') + + class TestCase: + """ skip everything """ + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as unittest2 may not be available') +else: + from unittest import * diff --git a/ansible_collections/netapp/um_info/tests/unit/plugins/module_utils/test_netapp.py b/ansible_collections/netapp/um_info/tests/unit/plugins/module_utils/test_netapp.py new file mode 100644 index 000000000..eefca9041 --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/plugins/module_utils/test_netapp.py @@ -0,0 +1,236 @@ +# Copyright (c) 2018 NetApp +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +''' unit tests for module_utils netapp.py ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os.path +import sys +import tempfile + +import pytest + +from ansible.module_utils.ansible_release import __version__ as ansible_version +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.um_info.plugins.module_utils.netapp import COLLECTION_VERSION +from ansible_collections.netapp.um_info.tests.unit.compat.mock import patch + +import 
ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +SRR = { + # common responses + 'is_rest': (200, {}, None), + 'is_zapi': (400, {}, "Unreachable"), + # 'empty_good': ({}, None), + 'empty_good': (dict(_links=dict(self='me')), None), + 'get_next': (dict(_links=dict(self='me', next=dict(href='next_records'))), None), + 'get_data': (dict(_links=dict(self='me'), records=['data1', 'data2']), None), + 'end_of_sequence': (None, "Unexpected call to send_request"), + 'generic_error': (None, "Expected error"), + 'no__links_error': (dict(), None), + 'no_href_error': (dict(_links=dict(self='me', next=dict())), None), +} + + +def mock_args(feature_flags=None): + args = { + 'hostname': 'test', + 'username': 'test_user', + 'password': 'test_pass!', + } + if feature_flags is not None: + args.update({'feature_flags': feature_flags}) + return args + + +def create_module(args): + argument_spec = netapp_utils.na_um_host_argument_spec() + set_module_args(args) + module = basic.AnsibleModule(argument_spec) + return module + + +def create_restapi_object(args): + module = create_module(args) + module.fail_json = fail_json + rest_api = netapp_utils.UMRestAPI(module) + return rest_api + + +class mockResponse: + def __init__(self, 
json_data, status_code, raise_action=None): + self.json_data = json_data + self.status_code = status_code + self.content = json_data + self.raise_action = raise_action + + def raise_for_status(self): + pass + + def json(self): + if self.raise_action == 'bad_json': + raise ValueError(self.raise_action) + return self.json_data + + +@patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') +def test_empty_get(mock_request): + ''' get with no data ''' + mock_request.side_effect = [ + SRR['empty_good'], + SRR['end_of_sequence'], + ] + rest_api = create_restapi_object(mock_args()) + message, error = rest_api.get('api', None) + assert not error + # only one key (_links) + assert len(message) == 1 + + +@patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') +def test_get_next(mock_request): + ''' get with a next href ''' + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data'], + SRR['end_of_sequence'], + ] + rest_api = create_restapi_object(mock_args()) + message, error = rest_api.get('api', None) + assert not error + print('empty_get:', message) + assert message['records'] == SRR['get_data'][0]['records'] + + +@patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') +def test_negative_get_next_no__links(mock_request): + ''' get with a next href ''' + mock_request.side_effect = [ + SRR['no__links_error'], + SRR['end_of_sequence'], + ] + rest_api = create_restapi_object(mock_args()) + message, error = rest_api.get('api', None) + print('error:', error) + assert error + assert 'Expecting _links key in' in error + + +@patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') +def test_negative_get_next_no_href(mock_request): + ''' get with a next href ''' + mock_request.side_effect = [ + SRR['no_href_error'], + SRR['end_of_sequence'], + ] + rest_api = create_restapi_object(mock_args()) + message, error = 
rest_api.get('api', None) + print('error:', error) + assert error + assert 'Expecting href key in' in error + + +def test_has_feature_success_default_0(): + ''' existing feature_flag with default of False''' + flag = 'trace_apis' + module = create_module(mock_args()) + value = netapp_utils.has_feature(module, flag) + assert not value + + +def test_has_feature_success_default_1(): + ''' existing feature_flag with default of True''' + flag = 'strict_json_check' + module = create_module(mock_args()) + value = netapp_utils.has_feature(module, flag) + assert value + + +def test_has_feature_success_user_true(): + ''' existing feature_flag with value set to True ''' + flag = 'user_deprecation_warning' + args = dict(mock_args({flag: True})) + module = create_module(args) + value = netapp_utils.has_feature(module, flag) + assert value + + +def test_has_feature_success_user_false(): + ''' existing feature_flag with value set to False ''' + flag = 'user_deprecation_warning' + args = dict(mock_args({flag: False})) + print(args) + module = create_module(args) + value = netapp_utils.has_feature(module, flag) + assert not value + + +def test_has_feature_invalid_key(): + ''' existing feature_flag with unknown key ''' + flag = 'deprecation_warning_bad_key' + module = create_module(mock_args()) + # replace ANsible fail method with ours + module.fail_json = fail_json + with pytest.raises(AnsibleFailJson) as exc: + netapp_utils.has_feature(module, flag) + msg = 'Internal error: unexpected feature flag: %s' % flag + assert exc.value.args[0]['msg'] == msg + + +@patch('requests.request') +def test_empty_get_sent(mock_request): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data=dict(_links='me'), status_code=200) + rest_api = create_restapi_object(mock_args()) + message, error = rest_api.get('api', None) + assert not error + # only one key (_links) + assert len(message) == 1 + + +@patch('requests.request') +def test_empty_get_sent_bad_json(mock_request): + ''' 
get with no data ''' + mock_request.return_value = mockResponse(json_data='anything', status_code=200, raise_action='bad_json') + rest_api = create_restapi_object(mock_args()) + message, error = rest_api.get('api', None) + assert error + assert 'Expecting json, got: anything' in error + print('errors:', rest_api.errors) + print('debug:', rest_api.debug_logs) + + +@patch('requests.request') +def test_empty_get_sent_bad_but_empty_json(mock_request): + ''' get with no data ''' + mock_request.return_value = mockResponse(json_data='', status_code=200, raise_action='bad_json') + rest_api = create_restapi_object(mock_args()) + message, error = rest_api.get('api', None) + assert error + assert 'Expecting _links key in None' in error + print('errors:', rest_api.errors) + print('debug:', rest_api.debug_logs) diff --git a/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_aggregates_info.py b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_aggregates_info.py new file mode 100644 index 000000000..9d2479484 --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_aggregates_info.py @@ -0,0 +1,159 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_um_aggregates_info """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest +import sys + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.um_info.tests.unit.compat import unittest +from ansible_collections.netapp.um_info.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.um_info.plugins.modules.na_um_aggregates_info\ + import NetAppUMAggregate as my_module # module under test + + +if not 
netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'empty_not_so_good': ({}, None), + 'end_of_sequence': (None, "Unexpected call to send_request"), + 'generic_error': (None, "Expected error"), + 'get_next': (dict(_links=dict(self='me', next=dict(href='next_records'))), None), + 'get_data': (dict(_links=dict(self='me'), records=['data1', 'data2'], total_records=2), None), + 'get_data_missing_field': (dict(_links=dict(self='me'), records=['data1', 'data2']), None), + # module specific responses + 'get_aggregates': {'name': 'ansible'} +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockUMConnection(object): + ''' mock server connection to Unified Manager host ''' + + def __init__(self): + ''' pass init ''' + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + 
exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + self.server = MockUMConnection() + # whether to use a mock or a simulator + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + else: + hostname = 'hostname' + username = 'username' + password = 'password' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_aggregates_info.NetAppUMAggregate.get_aggregates') + def test_ensure_list_aggregates_get_called(self, get_aggregates): + ''' fetching details of aggregates ''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + my_obj.get_aggregates = Mock(return_value=SRR['get_aggregates']) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # to reset na_helper from remembering the previous 'changed' value + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_aggregates_info.NetAppUMAggregate.get_aggregates') + def test_ensure_get_called_existing(self, get_aggregates): + ''' test for existing aggregates''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.get_aggregates = Mock(return_value=SRR['get_aggregates']) + assert my_obj.get_aggregates() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_get_next(self, mock_request): + ''' test for existing aggregates''' + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + 
SRR['get_data'], + SRR['end_of_sequence'], + ] + my_obj = my_module() + assert my_obj.get_aggregates() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_negative_get_next(self, mock_request): + ''' test for existing aggregates''' + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data_missing_field'], + SRR['end_of_sequence'], + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_aggregates() is not None + print(exc.value.args[0]) + msg = 'unexpected response from datacenter/storage/aggregates?order_by=performance_capacity.used' + assert msg in exc.value.args[0]['msg'] + msg = "expecting key: 'total_records'" + assert msg in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_clusters_info.py b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_clusters_info.py new file mode 100644 index 000000000..c4939adb8 --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_clusters_info.py @@ -0,0 +1,159 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_um_clusters_info """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest +import sys + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.um_info.tests.unit.compat import unittest +from ansible_collections.netapp.um_info.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.um_info.plugins.modules.na_um_clusters_info\ + import NetAppUMCluster as my_module # module under test + + +if not 
netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'empty_good': ({}, None), + 'end_of_sequence': (None, "Unexpected call to send_request"), + 'generic_error': (None, "Expected error"), + 'get_next': (dict(_links=dict(self='me', next=dict(href='next_records'))), None), + 'get_data': (dict(_links=dict(self='me'), records=['data1', 'data2'], total_records=2), None), + 'get_data_missing_field': (dict(_links=dict(self='me'), records=['data1', 'data2']), None), + # module specific responses + 'get_cluster': {'name': 'ansible'} +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockUMConnection(object): + ''' mock server connection to Unified Manager host ''' + + def __init__(self): + ''' pass init ''' + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + 
exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + self.server = MockUMConnection() + # whether to use a mock or a simulator + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + else: + hostname = 'hostname' + username = 'username' + password = 'password' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_clusters_info.NetAppUMCluster.get_clusters') + def test_ensure_list_cluster_get_called(self, get_cluster): + ''' fetching details of cluster ''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + my_obj.get_cluster = Mock(return_value=SRR['get_cluster']) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + # to reset na_helper from remembering the previous 'changed' value + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_clusters_info.NetAppUMCluster.get_clusters') + def test_ensure_get_called_existing(self, get_cluster): + ''' test for existing cluster''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.get_cluster = Mock(return_value=SRR['get_cluster']) + assert my_obj.get_cluster() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_get_next(self, mock_request): + ''' test for existing clusters''' + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data'], + SRR['end_of_sequence'], + ] + 
my_obj = my_module() + assert my_obj.get_clusters() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_negative_get_next(self, mock_request): + ''' test for existing clusters''' + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data_missing_field'], + SRR['end_of_sequence'], + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_clusters() is not None + print(exc.value.args[0]) + msg = 'unexpected response from datacenter/cluster/clusters' + assert msg in exc.value.args[0]['msg'] + msg = "expecting key: 'total_records'" + assert msg in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_nodes_info.py b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_nodes_info.py new file mode 100644 index 000000000..e5769d1f1 --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_nodes_info.py @@ -0,0 +1,158 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_um_nodes_info """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest +import sys + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.um_info.tests.unit.compat import unittest +from ansible_collections.netapp.um_info.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.um_info.plugins.modules.na_um_nodes_info\ + import NetAppUMNode as my_module # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as 
requests is not be available') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'empty_good': ({}, None), + 'end_of_sequence': (None, "Unexpected call to send_request"), + 'generic_error': (None, "Expected error"), + 'get_next': (dict(_links=dict(self='me', next=dict(href='next_records'))), None), + 'get_data': (dict(_links=dict(self='me'), records=['data1', 'data2'], total_records=2), None), + 'get_data_missing_field': (dict(_links=dict(self='me'), records=['data1', 'data2']), None), + # module specific responses + 'get_nodes': {'name': 'ansible'} +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockUMConnection(object): + ''' mock server connection to Unified Manager host ''' + + def __init__(self): + ''' pass init ''' + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + self.server = 
MockUMConnection() + # whether to use a mock or a simulator + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + else: + hostname = 'hostname' + username = 'username' + password = 'password' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_nodes_info.NetAppUMNode.get_nodes') + def test_ensure_list_nodes_get_called(self, get_nodes): + ''' fetching details of nodes ''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + my_obj.get_nodes = Mock(return_value=SRR['get_nodes']) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_nodes_info.NetAppUMNode.get_nodes') + def test_ensure_get_called_existing(self, get_nodes): + ''' test for existing nodes''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.get_nodes = Mock(return_value=SRR['get_nodes']) + assert my_obj.get_nodes() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_get_next(self, mock_request): + ''' test for existing nodes''' + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data'], + SRR['end_of_sequence'], + ] + my_obj = my_module() + assert my_obj.get_nodes() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_negative_get_next(self, mock_request): + ''' test for existing nodes''' + 
set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data_missing_field'], + SRR['end_of_sequence'], + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_nodes() is not None + print(exc.value.args[0]) + msg = 'unexpected response from datacenter/cluster/nodes' + assert msg in exc.value.args[0]['msg'] + msg = "expecting key: 'total_records'" + assert msg in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_svms_info.py b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_svms_info.py new file mode 100644 index 000000000..2eafd508f --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_svms_info.py @@ -0,0 +1,158 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_um_svms_info """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest +import sys + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.um_info.tests.unit.compat import unittest +from ansible_collections.netapp.um_info.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.um_info.plugins.modules.na_um_svms_info\ + import NetAppUMSVM as my_module # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'empty_good': ({}, None), + 'end_of_sequence': (None, "Unexpected call to send_request"), + 'generic_error': (None, "Expected error"), + 
'get_next': (dict(_links=dict(self='me', next=dict(href='next_records'))), None), + 'get_data': (dict(_links=dict(self='me'), records=['data1', 'data2'], total_records=2), None), + 'get_data_missing_field': (dict(_links=dict(self='me'), records=['data1', 'data2']), None), + # module specific responses + 'get_svms': {'name': 'ansible'} +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockUMConnection(object): + ''' mock server connection to Unified Manager host ''' + + def __init__(self): + ''' pass init ''' + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + self.server = MockUMConnection() + # whether to use a mock or a simulator + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + else: + hostname = 'hostname' + username = 'username' + 
password = 'password' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_svms_info.NetAppUMSVM.get_svms') + def test_ensure_list_svms_get_called(self, get_svms): + ''' fetching details of svms ''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + my_obj.get_svms = Mock(return_value=SRR['get_svms']) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_svms_info.NetAppUMSVM.get_svms') + def test_ensure_get_called_existing(self, get_svms): + ''' test for existing svms''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.get_svms = Mock(return_value=SRR['get_svms']) + assert my_obj.get_svms() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_get_next(self, mock_request): + ''' test for existing svms''' + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data'], + SRR['end_of_sequence'], + ] + my_obj = my_module() + assert my_obj.get_svms() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_negative_get_next(self, mock_request): + ''' test for existing svms''' + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data_missing_field'], + SRR['end_of_sequence'], + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_svms() is not None + print(exc.value.args[0]) + msg = 
'unexpected response from datacenter/svm/svms' + assert msg in exc.value.args[0]['msg'] + msg = "expecting key: 'total_records'" + assert msg in exc.value.args[0]['msg'] diff --git a/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_volumes_info.py b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_volumes_info.py new file mode 100644 index 000000000..4c8a267fb --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/plugins/modules/test_na_um_volumes_info.py @@ -0,0 +1,158 @@ +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" unit tests for Ansible module: na_um_volumes_info """ + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import json +import pytest +import sys + +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.netapp.um_info.tests.unit.compat import unittest +from ansible_collections.netapp.um_info.tests.unit.compat.mock import patch, Mock +import ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils + +from ansible_collections.netapp.um_info.plugins.modules.na_um_volumes_info\ + import NetAppUMVolume as my_module # module under test + + +if not netapp_utils.HAS_REQUESTS and sys.version_info < (2, 7): + pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not be available') + + +# REST API canned responses when mocking send_request +SRR = { + # common responses + 'empty_good': ({}, None), + 'end_of_sequence': (None, "Unexpected call to send_request"), + 'generic_error': (None, "Expected error"), + 'get_next': (dict(_links=dict(self='me', next=dict(href='next_records'))), None), + 'get_data': (dict(_links=dict(self='me'), records=['data1', 'data2'], total_records=2), None), + 'get_data_missing_field': (dict(_links=dict(self='me'), records=['data1', 'data2']), None), + # module 
specific responses + 'get_volumes': {'name': 'ansible'} +} + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + pass + + +def exit_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): # pylint: disable=unused-argument + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class MockUMConnection(object): + ''' mock server connection to Unified Manager host ''' + + def __init__(self): + ''' pass init ''' + + +class TestMyModule(unittest.TestCase): + ''' a group of related Unit Tests ''' + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + self.server = MockUMConnection() + # whether to use a mock or a simulator + self.onbox = False + + def set_default_args(self): + if self.onbox: + hostname = '10.10.10.10' + username = 'admin' + password = 'password' + else: + hostname = 'hostname' + username = 'username' + password = 'password' + return dict({ + 'hostname': hostname, + 'username': username, + 'password': password, + }) + + def test_module_fail_when_required_args_missing(self): + ''' required arguments are reported as errors ''' + with pytest.raises(AnsibleFailJson) as exc: + 
set_module_args({}) + my_module() + print('Info: %s' % exc.value.args[0]['msg']) + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_volumes_info.NetAppUMVolume.get_volumes') + def test_ensure_list_volumes_get_called(self, get_volumes): + ''' fetching details of volumes ''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.server = self.server + my_obj.get_volumes = Mock(return_value=SRR['get_volumes']) + with pytest.raises(AnsibleExitJson) as exc: + my_obj.apply() + assert exc.value.args[0]['changed'] + + @patch('ansible_collections.netapp.um_info.plugins.modules.na_um_volumes_info.NetAppUMVolume.get_volumes') + def test_ensure_get_called_existing(self, get_volumes): + ''' test for existing volumes''' + set_module_args(self.set_default_args()) + my_obj = my_module() + my_obj.get_volumes = Mock(return_value=SRR['get_volumes']) + assert my_obj.get_volumes() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_get_next(self, mock_request): + ''' test for existing volumes''' + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data'], + SRR['end_of_sequence'], + ] + my_obj = my_module() + assert my_obj.get_volumes() is not None + + @patch('ansible_collections.netapp.um_info.plugins.module_utils.netapp.UMRestAPI.send_request') + def test_negative_get_next(self, mock_request): + ''' test for existing volumes''' + set_module_args(self.set_default_args()) + mock_request.side_effect = [ + SRR['get_next'], + SRR['get_data_missing_field'], + SRR['end_of_sequence'], + ] + my_obj = my_module() + with pytest.raises(AnsibleFailJson) as exc: + my_obj.get_volumes() is not None + print(exc.value.args[0]) + msg = 'unexpected response from datacenter/storage/volumes' + assert msg in exc.value.args[0]['msg'] + msg = "expecting key: 'total_records'" + assert msg in exc.value.args[0]['msg'] diff --git 
a/ansible_collections/netapp/um_info/tests/unit/requirements.txt b/ansible_collections/netapp/um_info/tests/unit/requirements.txt new file mode 100644 index 000000000..b754473a9 --- /dev/null +++ b/ansible_collections/netapp/um_info/tests/unit/requirements.txt @@ -0,0 +1 @@ +requests ; python_version >= '2.7' -- cgit v1.2.3